Mirror of https://github.com/openai/codex.git
Synced 2026-02-02 06:57:03 +00:00

Compare commits: shell-tool...dev/zhao/r (275 commits)
The commit table lists the 275 commits in this range by abbreviated SHA only (first e7d263dce2, last 830ab4ce20); the author, message, and date columns were empty in the captured table.
```diff
@@ -1 +1,2 @@
 iTerm
+psuedo
```
.github/actions/linux-code-sign/action.yml (vendored, new file, 44 lines)

```yaml
name: linux-code-sign
description: Sign Linux artifacts with cosign.
inputs:
  target:
    description: Target triple for the artifacts to sign.
    required: true
  artifacts-dir:
    description: Absolute path to the directory containing built binaries to sign.
    required: true

runs:
  using: composite
  steps:
    - name: Install cosign
      uses: sigstore/cosign-installer@v3.7.0

    - name: Cosign Linux artifacts
      shell: bash
      env:
        COSIGN_EXPERIMENTAL: "1"
        COSIGN_YES: "true"
        COSIGN_OIDC_CLIENT_ID: "sigstore"
        COSIGN_OIDC_ISSUER: "https://oauth2.sigstore.dev/auth"
      run: |
        set -euo pipefail

        dest="${{ inputs.artifacts-dir }}"
        if [[ ! -d "$dest" ]]; then
          echo "Destination $dest does not exist"
          exit 1
        fi

        for binary in codex codex-responses-api-proxy; do
          artifact="${dest}/${binary}"
          if [[ ! -f "$artifact" ]]; then
            echo "Binary $artifact not found"
            exit 1
          fi

          cosign sign-blob \
            --yes \
            --bundle "${artifact}.sigstore" \
            "$artifact"
        done
```
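Downstream, these bundles let anyone verify a release binary without distributing key material. A minimal sketch of the check, assuming cosign v2 is installed locally and the binary sits next to its `.sigstore` bundle; the identity and issuer values below are illustrative placeholders, not confirmed from the repository:

```bash
# Hypothetical verification of a downloaded Linux release binary.
# The --certificate-identity-regexp / --certificate-oidc-issuer values
# must match whatever identity the bundle actually records.
cosign verify-blob \
  --bundle codex.sigstore \
  --certificate-identity-regexp 'github\.com/openai/codex' \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  codex
```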
.github/actions/windows-code-sign/action.yml (vendored, new file, 55 lines)

```yaml
name: windows-code-sign
description: Sign Windows binaries with Azure Trusted Signing.
inputs:
  target:
    description: Target triple for the artifacts to sign.
    required: true
  client-id:
    description: Azure Trusted Signing client ID.
    required: true
  tenant-id:
    description: Azure tenant ID for Trusted Signing.
    required: true
  subscription-id:
    description: Azure subscription ID for Trusted Signing.
    required: true
  endpoint:
    description: Azure Trusted Signing endpoint.
    required: true
  account-name:
    description: Azure Trusted Signing account name.
    required: true
  certificate-profile-name:
    description: Certificate profile name for signing.
    required: true

runs:
  using: composite
  steps:
    - name: Azure login for Trusted Signing (OIDC)
      uses: azure/login@v2
      with:
        client-id: ${{ inputs.client-id }}
        tenant-id: ${{ inputs.tenant-id }}
        subscription-id: ${{ inputs.subscription-id }}

    - name: Sign Windows binaries with Azure Trusted Signing
      uses: azure/trusted-signing-action@v0
      with:
        endpoint: ${{ inputs.endpoint }}
        trusted-signing-account-name: ${{ inputs.account-name }}
        certificate-profile-name: ${{ inputs.certificate-profile-name }}
        exclude-environment-credential: true
        exclude-workload-identity-credential: true
        exclude-managed-identity-credential: true
        exclude-shared-token-cache-credential: true
        exclude-visual-studio-credential: true
        exclude-visual-studio-code-credential: true
        exclude-azure-cli-credential: false
        exclude-azure-powershell-credential: true
        exclude-azure-developer-cli-credential: true
        exclude-interactive-browser-credential: true
        cache-dependencies: false
        files: |
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
```
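On the consuming side, an Authenticode signature applied this way can be checked with the Windows SDK's signtool; a sketch, run from a shell where `signtool.exe` is on PATH:

```bash
# Verify the embedded signature and its certificate chain.
# /pa selects the Default Authenticode verification policy; /v is verbose.
signtool verify /pa /v codex.exe
```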
.github/workflows/cargo-deny.yml (vendored, new file, 26 lines)

```yaml
name: cargo-deny

on:
  pull_request:
  push:
    branches:
      - main

jobs:
  cargo-deny:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./codex-rs
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable

      - name: Run cargo-deny
        uses: EmbarkStudios/cargo-deny-action@v1
        with:
          rust-version: stable
          manifest-path: ./codex-rs/Cargo.toml
```
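The same checks can be run locally before pushing; a sketch, assuming cargo-deny is not already installed:

```bash
# One-time install, then run the same checks the action runs in CI.
cargo install cargo-deny --locked
cd codex-rs
# Runs the advisories, bans, licenses, and sources checks against the
# workspace manifest.
cargo deny check
```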
.github/workflows/ci.yml (vendored, 2 lines changed)

```diff
@@ -12,7 +12,7 @@ jobs:
       NODE_OPTIONS: --max-old-space-size=4096
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Setup pnpm
         uses: pnpm/action-setup@v4
```
.github/workflows/cla.yml (vendored, 4 lines changed)

```diff
@@ -46,6 +46,4 @@ jobs:
           path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
           path-to-signatures: signatures/cla.json
           branch: cla-signatures
-          allowlist: |
-            codex
-            dependabot[bot]
+          allowlist: codex,dependabot,dependabot[bot],github-actions[bot]
```
.github/workflows/codespell.yml (vendored, 2 lines changed)

```diff
@@ -18,7 +18,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
       - name: Annotate locations with typos
         uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
       - name: Codespell
```
.github/workflows/issue-deduplicator.yml (vendored, 3 lines changed)

```diff
@@ -16,7 +16,7 @@ jobs:
     outputs:
       codex_output: ${{ steps.codex.outputs.final-message }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6

       - name: Prepare Codex inputs
         env:
@@ -46,7 +46,6 @@ jobs:
         with:
           openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
           allow-users: "*"
-          model: gpt-5.1
           prompt: |
             You are an assistant that triages new GitHub issues by identifying potential duplicates.
```
.github/workflows/issue-labeler.yml (vendored, 2 lines changed)

```diff
@@ -16,7 +16,7 @@ jobs:
     outputs:
       codex_output: ${{ steps.codex.outputs.final-message }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6

       - id: codex
         uses: openai/codex-action@main
```
.github/workflows/rust-ci.yml (vendored, 91 lines changed)

```diff
@@ -17,7 +17,7 @@ jobs:
       codex: ${{ steps.detect.outputs.codex }}
       workflows: ${{ steps.detect.outputs.workflows }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
         with:
           fetch-depth: 0
       - name: Detect changed paths (no external action)
@@ -56,7 +56,7 @@ jobs:
       run:
         working-directory: codex-rs
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@1.90
         with:
           components: rustfmt
@@ -74,7 +74,7 @@ jobs:
       run:
         working-directory: codex-rs
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@1.90
       - uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
         with:
@@ -147,12 +147,21 @@ jobs:
         profile: release

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@1.90
         with:
           targets: ${{ matrix.target }}
           components: clippy

+      - name: Compute lockfile hash
+        id: lockhash
+        working-directory: codex-rs
+        shell: bash
+        run: |
+          set -euo pipefail
+          echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
+          echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
+
       # Explicit cache restore: split cargo home vs target, so we can
       # avoid caching the large target dir on the gnu-dev job.
       - name: Restore cargo home cache
@@ -164,7 +173,7 @@ jobs:
             ~/.cargo/registry/index/
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
-          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
           restore-keys: |
             cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
@@ -201,9 +210,9 @@ jobs:
         uses: actions/cache/restore@v4
         with:
           path: ${{ github.workspace }}/.sccache/
-          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
+          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
           restore-keys: |
-            sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
+            sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
             sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-

       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
@@ -278,7 +287,7 @@ jobs:
             ~/.cargo/registry/index/
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
-          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}

       - name: Save sccache cache (fallback)
         if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
@@ -286,7 +295,7 @@ jobs:
         uses: actions/cache/save@v4
         with:
           path: ${{ github.workspace }}/.sccache/
-          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
+          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}

       - name: sccache stats
         if: always() && env.USE_SCCACHE == 'true'
@@ -359,11 +368,63 @@ jobs:
         profile: dev

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
+
+      # We have been running out of space when running this job on Linux for
+      # x86_64-unknown-linux-gnu, so remove some unnecessary dependencies.
+      - name: Remove unnecessary dependencies to save space
+        if: ${{ startsWith(matrix.runner, 'ubuntu') }}
+        shell: bash
+        run: |
+          set -euo pipefail
+          sudo rm -rf \
+            /usr/local/lib/android \
+            /usr/share/dotnet \
+            /usr/local/share/boost \
+            /usr/local/lib/node_modules \
+            /opt/ghc
+          sudo apt-get remove -y docker.io docker-compose podman buildah
+
+      # Ensure brew includes this fix so that brew's shellenv.sh loads
+      # cleanly in the Codex sandbox (it is frequently eval'd via .zprofile
+      # for Brew users, including the macOS runners on GitHub):
+      #
+      # https://github.com/Homebrew/brew/pull/21157
+      #
+      # Once brew 5.0.5 is released and is the default on macOS runners, this
+      # step can be removed.
+      - name: Upgrade brew
+        if: ${{ startsWith(matrix.runner, 'macos') }}
+        shell: bash
+        run: |
+          set -euo pipefail
+          brew --version
+          git -C "$(brew --repo)" fetch origin
+          git -C "$(brew --repo)" checkout main
+          git -C "$(brew --repo)" reset --hard origin/main
+          export HOMEBREW_UPDATE_TO_TAG=0
+          brew update
+          brew upgrade
+          brew --version
+
+      # Some integration tests rely on DotSlash being installed.
+      # See https://github.com/openai/codex/pull/7617.
+      - name: Install DotSlash
+        uses: facebook/install-dotslash@v2

       - uses: dtolnay/rust-toolchain@1.90
         with:
           targets: ${{ matrix.target }}

+      - name: Compute lockfile hash
+        id: lockhash
+        working-directory: codex-rs
+        shell: bash
+        run: |
+          set -euo pipefail
+          echo "hash=$(sha256sum Cargo.lock | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
+          echo "toolchain_hash=$(sha256sum rust-toolchain.toml | cut -d' ' -f1)" >> "$GITHUB_OUTPUT"
+
       - name: Restore cargo home cache
         id: cache_cargo_home_restore
         uses: actions/cache/restore@v4
@@ -373,7 +434,7 @@ jobs:
             ~/.cargo/registry/index/
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
-          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
           restore-keys: |
             cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
@@ -409,9 +470,9 @@ jobs:
         uses: actions/cache/restore@v4
         with:
           path: ${{ github.workspace }}/.sccache/
-          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
+          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}
           restore-keys: |
-            sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-
+            sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
             sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-

       - uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
@@ -436,7 +497,7 @@ jobs:
             ~/.cargo/registry/index/
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
-          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ hashFiles('codex-rs/rust-toolchain.toml') }}
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}

       - name: Save sccache cache (fallback)
         if: always() && !cancelled() && env.USE_SCCACHE == 'true' && env.SCCACHE_GHA_ENABLED != 'true'
@@ -444,7 +505,7 @@ jobs:
         uses: actions/cache/save@v4
         with:
          path: ${{ github.workspace }}/.sccache/
-          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}-${{ github.run_id }}
+          key: sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ github.run_id }}

       - name: sccache stats
         if: always() && env.USE_SCCACHE == 'true'
```
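The recurring key change in this workflow swaps `hashFiles('**/Cargo.lock')` for the output of the new `Compute lockfile hash` step, which pins the key to the workspace lockfile and toolchain file rather than to every `Cargo.lock` the glob matches. A sketch of the same computation run locally, with stand-in values for the matrix variables:

```bash
cd codex-rs
lock_hash="$(sha256sum Cargo.lock | cut -d' ' -f1)"
toolchain_hash="$(sha256sum rust-toolchain.toml | cut -d' ' -f1)"
# Stand-ins for the workflow's matrix variables.
runner="ubuntu-24.04"; target="x86_64-unknown-linux-musl"; profile="release"
echo "cargo-home-${runner}-${target}-${profile}-${lock_hash}-${toolchain_hash}"
```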
.github/workflows/rust-release.yml (vendored, 59 lines changed)

```diff
@@ -19,7 +19,7 @@ jobs:
   tag-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6

       - name: Validate tag matches Cargo.toml version
         shell: bash
@@ -50,6 +50,9 @@ jobs:
     name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
     runs-on: ${{ matrix.runner }}
     timeout-minutes: 30
+    permissions:
+      contents: read
+      id-token: write
     defaults:
       run:
         working-directory: codex-rs
@@ -76,7 +79,7 @@ jobs:
             target: aarch64-pc-windows-msvc

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@1.90
         with:
           targets: ${{ matrix.target }}
@@ -100,6 +103,25 @@ jobs:
       - name: Cargo build
         run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy

+      - if: ${{ contains(matrix.target, 'linux') }}
+        name: Cosign Linux artifacts
+        uses: ./.github/actions/linux-code-sign
+        with:
+          target: ${{ matrix.target }}
+          artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
+
+      - if: ${{ contains(matrix.target, 'windows') }}
+        name: Sign Windows binaries with Azure Trusted Signing
+        uses: ./.github/actions/windows-code-sign
+        with:
+          target: ${{ matrix.target }}
+          client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
+          endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
+          account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
+          certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE_NAME }}
+
       - if: ${{ matrix.runner == 'macos-15-xlarge' }}
         name: Configure Apple code signing
         shell: bash
@@ -283,6 +305,11 @@ jobs:
             cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"
           fi

+          if [[ "${{ matrix.target }}" == *linux* ]]; then
+            cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
+            cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
+          fi
+
       - if: ${{ matrix.runner == 'windows-11-arm' }}
         name: Install zstd
         shell: powershell
@@ -321,6 +348,11 @@ jobs:
             continue
           fi

+          # Don't try to compress signature bundles.
+          if [[ "$base" == *.sigstore ]]; then
+            continue
+          fi
+
           # Create per-binary tar.gz
           tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"

@@ -371,8 +403,19 @@ jobs:
           path: |
             codex-rs/dist/${{ matrix.target }}/*

+  shell-tool-mcp:
+    name: shell-tool-mcp
+    needs: tag-check
+    uses: ./.github/workflows/shell-tool-mcp.yml
+    with:
+      release-tag: ${{ github.ref_name }}
+      publish: true
+    secrets: inherit
+
   release:
-    needs: build
+    needs:
+      - build
+      - shell-tool-mcp
     name: release
     runs-on: ubuntu-latest
     permissions:
@@ -386,7 +429,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - uses: actions/download-artifact@v4
         with:
@@ -395,6 +438,14 @@ jobs:
       - name: List
         run: ls -R dist/

+      # This is a temporary fix: we should modify shell-tool-mcp.yml so these
+      # files do not end up in dist/ in the first place.
+      - name: Delete entries from dist/ that should not go in the release
+        run: |
+          rm -rf dist/shell-tool-mcp*
+
+          ls -R dist/
+
       - name: Define release name
         id: release_name
         run: |
```
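Putting the copy and compression steps together, a Linux target's `dist/` directory should end up holding the raw binary, its per-binary archive, and the uncompressed `.sigstore` bundle. An illustrative check (a sketch following the copy steps above, not captured CI output):

```bash
target=x86_64-unknown-linux-musl
ls codex-rs/dist/"$target"/
# Expect codex-$target and codex-$target.tar.gz, plus
# codex-$target.sigstore left uncompressed by the skip above
# (and the same trio for codex-responses-api-proxy).
```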
.github/workflows/sdk.yml (vendored, 2 lines changed)

```diff
@@ -11,7 +11,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Setup pnpm
         uses: pnpm/action-setup@v4
```
.github/workflows/shell-tool-mcp-ci.yml (vendored, new file, 48 lines)

```yaml
name: shell-tool-mcp CI

on:
  push:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
  pull_request:
    paths:
      - "shell-tool-mcp/**"
      - ".github/workflows/shell-tool-mcp-ci.yml"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"

env:
  NODE_VERSION: 22

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: "pnpm"

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Format check
        run: pnpm --filter @openai/codex-shell-tool-mcp run format

      - name: Run tests
        run: pnpm --filter @openai/codex-shell-tool-mcp test

      - name: Build
        run: pnpm --filter @openai/codex-shell-tool-mcp run build
```
.github/workflows/shell-tool-mcp.yml (vendored, new file, 405 lines)

```yaml
name: shell-tool-mcp

on:
  workflow_call:
    inputs:
      release-version:
        description: Version to publish (x.y.z or x.y.z-alpha.N). Defaults to GITHUB_REF_NAME when it starts with rust-v.
        required: false
        type: string
      release-tag:
        description: Tag name to use when downloading release artifacts (defaults to rust-v<version>).
        required: false
        type: string
      publish:
        description: Whether to publish to npm when the version is releasable.
        required: false
        default: true
        type: boolean

env:
  NODE_VERSION: 22

jobs:
  metadata:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.compute.outputs.version }}
      release_tag: ${{ steps.compute.outputs.release_tag }}
      should_publish: ${{ steps.compute.outputs.should_publish }}
      npm_tag: ${{ steps.compute.outputs.npm_tag }}
    steps:
      - name: Compute version and tags
        id: compute
        run: |
          set -euo pipefail

          version="${{ inputs.release-version }}"
          release_tag="${{ inputs.release-tag }}"

          if [[ -z "$version" ]]; then
            if [[ -n "$release_tag" && "$release_tag" =~ ^rust-v.+ ]]; then
              version="${release_tag#rust-v}"
            elif [[ "${GITHUB_REF_NAME:-}" =~ ^rust-v.+ ]]; then
              version="${GITHUB_REF_NAME#rust-v}"
              release_tag="${GITHUB_REF_NAME}"
            else
              echo "release-version is required when GITHUB_REF_NAME is not a rust-v tag."
              exit 1
            fi
          fi

          if [[ -z "$release_tag" ]]; then
            release_tag="rust-v${version}"
          fi

          npm_tag=""
          should_publish="false"
          if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            should_publish="true"
          elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
            should_publish="true"
            npm_tag="alpha"
          fi

          echo "version=${version}" >> "$GITHUB_OUTPUT"
          echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
          echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
          echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"

  rust-binaries:
    name: Build Rust - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    defaults:
      run:
        working-directory: codex-rs
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
          - runner: macos-15-xlarge
            target: x86_64-apple-darwin
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            install_musl: true
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            install_musl: true
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - uses: dtolnay/rust-toolchain@1.90
        with:
          targets: ${{ matrix.target }}

      - if: ${{ matrix.install_musl }}
        name: Install musl build dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y musl-tools pkg-config

      - name: Build exec server binaries
        run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper

      - name: Stage exec server binaries
        run: |
          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}"
          mkdir -p "$dest"
          cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
          cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"

      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-rust-${{ matrix.target }}
          path: artifacts/**
          if-no-files-found: error

  bash-linux:
    name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    container:
      image: ${{ matrix.image }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-24.04
            image: ubuntu:24.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: ubuntu-22.04
            image: ubuntu:22.04
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-12
            image: debian:12
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: debian-11
            image: debian:11
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-24.04
            image: arm64v8/ubuntu:24.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-22.04
            image: arm64v8/ubuntu:22.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: ubuntu-20.04
            image: arm64v8/ubuntu:20.04
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-12
            image: arm64v8/debian:12
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: debian-11
            image: arm64v8/debian:11
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            variant: centos-9
            image: quay.io/centos/centos:stream9
    steps:
      - name: Install build prerequisites
        shell: bash
        run: |
          set -euo pipefail
          if command -v apt-get >/dev/null 2>&1; then
            apt-get update
            DEBIAN_FRONTEND=noninteractive apt-get install -y git build-essential bison autoconf gettext
          elif command -v dnf >/dev/null 2>&1; then
            dnf install -y git gcc gcc-c++ make bison autoconf gettext
          elif command -v yum >/dev/null 2>&1; then
            yum install -y git gcc gcc-c++ make bison autoconf gettext
          else
            echo "Unsupported package manager in container"
            exit 1
          fi

      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(command -v nproc >/dev/null 2>&1 && nproc || getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error

  bash-darwin:
    name: Build Bash (macOS) - ${{ matrix.variant }} - ${{ matrix.target }}
    needs: metadata
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-15-xlarge
            target: aarch64-apple-darwin
            variant: macos-15
          - runner: macos-14
            target: aarch64-apple-darwin
            variant: macos-14
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Build patched Bash
        shell: bash
        run: |
          set -euo pipefail
          git clone --depth 1 https://github.com/bminor/bash /tmp/bash
          cd /tmp/bash
          git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
          git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
          ./configure --without-bash-malloc
          cores="$(getconf _NPROCESSORS_ONLN)"
          make -j"${cores}"

          dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/bash/${{ matrix.variant }}"
          mkdir -p "$dest"
          cp bash "$dest/bash"

      - uses: actions/upload-artifact@v5
        with:
          name: shell-tool-mcp-bash-${{ matrix.target }}-${{ matrix.variant }}
          path: artifacts/**
          if-no-files-found: error

  package:
    name: Package npm module
    needs:
      - metadata
      - rust-binaries
      - bash-linux
      - bash-darwin
    runs-on: ubuntu-latest
    env:
      PACKAGE_VERSION: ${{ needs.metadata.outputs.version }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}

      - name: Install JavaScript dependencies
        run: pnpm install --frozen-lockfile

      - name: Build (shell-tool-mcp)
        run: pnpm --filter @openai/codex-shell-tool-mcp run build

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts

      - name: Assemble staging directory
        id: staging
        shell: bash
        run: |
          set -euo pipefail
          staging="${STAGING_DIR}"
          mkdir -p "$staging" "$staging/vendor"
          cp shell-tool-mcp/README.md "$staging/"
          cp shell-tool-mcp/package.json "$staging/"
          cp -R shell-tool-mcp/bin "$staging/"

          found_vendor="false"
          shopt -s nullglob
          for vendor_dir in artifacts/*/vendor; do
            rsync -av "$vendor_dir/" "$staging/vendor/"
            found_vendor="true"
          done
          if [[ "$found_vendor" == "false" ]]; then
            echo "No vendor payloads were downloaded."
            exit 1
          fi

          node - <<'NODE'
          import fs from "node:fs";
          import path from "node:path";

          const stagingDir = process.env.STAGING_DIR;
          const version = process.env.PACKAGE_VERSION;
          const pkgPath = path.join(stagingDir, "package.json");
          const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
          pkg.version = version;
          fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
          NODE

          echo "dir=$staging" >> "$GITHUB_OUTPUT"
        env:
          STAGING_DIR: ${{ runner.temp }}/shell-tool-mcp

      - name: Ensure binaries are executable
        run: |
          set -euo pipefail
          staging="${{ steps.staging.outputs.dir }}"
          chmod +x \
            "$staging"/vendor/*/codex-exec-mcp-server \
            "$staging"/vendor/*/codex-execve-wrapper \
            "$staging"/vendor/*/bash/*/bash

      - name: Create npm tarball
        shell: bash
        run: |
          set -euo pipefail
          mkdir -p dist/npm
          staging="${{ steps.staging.outputs.dir }}"
          pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
          filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
          mv "dist/npm/${filename}" "dist/npm/codex-shell-tool-mcp-npm-${PACKAGE_VERSION}.tgz"

      - uses: actions/upload-artifact@v5
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm/codex-shell-tool-mcp-npm-${{ env.PACKAGE_VERSION }}.tgz
          if-no-files-found: error

  publish:
    name: Publish npm package
    needs:
      - metadata
      - package
    if: ${{ inputs.publish && needs.metadata.outputs.should_publish == 'true' }}
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10.8.1
          run_install: false

      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: ${{ env.NODE_VERSION }}
          registry-url: https://registry.npmjs.org
          scope: "@openai"

      - name: Update npm
        run: npm install -g npm@latest

      - name: Download npm tarball
        uses: actions/download-artifact@v4
        with:
          name: codex-shell-tool-mcp-npm
          path: dist/npm

      - name: Publish to npm
        env:
          NPM_TAG: ${{ needs.metadata.outputs.npm_tag }}
          VERSION: ${{ needs.metadata.outputs.version }}
        shell: bash
        run: |
          set -euo pipefail
          tag_args=()
          if [[ -n "${NPM_TAG}" ]]; then
            tag_args+=(--tag "${NPM_TAG}")
          fi
          npm publish "dist/npm/codex-shell-tool-mcp-npm-${VERSION}.tgz" "${tag_args[@]}"
```
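The metadata job's publishability rules are plain bash regexes, so they can be sanity-checked locally. A small sketch that exercises the same patterns (the `derive_tags` helper is hypothetical, written here only for illustration):

```bash
derive_tags() {
  local version="$1" npm_tag="" should_publish="false"
  if [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    should_publish="true"                  # stable: published under the default tag
  elif [[ "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
    should_publish="true"; npm_tag="alpha" # prerelease: published with --tag alpha
  fi
  echo "$version -> publish=$should_publish tag=${npm_tag:-default}"
}

derive_tags 0.1.0          # publish=true  tag=default
derive_tags 0.1.0-alpha.2  # publish=true  tag=alpha
derive_tags 0.1.0-rc.1     # publish=false (matches neither pattern)
```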
```diff
@@ -75,6 +75,7 @@ If you don’t have the tool:

 ### Test assertions

 - Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
+- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.

 ### Integration tests (core)
```
README.md (34 lines changed)

````diff
@@ -69,38 +69,9 @@ Codex can access MCP servers. To configure them, refer to the [config docs](./do

 Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).

-### Execpolicy Quickstart
+### Execpolicy

-Codex can enforce your own rules-based execution policy before it runs shell commands.
-
-1. Create a policy directory: `mkdir -p ~/.codex/policy`.
-2. Create one or more `.codexpolicy` files in that folder. Codex automatically loads every `.codexpolicy` file in there on startup.
-3. Write `prefix_rule` entries to describe the commands you want to allow, prompt, or block:
-
-```starlark
-prefix_rule(
-    pattern = ["git", ["push", "fetch"]],
-    decision = "prompt",  # allow | prompt | forbidden
-    match = [["git", "push", "origin", "main"]],  # examples that must match
-    not_match = [["git", "status"]],  # examples that must not match
-)
-```
-
-- `pattern` is a list of shell tokens, evaluated from left to right; wrap tokens in a nested list to express alternatives (e.g., match both `push` and `fetch`).
-- `decision` sets the severity; Codex picks the strictest decision when multiple rules match (forbidden > prompt > allow).
-- `match` and `not_match` act as (optional) unit tests. Codex validates them when it loads your policy, so you get feedback if an example has unexpected behavior.
-
-In this example rule, if Codex wants to run commands with the prefix `git push` or `git fetch`, it will first ask for user approval.
-
-Use the `codex execpolicy check` subcommand to preview decisions before you save a rule (see the [`codex-execpolicy` README](./codex-rs/execpolicy/README.md) for syntax details):
-
-```shell
-codex execpolicy check --policy ~/.codex/policy/default.codexpolicy git push origin main
-```
-
-Pass multiple `--policy` flags to test how several files combine, and use `--pretty` for formatted JSON output. See the [`codex-rs/execpolicy` README](./codex-rs/execpolicy/README.md) for a more detailed walkthrough of the available syntax.
-
-Note: `execpolicy` commands are still in preview. The API may have breaking changes in the future.
+See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.

 ### Docs & FAQ

@@ -114,6 +85,7 @@ Pass multiple `--policy` flags to test how several files combine, and use `--pre
 - [**Configuration**](./docs/config.md)
   - [Example config](./docs/example-config.md)
 - [**Sandbox & approvals**](./docs/sandbox.md)
+- [**Execpolicy quickstart**](./docs/execpolicy.md)
 - [**Authentication**](./docs/authentication.md)
   - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
   - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
````
```diff
@@ -95,14 +95,6 @@ function detectPackageManager() {
     return "bun";
   }

-  if (
-    process.env.BUN_INSTALL ||
-    process.env.BUN_INSTALL_GLOBAL_DIR ||
-    process.env.BUN_INSTALL_BIN_DIR
-  ) {
-    return "bun";
-  }
-
   return userAgent ? "npm" : null;
 }
```
codex-rs/.cargo/audit.toml (new file, 6 lines)

```toml
[advisories]
ignore = [
    "RUSTSEC-2024-0388", # derivative 2.2.0 via starlark; upstream crate is unmaintained
    "RUSTSEC-2025-0057", # fxhash 0.2.1 via starlark_map; upstream crate is unmaintained
    "RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
]
```
codex-rs/.github/workflows/cargo-audit.yml (vendored, new file, 26 lines)

```yaml
name: Cargo audit

on:
  pull_request:
  push:
    branches:
      - main

permissions:
  contents: read

jobs:
  audit:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: codex-rs
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install cargo-audit
        uses: taiki-e/install-action@v2
        with:
          tool: cargo-audit
      - name: Run cargo audit
        run: cargo audit --deny warnings
```
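To reproduce this check locally, a sketch, assuming cargo-audit is not yet installed; when run from `codex-rs`, it should pick up the ignore list in `.cargo/audit.toml` shown above:

```bash
cargo install cargo-audit --locked
cd codex-rs
# Fails on any advisory (or warning) not listed in .cargo/audit.toml.
cargo audit --deny warnings
```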
codex-rs/Cargo.lock (generated, 547 lines changed; diff suppressed because it is too large)
codex-rs/Cargo.toml (workspace manifest)

```diff
@@ -34,6 +34,8 @@ members = [
     "stdio-to-uds",
     "otel",
     "tui",
+    "tui2",
+    "utils/absolute-path",
     "utils/git",
     "utils/cache",
     "utils/image",
@@ -41,6 +43,8 @@ members = [
     "utils/pty",
     "utils/readiness",
     "utils/string",
+    "codex-client",
+    "codex-api",
 ]
 resolver = "2"

@@ -51,11 +55,13 @@ version = "0.0.0"
 # crates created with `cargo new -w ...` automatically inherit the 2024
 # edition.
 edition = "2024"
+license = "Apache-2.0"

 [workspace.dependencies]
 # Internal
 app_test_support = { path = "app-server/tests/common" }
 codex-ansi-escape = { path = "ansi-escape" }
+codex-api = { path = "codex-api" }
 codex-app-server = { path = "app-server" }
 codex-app-server-protocol = { path = "app-server-protocol" }
 codex-apply-patch = { path = "apply-patch" }
@@ -63,6 +69,7 @@ codex-arg0 = { path = "arg0" }
 codex-async-utils = { path = "async-utils" }
 codex-backend-client = { path = "backend-client" }
 codex-chatgpt = { path = "chatgpt" }
+codex-client = { path = "codex-client" }
 codex-common = { path = "common" }
 codex-core = { path = "core" }
 codex-exec = { path = "exec" }
@@ -83,6 +90,8 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
 codex-rmcp-client = { path = "rmcp-client" }
 codex-stdio-to-uds = { path = "stdio-to-uds" }
 codex-tui = { path = "tui" }
+codex-tui2 = { path = "tui2" }
+codex-utils-absolute-path = { path = "utils/absolute-path" }
 codex-utils-cache = { path = "utils/cache" }
 codex-utils-image = { path = "utils/image" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
@@ -91,6 +100,7 @@ codex-utils-readiness = { path = "utils/readiness" }
 codex-utils-string = { path = "utils/string" }
 codex-windows-sandbox = { path = "windows-sandbox-rs" }
 core_test_support = { path = "core/tests/common" }
+exec_server_test_support = { path = "exec-server/tests/common" }
 mcp-types = { path = "mcp-types" }
 mcp_test_support = { path = "mcp-server/tests/common" }

@@ -99,7 +109,6 @@ allocative = "0.3.3"
 ansi-to-tui = "7.0.0"
 anyhow = "1"
 arboard = { version = "3", features = ["wayland-data-control"] }
-askama = "0.14"
 assert_cmd = "2"
 assert_matches = "1.5.0"
 async-channel = "2.3.1"
@@ -108,8 +117,8 @@ async-trait = "0.1.89"
 axum = { version = "0.8", default-features = false }
 base64 = "0.22.1"
 bytes = "1.10.1"
-chrono = "0.4.42"
+chardetng = "0.1.17"
+chrono = "0.4.42"
 clap = "4"
 clap_complete = "4"
 color-eyre = "0.6.3"
@@ -120,9 +129,9 @@ diffy = "0.4.2"
 dirs = "6"
 dotenvy = "0.15.7"
 dunce = "1.0.4"
+encoding_rs = "0.8.35"
 env-flags = "0.1.1"
 env_logger = "0.11.5"
-encoding_rs = "0.8.35"
 escargot = "0.5"
 eventsource-stream = "0.2.3"
 futures = { version = "0.3", default-features = false }
@@ -131,14 +140,14 @@ icu_decimal = "2.1"
 icu_locale_core = "2.1"
 icu_provider = { version = "2.1", features = ["sync"] }
 ignore = "0.4.23"
-image = { version = "^0.25.8", default-features = false }
+image = { version = "^0.25.9", default-features = false }
 indexmap = "2.12.0"
-insta = "1.43.2"
+insta = "1.44.3"
 itertools = "0.14.0"
 keyring = { version = "3.6", default-features = false }
 landlock = "0.4.1"
 lazy_static = "1"
-libc = "0.2.175"
+libc = "0.2.177"
 log = "0.4"
 lru = "0.12.5"
 maplit = "1.0.2"
@@ -146,7 +155,7 @@ mime_guess = "2.0.5"
 multimap = "0.10.0"
 notify = "8.2.0"
 nucleo-matcher = "0.3.1"
-once_cell = "1"
+once_cell = "1.20.2"
 openssl-sys = "*"
 opentelemetry = "0.30.0"
 opentelemetry-appender-tracing = "0.30.0"
@@ -164,14 +173,17 @@ pulldown-cmark = "0.10"
 rand = "0.9"
 ratatui = "0.29.0"
 ratatui-macros = "0.6.0"
+regex = "1.12.2"
 regex-lite = "0.1.7"
 reqwest = "0.12"
-rmcp = { version = "0.8.5", default-features = false }
+rmcp = { version = "0.10.0", default-features = false }
 schemars = "0.8.22"
 seccompiler = "0.5.0"
+sentry = "0.34.0"
 serde = "1"
 serde_json = "1"
-serde_with = "3.14"
+serde_with = "3.16"
 serde_yaml = "0.9"
+serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
@@ -194,9 +206,9 @@ tokio-stream = "0.1.17"
 tokio-test = "0.4"
 tokio-util = "0.7.16"
 toml = "0.9.5"
-toml_edit = "0.23.4"
+toml_edit = "0.23.5"
 tonic = "0.13.1"
-tracing = "0.1.41"
+tracing = "0.1.43"
 tracing-appender = "0.2.3"
 tracing-subscriber = "0.3.20"
 tracing-test = "0.2.5"
@@ -214,7 +226,7 @@ vt100 = "0.16.2"
 walkdir = "2.5.0"
 webbrowser = "1.0"
 which = "6"
-wildmatch = "2.5.0"
+wildmatch = "2.6.1"

 wiremock = "0.6"
 zeroize = "1.8.2"
@@ -260,11 +272,7 @@ unwrap_used = "deny"
 # cargo-shear cannot see the platform-specific openssl-sys usage, so we
 # silence the false positive here instead of deleting a real dependency.
 [workspace.metadata.cargo-shear]
-ignored = [
-    "icu_provider",
-    "openssl-sys",
-    "codex-utils-readiness",
-]
+ignored = ["icu_provider", "openssl-sys", "codex-utils-readiness"]

 [profile.release]
 lto = "fat"
```
```diff
@@ -1,7 +1,8 @@
 [package]
-edition = "2024"
 name = "codex-ansi-escape"
-version = { workspace = true }
+version.workspace = true
+edition.workspace = true
+license.workspace = true

 [lib]
 name = "codex_ansi_escape"
```
```diff
@@ -1,7 +1,8 @@
 [package]
-edition = "2024"
 name = "codex-app-server-protocol"
-version = { workspace = true }
+version.workspace = true
+edition.workspace = true
+license.workspace = true

 [lib]
 name = "codex_app_server_protocol"
```
```diff
@@ -131,7 +131,7 @@ client_request_definitions! {
     },
     ReviewStart => "review/start" {
         params: v2::ReviewStartParams,
-        response: v2::TurnStartResponse,
+        response: v2::ReviewStartResponse,
     },

     ModelList => "model/list" {
@@ -139,6 +139,16 @@ client_request_definitions! {
         response: v2::ModelListResponse,
     },

+    McpServerOauthLogin => "mcpServer/oauth/login" {
+        params: v2::McpServerOauthLoginParams,
+        response: v2::McpServerOauthLoginResponse,
+    },
+
     McpServersList => "mcpServers/list" {
         params: v2::ListMcpServersParams,
         response: v2::ListMcpServersResponse,
     },

     LoginAccount => "account/login/start" {
         params: v2::LoginAccountParams,
         response: v2::LoginAccountResponse,
@@ -164,6 +174,25 @@ client_request_definitions! {
         response: v2::FeedbackUploadResponse,
     },

+    /// Execute a command (argv vector) under the server's sandbox.
+    OneOffCommandExec => "command/exec" {
+        params: v2::CommandExecParams,
+        response: v2::CommandExecResponse,
+    },
+
+    ConfigRead => "config/read" {
+        params: v2::ConfigReadParams,
+        response: v2::ConfigReadResponse,
+    },
+    ConfigValueWrite => "config/value/write" {
+        params: v2::ConfigValueWriteParams,
+        response: v2::ConfigWriteResponse,
+    },
+    ConfigBatchWrite => "config/batchWrite" {
+        params: v2::ConfigBatchWriteParams,
+        response: v2::ConfigWriteResponse,
+    },
+
     GetAccount => "account/read" {
         params: v2::GetAccountParams,
         response: v2::GetAccountResponse,
@@ -489,18 +518,25 @@ server_notification_definitions! {
     /// NEW NOTIFICATIONS
     Error => "error" (v2::ErrorNotification),
     ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
+    ThreadTokenUsageUpdated => "thread/tokenUsage/updated" (v2::ThreadTokenUsageUpdatedNotification),
     TurnStarted => "turn/started" (v2::TurnStartedNotification),
     TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
+    TurnDiffUpdated => "turn/diff/updated" (v2::TurnDiffUpdatedNotification),
+    TurnPlanUpdated => "turn/plan/updated" (v2::TurnPlanUpdatedNotification),
     ItemStarted => "item/started" (v2::ItemStartedNotification),
     ItemCompleted => "item/completed" (v2::ItemCompletedNotification),
     AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
     CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
+    TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
+    FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
     McpToolCallProgress => "item/mcpToolCall/progress" (v2::McpToolCallProgressNotification),
+    McpServerOauthLoginCompleted => "mcpServer/oauthLogin/completed" (v2::McpServerOauthLoginCompletedNotification),
     AccountUpdated => "account/updated" (v2::AccountUpdatedNotification),
+    AccountRateLimitsUpdated => "account/rateLimits/updated" (v2::AccountRateLimitsUpdatedNotification),
     ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
     ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
     ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
     ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),

     /// Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.
     WindowsWorldWritableWarning => "windows/worldWritableWarning" (v2::WindowsWorldWritableWarningNotification),
@@ -618,7 +654,6 @@ mod tests {
             command: vec!["echo".to_string(), "hello".to_string()],
             cwd: PathBuf::from("/tmp"),
             reason: Some("because tests".to_string()),
-            risk: None,
             parsed_cmd: vec![ParsedCommand::Unknown {
                 cmd: "echo hello".to_string(),
             }],
@@ -638,7 +673,6 @@ mod tests {
             "command": ["echo", "hello"],
             "cwd": "/tmp",
             "reason": "because tests",
-            "risk": null,
             "parsedCmd": [
               {
                 "type": "unknown",
```
codex-rs/app-server-protocol/src/protocol/mappers.rs (new file, 15 lines)

```rust
use crate::protocol::v1;
use crate::protocol::v2;

impl From<v1::ExecOneOffCommandParams> for v2::CommandExecParams {
    fn from(value: v1::ExecOneOffCommandParams) -> Self {
        Self {
            command: value.command,
            timeout_ms: value
                .timeout_ms
                .map(|timeout| i64::try_from(timeout).unwrap_or(60_000)),
            cwd: value.cwd,
            sandbox_policy: value.sandbox_policy.map(std::convert::Into::into),
        }
    }
}
```
@@ -2,6 +2,7 @@
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.

pub mod common;
mod mappers;
pub mod thread_history;
pub mod v1;
pub mod v2;

@@ -1,5 +1,6 @@
use crate::protocol::v2::ThreadItem;
use crate::protocol::v2::Turn;
use crate::protocol::v2::TurnError;
use crate::protocol::v2::TurnStatus;
use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
@@ -142,6 +143,7 @@ impl ThreadHistoryBuilder {
        PendingTurn {
            id: self.next_turn_id(),
            items: Vec::new(),
            error: None,
            status: TurnStatus::Completed,
        }
    }
@@ -190,6 +192,7 @@ impl ThreadHistoryBuilder {
struct PendingTurn {
    id: String,
    items: Vec<ThreadItem>,
    error: Option<TurnError>,
    status: TurnStatus,
}

@@ -198,6 +201,7 @@ impl From<PendingTurn> for Turn {
        Self {
            id: value.id,
            items: value.items,
            error: value.error,
            status: value.status,
        }
    }

@@ -3,17 +3,16 @@ use std::path::PathBuf;

use codex_protocol::ConversationId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::FileChange;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxCommandAssessment;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
@@ -226,7 +225,6 @@ pub struct ExecCommandApprovalParams {
    pub command: Vec<String>,
    pub cwd: PathBuf,
    pub reason: Option<String>,
    pub risk: Option<SandboxCommandAssessment>,
    pub parsed_cmd: Vec<ParsedCommand>,
}

@@ -2,21 +2,32 @@ use std::collections::HashMap;
use std::path::PathBuf;

use crate::protocol::common::AuthMode;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::SandboxCommandAssessment as CoreSandboxCommandAssessment;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
use codex_protocol::plan_tool::PlanItemArg as CorePlanItemArg;
use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::UserInput as CoreUserInput;
use mcp_types::ContentBlock as McpContentBlock;
use mcp_types::Resource as McpResource;
use mcp_types::ResourceTemplate as McpResourceTemplate;
use mcp_types::Tool as McpTool;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
@@ -115,31 +126,285 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "kebab-case")]
#[ts(rename_all = "kebab-case", export_to = "v2/")]
pub enum AskForApproval {
    #[serde(rename = "untrusted")]
    #[ts(rename = "untrusted")]
    UnlessTrusted,
    OnFailure,
    OnRequest,
    Never,
}

impl AskForApproval {
    pub fn to_core(self) -> CoreAskForApproval {
        match self {
            AskForApproval::UnlessTrusted => CoreAskForApproval::UnlessTrusted,
            AskForApproval::OnFailure => CoreAskForApproval::OnFailure,
            AskForApproval::OnRequest => CoreAskForApproval::OnRequest,
            AskForApproval::Never => CoreAskForApproval::Never,
        }
    }
}

impl From<CoreAskForApproval> for AskForApproval {
    fn from(value: CoreAskForApproval) -> Self {
        match value {
            CoreAskForApproval::UnlessTrusted => AskForApproval::UnlessTrusted,
            CoreAskForApproval::OnFailure => AskForApproval::OnFailure,
            CoreAskForApproval::OnRequest => AskForApproval::OnRequest,
            CoreAskForApproval::Never => AskForApproval::Never,
        }
    }
}
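Given the kebab-case rename plus the explicit `untrusted` override above, the v2 `AskForApproval` variants serialize to the following wire strings (assuming serde's default string representation for unit variants):

```json
["untrusted", "on-failure", "on-request", "never"]
```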

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "kebab-case")]
#[ts(rename_all = "kebab-case", export_to = "v2/")]
pub enum SandboxMode {
    ReadOnly,
    WorkspaceWrite,
    DangerFullAccess,
}

impl SandboxMode {
    pub fn to_core(self) -> CoreSandboxMode {
        match self {
            SandboxMode::ReadOnly => CoreSandboxMode::ReadOnly,
            SandboxMode::WorkspaceWrite => CoreSandboxMode::WorkspaceWrite,
            SandboxMode::DangerFullAccess => CoreSandboxMode::DangerFullAccess,
        }
    }
}

impl From<CoreSandboxMode> for SandboxMode {
    fn from(value: CoreSandboxMode) -> Self {
        match value {
            CoreSandboxMode::ReadOnly => SandboxMode::ReadOnly,
            CoreSandboxMode::WorkspaceWrite => SandboxMode::WorkspaceWrite,
            CoreSandboxMode::DangerFullAccess => SandboxMode::DangerFullAccess,
        }
    }
}

v2_enum_from_core!(
    pub enum AskForApproval from codex_protocol::protocol::AskForApproval {
        UnlessTrusted, OnFailure, OnRequest, Never
    pub enum ReviewDelivery from codex_protocol::protocol::ReviewDelivery {
        Inline, Detached
    }
);

v2_enum_from_core!(
    pub enum SandboxMode from codex_protocol::config_types::SandboxMode {
        ReadOnly, WorkspaceWrite, DangerFullAccess
    }
);

v2_enum_from_core!(
    pub enum CommandRiskLevel from codex_protocol::approvals::SandboxRiskLevel {
        Low,
        Medium,
        High
    pub enum McpAuthStatus from codex_protocol::protocol::McpAuthStatus {
        Unsupported,
        NotLoggedIn,
        BearerToken,
        OAuth
    }
);

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ConfigLayerName {
    Mdm,
    System,
    SessionFlags,
    User,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct SandboxWorkspaceWrite {
    #[serde(default)]
    pub writable_roots: Vec<PathBuf>,
    #[serde(default)]
    pub network_access: bool,
    #[serde(default)]
    pub exclude_tmpdir_env_var: bool,
    #[serde(default)]
    pub exclude_slash_tmp: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct ToolsV2 {
    #[serde(alias = "web_search_request")]
    pub web_search: Option<bool>,
    pub view_image: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct ProfileV2 {
    pub model: Option<String>,
    pub model_provider: Option<String>,
    pub approval_policy: Option<AskForApproval>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub chatgpt_base_url: Option<String>,
    #[serde(default, flatten)]
    pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct Config {
    pub model: Option<String>,
    pub review_model: Option<String>,
    pub model_context_window: Option<i64>,
    pub model_auto_compact_token_limit: Option<i64>,
    pub model_provider: Option<String>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
    pub forced_chatgpt_workspace_id: Option<String>,
    pub forced_login_method: Option<ForcedLoginMethod>,
    pub tools: Option<ToolsV2>,
    pub profile: Option<String>,
    #[serde(default)]
    pub profiles: HashMap<String, ProfileV2>,
    pub instructions: Option<String>,
    pub developer_instructions: Option<String>,
    pub compact_prompt: Option<String>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    #[serde(default, flatten)]
    pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigLayerMetadata {
    pub name: ConfigLayerName,
    pub source: String,
    pub version: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigLayer {
    pub name: ConfigLayerName,
    pub source: String,
    pub version: String,
    pub config: JsonValue,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum MergeStrategy {
    Replace,
    Upsert,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum WriteStatus {
    Ok,
    OkOverridden,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct OverriddenMetadata {
    pub message: String,
    pub overriding_layer: ConfigLayerMetadata,
    pub effective_value: JsonValue,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigWriteResponse {
    pub status: WriteStatus,
    pub version: String,
    /// Canonical path to the config file that was written.
    pub file_path: String,
    pub overridden_metadata: Option<OverriddenMetadata>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ConfigWriteErrorCode {
    ConfigLayerReadonly,
    ConfigVersionConflict,
    ConfigValidationError,
    ConfigPathNotFound,
    ConfigSchemaUnknownKey,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigReadParams {
    #[serde(default)]
    pub include_layers: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigReadResponse {
    pub config: Config,
    pub origins: HashMap<String, ConfigLayerMetadata>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub layers: Option<Vec<ConfigLayer>>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigValueWriteParams {
    pub key_path: String,
    pub value: JsonValue,
    pub merge_strategy: MergeStrategy,
    /// Path to the config file to write; defaults to the user's `config.toml` when omitted.
    pub file_path: Option<String>,
    pub expected_version: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigBatchWriteParams {
    pub edits: Vec<ConfigEdit>,
    /// Path to the config file to write; defaults to the user's `config.toml` when omitted.
    pub file_path: Option<String>,
    pub expected_version: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigEdit {
    pub key_path: String,
    pub value: JsonValue,
    pub merge_strategy: MergeStrategy,
}
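Taken together, these types give `config/value/write` a wire shape along the following lines. This is an illustrative sketch only — the key, value, version string, and file path are made-up examples; field names follow the camelCase serde attributes above, and `mergeStrategy`/`status` use the camelCase enum values:

```json
{ "method": "config/value/write", "id": 7, "params": {
  "keyPath": "sandbox_mode",
  "value": "workspace-write",
  "mergeStrategy": "replace",
  "filePath": null,
  "expectedVersion": null
} }
{ "id": 7, "result": {
  "status": "ok",
  "version": "a1b2c3",
  "filePath": "/home/user/.codex/config.toml",
  "overriddenMetadata": null
} }
```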

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ApprovalDecision {
    Accept,
    /// Approve and remember the approval for the session.
    AcceptForSession,
    AcceptWithExecpolicyAmendment {
        execpolicy_amendment: ExecPolicyAmendment,
    },
    Decline,
    Cancel,
}
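Assuming serde's default externally tagged representation (unit variants as strings, the struct variant as a single-key object) and the `#[serde(transparent)]` `ExecPolicyAmendment` below serializing as a plain string array, the five possible encodings of a decision look like this (the `["git", "push"]` command is an invented example):

```json
["accept", "acceptForSession", "decline", "cancel",
 { "acceptWithExecpolicyAmendment": { "execpolicyAmendment": ["git", "push"] } }]
```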
@@ -209,28 +474,23 @@ impl From<codex_protocol::protocol::SandboxPolicy> for SandboxPolicy {
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SandboxCommandAssessment {
    pub description: String,
    pub risk_level: CommandRiskLevel,
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(transparent)]
#[ts(type = "Array<string>", export_to = "v2/")]
pub struct ExecPolicyAmendment {
    pub command: Vec<String>,
}

impl SandboxCommandAssessment {
    pub fn into_core(self) -> CoreSandboxCommandAssessment {
        CoreSandboxCommandAssessment {
            description: self.description,
            risk_level: self.risk_level.to_core(),
        }
impl ExecPolicyAmendment {
    pub fn into_core(self) -> CoreExecPolicyAmendment {
        CoreExecPolicyAmendment::new(self.command)
    }
}

impl From<CoreSandboxCommandAssessment> for SandboxCommandAssessment {
    fn from(value: CoreSandboxCommandAssessment) -> Self {
impl From<CoreExecPolicyAmendment> for ExecPolicyAmendment {
    fn from(value: CoreExecPolicyAmendment) -> Self {
        Self {
            description: value.description,
            risk_level: CommandRiskLevel::from(value.risk_level),
            command: value.command().to_vec(),
        }
    }
}
@@ -259,6 +519,56 @@ pub enum CommandAction {
    },
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase", export_to = "v2/")]
#[derive(Default)]
pub enum SessionSource {
    Cli,
    #[serde(rename = "vscode")]
    #[ts(rename = "vscode")]
    #[default]
    VsCode,
    Exec,
    AppServer,
    #[serde(other)]
    Unknown,
}

impl From<CoreSessionSource> for SessionSource {
    fn from(value: CoreSessionSource) -> Self {
        match value {
            CoreSessionSource::Cli => SessionSource::Cli,
            CoreSessionSource::VSCode => SessionSource::VsCode,
            CoreSessionSource::Exec => SessionSource::Exec,
            CoreSessionSource::Mcp => SessionSource::AppServer,
            CoreSessionSource::SubAgent(_) => SessionSource::Unknown,
            CoreSessionSource::Unknown => SessionSource::Unknown,
        }
    }
}

impl From<SessionSource> for CoreSessionSource {
    fn from(value: SessionSource) -> Self {
        match value {
            SessionSource::Cli => CoreSessionSource::Cli,
            SessionSource::VsCode => CoreSessionSource::VSCode,
            SessionSource::Exec => CoreSessionSource::Exec,
            SessionSource::AppServer => CoreSessionSource::Mcp,
            SessionSource::Unknown => CoreSessionSource::Unknown,
        }
    }
}
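With the camelCase rename plus the explicit `vscode` override, the `SessionSource` wire values come out as below (a sketch; `#[serde(other)]` additionally makes `Unknown` the catch-all when deserializing unrecognized strings):

```json
["cli", "vscode", "exec", "appServer", "unknown"]
```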

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GitInfo {
    pub sha: Option<String>,
    pub branch: Option<String>,
    pub origin_url: Option<String>,
}

impl CommandAction {
    pub fn into_core(self) -> CoreParsedCommand {
        match self {
@@ -358,10 +668,21 @@ pub struct CancelLoginAccountParams {
    pub login_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CancelLoginAccountStatus {
    Canceled,
    NotFound,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CancelLoginAccountResponse {}
pub struct CancelLoginAccountResponse {
    pub status: CancelLoginAccountStatus,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
@@ -433,13 +754,64 @@ pub struct ModelListResponse {
    pub next_cursor: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersParams {
    /// Opaque pagination cursor returned by a previous call.
    pub cursor: Option<String>,
    /// Optional page size; defaults to a server-defined value.
    pub limit: Option<u32>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServer {
    pub name: String,
    pub tools: std::collections::HashMap<String, McpTool>,
    pub resources: Vec<McpResource>,
    pub resource_templates: Vec<McpResourceTemplate>,
    pub auth_status: McpAuthStatus,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ListMcpServersResponse {
    pub data: Vec<McpServer>,
    /// Opaque cursor to pass to the next call to continue after the last item.
    /// If None, there are no more items to return.
    pub next_cursor: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginParams {
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub scopes: Option<Vec<String>>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub timeout_secs: Option<i64>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginResponse {
    pub authorization_url: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FeedbackUploadParams {
    pub classification: String,
    pub reason: Option<String>,
    pub conversation_id: Option<ConversationId>,
    pub thread_id: Option<String>,
    pub include_logs: bool,
}

@@ -450,6 +822,26 @@ pub struct FeedbackUploadResponse {
    pub thread_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecParams {
    pub command: Vec<String>,
    #[ts(type = "number | null")]
    pub timeout_ms: Option<i64>,
    pub cwd: Option<PathBuf>,
    pub sandbox_policy: Option<SandboxPolicy>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecResponse {
    pub exit_code: i32,
    pub stdout: String,
    pub stderr: String,
}

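These params/response types back the `command/exec` method mentioned in the README. An illustrative request/response pair might look like this (the command, timeout, and output values are invented; field names follow the camelCase serde attributes):

```json
{ "method": "command/exec", "id": 12, "params": {
  "command": ["echo", "hello"],
  "timeoutMs": 60000,
  "cwd": "/tmp",
  "sandboxPolicy": null
} }
{ "id": 12, "result": { "exitCode": 0, "stdout": "hello\n", "stderr": "" } }
```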
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
@@ -581,11 +973,21 @@ pub struct Thread {
    pub id: String,
    /// Usually the first user message in the thread, if available.
    pub preview: String,
    /// Model provider used for this thread (for example, 'openai').
    pub model_provider: String,
    /// Unix timestamp (in seconds) when the thread was created.
    #[ts(type = "number")]
    pub created_at: i64,
    /// [UNSTABLE] Path to the thread on disk.
    pub path: PathBuf,
    /// Working directory captured for the thread.
    pub cwd: PathBuf,
    /// Version of the CLI that created the thread.
    pub cli_version: String,
    /// Origin of the thread (CLI, VSCode, codex exec, codex app-server, etc.).
    pub source: SessionSource,
    /// Optional Git metadata captured when the thread was created.
    pub git_info: Option<GitInfo>,
    /// Only populated on a `thread/resume` response.
    /// For all other responses and notifications returning a Thread,
    /// the turns field will be an empty list.
@@ -599,6 +1001,63 @@ pub struct AccountUpdatedNotification {
    pub auth_mode: Option<AuthMode>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadTokenUsageUpdatedNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub token_usage: ThreadTokenUsage,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadTokenUsage {
    pub total: TokenUsageBreakdown,
    pub last: TokenUsageBreakdown,
    #[ts(type = "number | null")]
    pub model_context_window: Option<i64>,
}

impl From<CoreTokenUsageInfo> for ThreadTokenUsage {
    fn from(value: CoreTokenUsageInfo) -> Self {
        Self {
            total: value.total_token_usage.into(),
            last: value.last_token_usage.into(),
            model_context_window: value.model_context_window,
        }
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TokenUsageBreakdown {
    #[ts(type = "number")]
    pub total_tokens: i64,
    #[ts(type = "number")]
    pub input_tokens: i64,
    #[ts(type = "number")]
    pub cached_input_tokens: i64,
    #[ts(type = "number")]
    pub output_tokens: i64,
    #[ts(type = "number")]
    pub reasoning_output_tokens: i64,
}

impl From<CoreTokenUsage> for TokenUsageBreakdown {
    fn from(value: CoreTokenUsage) -> Self {
        Self {
            total_tokens: value.total_tokens,
            input_tokens: value.input_tokens,
            cached_input_tokens: value.cached_input_tokens,
            output_tokens: value.output_tokens,
            reasoning_output_tokens: value.reasoning_output_tokens,
        }
    }
}

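Combined with the notification registration earlier in the diff, a `thread/tokenUsage/updated` payload would look roughly like this (all token counts, ids, and the context-window size are invented for illustration):

```json
{ "method": "thread/tokenUsage/updated", "params": {
  "threadId": "thr_123",
  "turnId": "turn_900",
  "tokenUsage": {
    "total": { "totalTokens": 5120, "inputTokens": 4000, "cachedInputTokens": 1024, "outputTokens": 1120, "reasoningOutputTokens": 256 },
    "last": { "totalTokens": 640, "inputTokens": 512, "cachedInputTokens": 0, "outputTokens": 128, "reasoningOutputTokens": 32 },
    "modelContextWindow": 272000
  }
} }
```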
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -608,8 +1067,9 @@ pub struct Turn {
    /// For all other responses and notifications returning a Turn,
    /// the items field will be an empty list.
    pub items: Vec<ThreadItem>,
    #[serde(flatten)]
    pub status: TurnStatus,
    /// Only populated when the Turn's status is failed.
    pub error: Option<TurnError>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, Error)]
@@ -626,15 +1086,20 @@ pub struct TurnError {
#[ts(export_to = "v2/")]
pub struct ErrorNotification {
    pub error: TurnError,
    // Set to true if the error is transient and the app-server process will automatically retry.
    // If true, this will not interrupt a turn.
    pub will_retry: bool,
    pub thread_id: String,
    pub turn_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "status", rename_all = "camelCase")]
#[ts(tag = "status", export_to = "v2/")]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum TurnStatus {
    Completed,
    Interrupted,
    Failed { error: TurnError },
    Failed,
    InProgress,
}
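After this change, `TurnStatus` serializes as a plain camelCase string ("completed", "interrupted", "failed", "inProgress") and the error travels in the Turn's separate `error` field instead of inside the `failed` variant. A failed turn would then look roughly like the sketch below; the ids are invented, and the `TurnError` body is assumed to carry at least a `message` field, as the test client's usage suggests:

```json
{ "id": "turn_900", "items": [], "status": "failed",
  "error": { "message": "model stream disconnected" } }
```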

@@ -666,9 +1131,22 @@ pub struct ReviewStartParams {
    pub thread_id: String,
    pub target: ReviewTarget,

    /// When true, also append the final review message to the original thread.
    /// Where to run the review: inline (default) on the current thread or
    /// detached on a new thread (returned in `reviewThreadId`).
    #[serde(default)]
    pub append_to_original_thread: bool,
    pub delivery: Option<ReviewDelivery>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReviewStartResponse {
    pub turn: Turn,
    /// Identifies the thread where the review runs.
    ///
    /// For inline reviews, this is the original thread id.
    /// For detached reviews, this is the id of the new review thread.
    pub review_thread_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -778,6 +1256,8 @@ pub enum ThreadItem {
        command: String,
        /// The command's working directory.
        cwd: PathBuf,
        /// Identifier for the underlying PTY process (when available).
        process_id: Option<String>,
        status: CommandExecutionStatus,
        /// A best-effort parsing of the command to understand the action(s) it will perform.
        /// This returns a list of CommandAction objects because a single shell command may
@@ -788,6 +1268,7 @@ pub enum ThreadItem {
        /// The command's exit code.
        exit_code: Option<i32>,
        /// The duration of the command execution in milliseconds.
        #[ts(type = "number | null")]
        duration_ms: Option<i64>,
    },
    #[serde(rename_all = "camelCase")]
@@ -807,19 +1288,22 @@ pub enum ThreadItem {
        arguments: JsonValue,
        result: Option<McpToolCallResult>,
        error: Option<McpToolCallError>,
        /// The duration of the MCP tool call in milliseconds.
        #[ts(type = "number | null")]
        duration_ms: Option<i64>,
    },
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    WebSearch { id: String, query: String },
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    TodoList { id: String, items: Vec<TodoItem> },
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    ImageView { id: String, path: String },
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    CodeReview { id: String, review: String },
    EnteredReviewMode { id: String, review: String },
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    ExitedReviewMode { id: String, review: String },
}

impl From<CoreTurnItem> for ThreadItem {
@@ -859,6 +1343,7 @@ pub enum CommandExecutionStatus {
    InProgress,
    Completed,
    Failed,
    Declined,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -914,15 +1399,6 @@ pub struct McpToolCallError {
    pub message: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TodoItem {
    pub id: String,
    pub text: String,
    pub completed: bool,
}

// === Server Notifications ===
// Thread/Turn lifecycle notifications and item progress events
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -936,6 +1412,7 @@ pub struct ThreadStartedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnStartedNotification {
    pub thread_id: String,
    pub turn: Turn,
}

@@ -952,14 +1429,74 @@ pub struct Usage {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnCompletedNotification {
    pub thread_id: String,
    pub turn: Turn,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// Notification that the turn-level unified diff has changed.
/// Contains the latest aggregated diff across all file changes in the turn.
pub struct TurnDiffUpdatedNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub diff: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnPlanUpdatedNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub explanation: Option<String>,
    pub plan: Vec<TurnPlanStep>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TurnPlanStep {
    pub step: String,
    pub status: TurnPlanStepStatus,
}

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum TurnPlanStepStatus {
    Pending,
    InProgress,
    Completed,
}

impl From<CorePlanItemArg> for TurnPlanStep {
    fn from(value: CorePlanItemArg) -> Self {
        Self {
            step: value.step,
            status: value.status.into(),
        }
    }
}

impl From<CorePlanStepStatus> for TurnPlanStepStatus {
    fn from(value: CorePlanStepStatus) -> Self {
        match value {
            CorePlanStepStatus::Pending => Self::Pending,
            CorePlanStepStatus::InProgress => Self::InProgress,
            CorePlanStepStatus::Completed => Self::Completed,
        }
    }
}
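A `turn/plan/updated` notification built from these types would look roughly as follows (plan steps and ids are invented; step statuses use the camelCase enum values above):

```json
{ "method": "turn/plan/updated", "params": {
  "threadId": "thr_123",
  "turnId": "turn_900",
  "explanation": null,
  "plan": [
    { "step": "Locate failing test", "status": "completed" },
    { "step": "Patch the parser", "status": "inProgress" },
    { "step": "Run the suite", "status": "pending" }
  ]
} }
```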

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ItemStartedNotification {
    pub item: ThreadItem,
    pub thread_id: String,
    pub turn_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -967,6 +1504,8 @@ pub struct ItemStartedNotification {
#[ts(export_to = "v2/")]
pub struct ItemCompletedNotification {
    pub item: ThreadItem,
    pub thread_id: String,
    pub turn_id: String,
}

// Item-specific progress notifications
@@ -974,6 +1513,8 @@ pub struct ItemCompletedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AgentMessageDeltaNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
}
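With the thread/turn ids now included, a streamed `item/agentMessage/delta` notification carries enough context to route the text chunk without extra state (ids and delta text below are invented):

```json
{ "method": "item/agentMessage/delta", "params": {
  "threadId": "thr_123", "turnId": "turn_900", "itemId": "item_5", "delta": "Hel"
} }
```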
@@ -982,8 +1523,11 @@ pub struct AgentMessageDeltaNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReasoningSummaryTextDeltaNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
    #[ts(type = "number")]
    pub summary_index: i64,
}

@@ -991,7 +1535,10 @@ pub struct ReasoningSummaryTextDeltaNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReasoningSummaryPartAddedNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    #[ts(type = "number")]
    pub summary_index: i64,
}

@@ -999,15 +1546,41 @@ pub struct ReasoningSummaryPartAddedNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ReasoningTextDeltaNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
    #[ts(type = "number")]
    pub content_index: i64,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TerminalInteractionNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub process_id: String,
    pub stdin: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionOutputDeltaNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct FileChangeOutputDeltaNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub delta: String,
}
@@ -1016,10 +1589,23 @@ pub struct CommandExecutionOutputDeltaNotification {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpToolCallProgressNotification {
    pub thread_id: String,
    pub turn_id: String,
    pub item_id: String,
    pub message: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerOauthLoginCompletedNotification {
    pub name: String,
    pub success: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub error: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1029,6 +1615,14 @@ pub struct WindowsWorldWritableWarningNotification {
    pub failed_scan: bool,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ContextCompactedNotification {
    pub thread_id: String,
    pub turn_id: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1038,17 +1632,8 @@ pub struct CommandExecutionRequestApprovalParams {
    pub item_id: String,
    /// Optional explanatory reason (e.g. request for network access).
    pub reason: Option<String>,
    /// Optional model-provided risk assessment describing the blocked command.
    pub risk: Option<SandboxCommandAssessment>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionRequestAcceptSettings {
    /// If true, automatically approve this command for the duration of the session.
    #[serde(default)]
    pub for_session: bool,
    /// Optional proposed execpolicy amendment to allow similar commands without prompting.
    pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1056,10 +1641,6 @@ pub struct CommandExecutionRequestAcceptSettings {
#[ts(export_to = "v2/")]
pub struct CommandExecutionRequestApprovalResponse {
    pub decision: ApprovalDecision,
    /// Optional approval settings for when the decision is `accept`.
    /// Ignored if the decision is `decline` or `cancel`.
    #[serde(default)]
    pub accept_settings: Option<CommandExecutionRequestAcceptSettings>,
}
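A client replying to a command-execution approval request would therefore send something along these lines (a sketch; whether `acceptSettings` is honored depends on the decision being `accept`, per the doc comments above):

```json
{ "decision": "accept",
  "acceptSettings": { "forSession": true, "proposedExecpolicyAmendment": null } }
```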

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1096,6 +1677,7 @@ pub struct RateLimitSnapshot {
    pub primary: Option<RateLimitWindow>,
    pub secondary: Option<RateLimitWindow>,
    pub credits: Option<CreditsSnapshot>,
    pub plan_type: Option<PlanType>,
}

impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
@@ -1104,6 +1686,7 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
            primary: value.primary.map(RateLimitWindow::from),
            secondary: value.secondary.map(RateLimitWindow::from),
            credits: value.credits.map(CreditsSnapshot::from),
            plan_type: value.plan_type,
        }
    }
}
@@ -1113,7 +1696,9 @@ impl From<CoreRateLimitSnapshot> for RateLimitSnapshot {
#[ts(export_to = "v2/")]
pub struct RateLimitWindow {
    pub used_percent: i32,
    #[ts(type = "number | null")]
    pub window_duration_mins: Option<i64>,
    #[ts(type = "number | null")]
    pub resets_at: Option<i64>,
}

@@ -1,7 +1,8 @@
[package]
name = "codex-app-server-test-client"
version = { workspace = true }
edition = "2024"
version.workspace = true
edition.workspace = true
license.workspace = true

[lints]
workspace = true

@@ -21,7 +21,6 @@ use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecutionRequestAcceptSettings;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
@@ -101,6 +100,15 @@ enum CliCommand {
    /// Start a V2 turn that should not elicit an ExecCommand approval.
    #[command(name = "no-trigger-cmd-approval")]
    NoTriggerCmdApproval,
    /// Send two sequential V2 turns in the same thread to test follow-up behavior.
    SendFollowUpV2 {
        /// Initial user message for the first turn.
        #[arg()]
        first_message: String,
        /// Follow-up user message for the second turn.
        #[arg()]
        follow_up_message: String,
    },
    /// Trigger the ChatGPT login flow and wait for completion.
    TestLogin,
    /// Fetch the current account rate limits from the Codex app-server.
@@ -120,6 +128,10 @@ fn main() -> Result<()> {
            trigger_patch_approval(codex_bin, user_message)
        }
        CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
        CliCommand::SendFollowUpV2 {
            first_message,
            follow_up_message,
        } => send_follow_up_v2(codex_bin, first_message, follow_up_message),
        CliCommand::TestLogin => test_login(codex_bin),
        CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
    }
@@ -209,6 +221,44 @@ fn send_message_v2_with_policies(
    Ok(())
}

fn send_follow_up_v2(
    codex_bin: String,
    first_message: String,
    follow_up_message: String,
) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");

    let thread_response = client.thread_start(ThreadStartParams::default())?;
    println!("< thread/start response: {thread_response:?}");

    let first_turn_params = TurnStartParams {
        thread_id: thread_response.thread.id.clone(),
        input: vec![V2UserInput::Text {
            text: first_message,
        }],
        ..Default::default()
    };
    let first_turn_response = client.turn_start(first_turn_params)?;
    println!("< turn/start response (initial): {first_turn_response:?}");
    client.stream_turn(&thread_response.thread.id, &first_turn_response.turn.id)?;

    let follow_up_params = TurnStartParams {
        thread_id: thread_response.thread.id.clone(),
        input: vec![V2UserInput::Text {
            text: follow_up_message,
        }],
        ..Default::default()
    };
    let follow_up_response = client.turn_start(follow_up_params)?;
    println!("< turn/start response (follow-up): {follow_up_response:?}");
    client.stream_turn(&thread_response.thread.id, &follow_up_response.turn.id)?;

    Ok(())
}

fn test_login(codex_bin: String) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;

@@ -503,6 +553,10 @@ impl CodexClient {
                print!("{}", delta.delta);
                std::io::stdout().flush().ok();
            }
            ServerNotification::TerminalInteraction(delta) => {
                println!("[stdin sent: {}]", delta.stdin);
                std::io::stdout().flush().ok();
            }
            ServerNotification::ItemStarted(payload) => {
                println!("\n< item started: {:?}", payload.item);
            }
@@ -512,7 +566,9 @@ impl CodexClient {
            ServerNotification::TurnCompleted(payload) => {
                if payload.turn.id == turn_id {
                    println!("\n< turn/completed notification: {:?}", payload.turn.status);
                    if let TurnStatus::Failed { error } = &payload.turn.status {
                    if payload.turn.status == TurnStatus::Failed
                        && let Some(error) = payload.turn.error
                    {
                        println!("[turn error] {}", error.message);
                    }
                    break;
@@ -700,7 +756,7 @@ impl CodexClient {
            turn_id,
            item_id,
            reason,
            risk,
            proposed_execpolicy_amendment,
        } = params;

        println!(
@@ -709,13 +765,12 @@ impl CodexClient {
        if let Some(reason) = reason.as_deref() {
            println!("< reason: {reason}");
        }
        if let Some(risk) = risk.as_ref() {
            println!("< risk assessment: {risk:?}");
        if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
            println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
        }

        let response = CommandExecutionRequestApprovalResponse {
            decision: ApprovalDecision::Accept,
            accept_settings: Some(CommandExecutionRequestAcceptSettings { for_session: false }),
        };
        self.send_server_request_response(request_id, &response)?;
        println!("< approved commandExecution request for item {item_id}");

@@ -1,7 +1,8 @@
[package]
edition = "2024"
name = "codex-app-server"
version = { workspace = true }
version.workspace = true
edition.workspace = true
license.workspace = true

[[bin]]
name = "codex-app-server"
@@ -25,10 +26,16 @@ codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-feedback = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
mcp-types = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
toml_edit = { workspace = true }
tokio = { workspace = true, features = [
    "io-std",
    "macros",
@@ -50,6 +57,5 @@ mcp-types = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
serial_test = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

@@ -1,16 +1,16 @@
# codex-app-server

`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt).

## Table of Contents
- [Protocol](#protocol)
- [Message Schema](#message-schema)
- [Core Primitives](#core-primitives)
- [Lifecycle Overview](#lifecycle-overview)
- [Initialization](#initialization)
- [Core primitives](#core-primitives)
- [Thread & turn endpoints](#thread--turn-endpoints)
- [API Overview](#api-overview)
- [Events](#events)
- [Auth endpoints](#auth-endpoints)
- [Events (work-in-progress)](#v2-streaming-events-work-in-progress)

## Protocol

@@ -25,6 +25,15 @@ codex app-server generate-ts --out DIR
codex app-server generate-json-schema --out DIR
```

## Core Primitives

The API exposes three top-level primitives representing an interaction between a user and Codex:
- **Thread**: A conversation between a user and the Codex agent. Each thread contains multiple turns.
- **Turn**: One turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- **Item**: Represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations. Example items include user messages, agent reasoning, agent messages, shell commands, file edits, etc.

Use the thread APIs to create, list, or archive conversations. Drive a conversation with turn APIs and stream progress via turn notifications.

## Lifecycle Overview

- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification. Any other request before this handshake is rejected.
@@ -37,37 +46,34 @@ codex app-server generate-json-schema --out DIR

Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error.

Example:
Applications building on top of `codex app-server` should identify themselves via the `clientInfo` parameter.

Example (from OpenAI's official VS Code extension):
```json
{ "method": "initialize", "id": 0, "params": {
  "clientInfo": { "name": "codex-vscode", "title": "Codex VS Code Extension", "version": "0.1.0" }
} }
{ "id": 0, "result": { "userAgent": "codex-app-server/0.1.0 codex-vscode/0.1.0" } }
{ "method": "initialized" }
```

## Core primitives

We have 3 top-level primitives:
- Thread - a conversation between the Codex agent and a user. Each thread contains multiple turns.
- Turn - one turn of the conversation, typically starting with a user message and finishing with an agent message. Each turn contains multiple items.
- Item - represents user inputs and agent outputs as part of the turn, persisted and used as the context for future conversations.

## Thread & turn endpoints

The JSON-RPC API exposes dedicated methods for managing Codex conversations. Threads store long-lived conversation metadata, and turns store the per-message exchange (input → Codex output, including streamed items). Use the thread APIs to create, list, or archive sessions, then drive the conversation with turn APIs and notifications.

### Quick reference
## API Overview
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits an `item/completed` notification with a `codeReview` item when results are ready.
- `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `mcpServers/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
- `config/read` — fetch the effective config on disk after resolving config layering (see the example after this list).
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.

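As a sketch of the config surface, `config/read` resolves the layered config and reports a per-key origin. The shapes below follow the v2 schema; the model name, path, and version string are invented, and `layers` is omitted because it was not requested:

```json
{ "method": "config/read", "id": 5, "params": { "includeLayers": false } }
{ "id": 5, "result": {
  "config": { "model": "gpt-5-codex", "approval_policy": "on-request", "sandbox_mode": "workspace-write" },
  "origins": { "model": { "name": "user", "source": "/home/user/.codex/config.toml", "version": "a1b2c3" } }
} }
```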
### 1) Start or resume a thread
|
||||
### Example: Start or resume a thread
|
||||
|
||||
Start a fresh thread when you need a new Codex conversation.
|
||||
|
||||
@@ -98,7 +104,7 @@ To continue a stored session, call `thread/resume` with the `thread.id` you prev
|
||||
{ "id": 11, "result": { "thread": { "id": "thr_123", … } } }
|
||||
```
|
||||
|
||||
### 2) List threads (pagination & filters)
|
||||
### Example: List threads (with pagination & filters)
|
||||
|
||||
`thread/list` lets you render a history UI. Pass any combination of:
|
||||
- `cursor` — opaque string from a prior response; omit for the first page.
|
||||
@@ -123,7 +129,7 @@ Example:
|
||||
|
||||
When `nextCursor` is `null`, you’ve reached the final page.
|
||||
|
||||
### 3) Archive a thread
|
||||
### Example: Archive a thread
|
||||
|
||||
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
|
||||
|
||||
@@ -134,7 +140,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
|
||||
|
||||
An archived thread will not appear in future calls to `thread/list`.
|
||||
|
||||
### 4) Start a turn (send user input)
|
||||
### Example: Start a turn (send user input)
|
||||
|
||||
Turns attach user input (text or images) to a thread and trigger Codex generation. The `input` field is a list of discriminated unions:
|
||||
|
||||
@@ -168,7 +174,7 @@ You can optionally specify config overrides on the new turn. If specified, these
|
||||
} } }
|
||||
```
|
||||
|
||||
### 5) Interrupt an active turn
|
||||
### Example: Interrupt an active turn
|
||||
|
||||
You can cancel a running Turn with `turn/interrupt`.
|
||||
|
||||
@@ -182,7 +188,7 @@ You can cancel a running Turn with `turn/interrupt`.
|
||||
|
||||
The server requests cancellations for running subprocesses, then emits a `turn/completed` event with `status: "interrupted"`. Rely on the `turn/completed` to know when Codex-side cleanup is done.
|
||||
|
||||
### Example: Request a code review

Use `review/start` to run Codex’s reviewer on the currently checked-out project. The request takes the thread id plus a `target` describing what should be reviewed:

@@ -190,55 +196,171 @@ Use `review/start` to run Codex’s reviewer on the currently checked-out projec

- `{"type":"baseBranch","branch":"main"}` — diff against the provided branch’s upstream (see the prompt for the exact `git merge-base`/`git diff` instructions Codex will run).
- `{"type":"commit","sha":"abc1234","title":"Optional subject"}` — review a specific commit.
- `{"type":"custom","instructions":"Free-form reviewer instructions"}` — fallback prompt equivalent to the legacy manual review request.
- `appendToOriginalThread` (bool, default `false`) — when `true`, Codex also records a final assistant-style message with the review summary in the original thread. When `false`, only the review-mode items are emitted for the review run and no extra message is added to the original thread.
- `delivery` (`"inline"` or `"detached"`, default `"inline"`) — where the review runs:
  - `"inline"`: run the review as a new turn on the existing thread. The response’s `reviewThreadId` equals the original `threadId`, and no new `thread/started` notification is emitted.
  - `"detached"`: fork a new review thread from the parent conversation and run the review there. The response’s `reviewThreadId` is the id of this new review thread, and the server emits a `thread/started` notification for it before streaming review items.

Example request/response:
```json
{ "method": "review/start", "id": 40, "params": {
  "threadId": "thr_123",
  "appendToOriginalThread": true,
  "delivery": "inline",
  "target": { "type": "commit", "sha": "1234567deadbeef", "title": "Polish tui colors" }
} }
{ "id": 40, "result": {
  "turn": {
    "id": "turn_900",
    "status": "inProgress",
    "items": [
      { "type": "userMessage", "id": "turn_900", "content": [ { "type": "text", "text": "Review commit 1234567: Polish tui colors" } ] }
    ],
    "error": null
  },
  "reviewThreadId": "thr_123"
} }
```
For a detached review, use `"delivery": "detached"`. The response has the same shape, but `reviewThreadId` will be the id of the new review thread (different from the original `threadId`). The server also emits a `thread/started` notification for that new thread before streaming the review turn.

Codex streams the usual `turn/started` notification followed by an `item/started`
with an `enteredReviewMode` item so clients can show progress:

```json
{ "method": "item/started", "params": { "item": {
  "type": "enteredReviewMode",
  "id": "turn_900",
  "review": "current changes"
} } }
```
When the reviewer finishes, the server emits `item/started` and `item/completed`
containing an `exitedReviewMode` item with the final review text:

```json
{ "method": "item/completed", "params": { "item": {
  "type": "exitedReviewMode",
  "id": "turn_900",
  "review": "Looks solid overall...\n\n- Prefer Stylize helpers — app.rs:10-20\n ..."
} } }
```

The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::ExitedReviewMode` in the generated schema). Use this notification to render the reviewer output in your client.
### Example: One-off command execution

Run a standalone command (argv vector) in the server’s sandbox without creating a thread or turn:

```json
{ "method": "command/exec", "id": 32, "params": {
  "command": ["ls", "-la"],
  "cwd": "/Users/me/project",                    // optional; defaults to server cwd
  "sandboxPolicy": { "type": "workspaceWrite" }, // optional; defaults to user config
  "timeoutMs": 10000                             // optional; ms timeout; defaults to server timeout
} }
{ "id": 32, "result": { "exitCode": 0, "stdout": "...", "stderr": "" } }
```
Notes:

- Empty `command` arrays are rejected.
- `sandboxPolicy` accepts the same shape used by `turn/start` (e.g., `dangerFullAccess`, `readOnly`, or `workspaceWrite` with flags).
- When omitted, `timeoutMs` falls back to the server default.
## Events

Event notifications are the server-initiated event stream for thread lifecycles, turn lifecycles, and the items within them. After you start or resume a thread, keep reading stdout for `thread/started`, `turn/*`, and `item/*` notifications.

### Turn events

The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.

- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/diff/updated` — `{ threadId, turnId, diff }` is the up-to-date snapshot of the turn-level unified diff, emitted after every `fileChange` item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.

Today both notifications carry an empty `items` array even when item events were streamed; rely on `item/*` notifications for the canonical item list until this is fixed.
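For example, a plan update might stream as follows (shape taken from the `turn/plan/updated` bullet above; values illustrative):

```json
{ "method": "turn/plan/updated", "params": {
  "turnId": "turn_900",
  "explanation": "Reproduce the bug before patching",
  "plan": [
    { "step": "Reproduce failing test", "status": "completed" },
    { "step": "Patch the parser", "status": "inProgress" },
    { "step": "Run full test suite", "status": "pending" }
  ]
} }
```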
#### Items

`ThreadItem` is the tagged union carried in turn responses and `item/*` notifications. Currently we support events for the following items:

- `userMessage` — `{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
- `agentMessage` — `{id, text}` containing the accumulated agent reply.
- `reasoning` — `{id, summary, content}` where `summary` holds streamed reasoning summaries (applicable to most OpenAI models) and `content` holds raw reasoning blocks (applicable to e.g. open-source models).
- `commandExecution` — `{id, command, cwd, status, commandActions, aggregatedOutput?, exitCode?, durationMs?}` for sandboxed commands; `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `fileChange` — `{id, changes, status}` describing proposed edits; `changes` lists `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `mcpToolCall` — `{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
- `webSearch` — `{id, query}` for a web search request issued by the agent.
- `imageView` — `{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode` — `{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
- `exitedReviewMode` — `{id, review}` emitted when the reviewer finishes; `review` is the full plain-text review (usually overall notes plus bullet-point findings).
- `compacted` — `{threadId, turnId}` emitted when Codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:

- `item/started` — emits the full `item` when a new unit of work begins so the UI can render it immediately; the `item.id` in this payload matches the `itemId` used by deltas.
- `item/completed` — sends the final `item` once that work finishes (e.g., after a tool call or message completes); treat this as the authoritative state.

There are additional item-specific events:

#### agentMessage

- `item/agentMessage/delta` — appends streamed text for the agent message; concatenate `delta` values for the same `itemId` in order to reconstruct the full reply.
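Concretely, two consecutive deltas for one message might look like this sketch (payload keys per the bullet above):

```json
{ "method": "item/agentMessage/delta", "params": { "itemId": "item_1", "delta": "Hello, " } }
{ "method": "item/agentMessage/delta", "params": { "itemId": "item_1", "delta": "world." } }
```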
#### reasoning

- `item/reasoning/summaryTextDelta` — streams readable reasoning summaries; `summaryIndex` increments when a new summary section opens.
- `item/reasoning/summaryPartAdded` — marks the boundary between reasoning summary sections for an `itemId`; subsequent `summaryTextDelta` entries share the same `summaryIndex`.
- `item/reasoning/textDelta` — streams raw reasoning text (only applicable to e.g. open-source models); use `contentIndex` to group deltas that belong together before showing them in the UI.

#### commandExecution

- `item/commandExecution/outputDelta` — streams stdout/stderr for the command; append deltas in order to render live output alongside `aggregatedOutput` in the final item.

Final `commandExecution` items include parsed `commandActions`, `status`, `exitCode`, and `durationMs` so the UI can summarize what ran and whether it succeeded.

#### fileChange

- `item/fileChange/outputDelta` — contains the tool call response of the underlying `apply_patch` tool call.
### Errors

The `error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). It carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.

`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:

- `ContextWindowExceeded`
- `UsageLimitExceeded`
- `HttpConnectionFailed { httpStatusCode? }` — upstream HTTP failures, including 4xx/5xx
- `ResponseStreamConnectionFailed { httpStatusCode? }` — failure to connect to the response SSE stream
- `ResponseStreamDisconnected { httpStatusCode? }` — the response SSE stream disconnected mid-turn, before completion
- `ResponseTooManyFailedAttempts { httpStatusCode? }`
- `BadRequest`
- `Unauthorized`
- `SandboxError`
- `InternalServerError`
- `Other` — all unclassified errors

When an upstream HTTP status is available (for example, from the Responses API or a provider), it is forwarded in `httpStatusCode` on the relevant `codexErrorInfo` variant.
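A representative mid-turn error notification might look like this sketch (the payload mirrors the failure shape described above; the exact JSON serialization of a `codexErrorInfo` variant is an assumption):

```json
{ "method": "error", "params": {
  "threadId": "thr_123", "turnId": "turn_900",
  "error": {
    "message": "response stream disconnected",
    "codexErrorInfo": { "type": "ResponseStreamDisconnected", "httpStatusCode": 502 }
  }
} }
```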
## Approvals

Certain actions (shell commands or modifying files) may require explicit user approval depending on the user's config. When `turn/start` is used, the app-server drives an approval flow by sending a server-initiated JSON-RPC request to the client. The client must respond to tell Codex whether to proceed. UIs should present these requests inline with the active turn so users can review the proposed command or diff before choosing.

- Requests include `threadId` and `turnId`; use them to scope UI state to the active conversation.
- Respond with a single `{ "decision": "accept" | "decline" }` payload (plus optional `acceptSettings` on command executions). The server resumes or declines the work and ends the item with `item/completed`.

### Command execution approvals

Order of messages:

1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }` (see the sketch after this list).
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
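A wire-level sketch of steps 2–3 (ids illustrative; the `parsedCmd` entry shape mirrors the server's `ParsedCommand` type, and its exact serialization is an assumption):

```json
{ "method": "item/commandExecution/requestApproval", "id": 7, "params": {
  "threadId": "thr_123", "turnId": "turn_900", "itemId": "item_55",
  "parsedCmd": [ { "type": "unknown", "cmd": "python3 -c 'print(42)'" } ]
} }
{ "id": 7, "result": { "decision": "accept", "acceptSettings": { "forSession": false } } }
```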
### File change approvals

Order of messages:

1. `item/started` — emits a `fileChange` item with `changes` (diff chunk summaries) and `status: "inProgress"`. Show the proposed edits and paths to the user.
2. `item/fileChange/requestApproval` (request) — includes `itemId`, `threadId`, `turnId`, and an optional `reason`.
3. Client response — `{ "decision": "accept" }` or `{ "decision": "decline" }`.
4. `item/completed` — returns the same `fileChange` item with `status` updated to `completed`, `failed`, or `declined` after the patch attempt. Rely on this to show success/failure and finalize the diff state in your UI.

UI guidance for IDEs: surface an approval dialog as soon as the request arrives. The turn proceeds once the server receives a response to the approval request, and the terminal `item/completed` notification is then sent with the appropriate status.
## Auth endpoints

The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, log out, and inspect ChatGPT rate limits.

### API Overview

- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).

@@ -246,6 +368,8 @@ The JSON-RPC auth/account surface exposes request/response methods plus server-i

- `account/logout` — sign out; triggers `account/updated`.
- `account/updated` (notify) — emitted whenever the auth mode changes (`authMode`: `apikey`, `chatgpt`, or `null`).
- `account/rateLimits/read` — fetch ChatGPT rate limits; updates arrive via `account/rateLimits/updated` (notify).
- `account/rateLimits/updated` (notify) — emitted whenever a user's ChatGPT rate limits change.
- `mcpServer/oauthLogin/completed` (notify) — emitted after an `mcpServer/oauth/login` flow finishes for a server; payload includes `{ name, success, error? }`.
### 1) Check auth state

@@ -323,62 +447,3 @@ Field notes:

- `usedPercent` is current usage within the OpenAI quota window.
- `windowDurationMins` is the quota window length.
- `resetsAt` is a Unix timestamp (seconds) for the next reset.
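Putting the field notes together, a rate-limit snapshot serializes roughly like this (the `primary`/`secondary`/`credits`/`planType` keys appear in the server's serialization tests later in this diff; the surrounding envelope is an assumption):

```json
{
  "rateLimits": {
    "primary": { "usedPercent": 50.0, "windowDurationMins": 60, "resetsAt": 123 },
    "secondary": null,
    "credits": null,
    "planType": "plus"
  }
}
```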
### Dev notes

- `codex app-server generate-ts --out <dir>` emits v2 types under `v2/`.
- `codex app-server generate-json-schema --out <dir>` outputs `codex_app_server_protocol.schemas.json`.
- See [“Authentication and authorization” in the config docs](../../docs/config.md#authentication-and-authorization) for configuration knobs.
File diff suppressed because it is too large.

codex-rs/app-server/src/config_api.rs: new file (1144 lines); diff suppressed because it is too large.
@@ -18,6 +18,7 @@ use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
@@ -30,6 +31,7 @@ use tracing_subscriber::util::SubscriberInitExt;

mod bespoke_event_handling;
mod codex_message_processor;
mod config_api;
mod error_code;
mod fuzzy_file_search;
mod message_processor;
@@ -80,11 +82,12 @@ pub async fn run_main(
            format!("error parsing -c overrides: {e}"),
        )
    })?;
    let config = Config::load_with_cli_overrides(cli_kv_overrides, ConfigOverrides::default())
        .await
        .map_err(|e| {
            std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
        })?;
    let config =
        Config::load_with_cli_overrides(cli_kv_overrides.clone(), ConfigOverrides::default())
            .await
            .map_err(|e| {
                std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
            })?;

    let feedback = CodexFeedback::new();

@@ -121,10 +124,12 @@ pub async fn run_main(
    // Task: process incoming messages.
    let processor_handle = tokio::spawn({
        let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
        let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
        let mut processor = MessageProcessor::new(
            outgoing_message_sender,
            codex_linux_sandbox_exe,
            std::sync::Arc::new(config),
            cli_overrides,
            feedback.clone(),
        );
        async move {
@@ -1,16 +1,22 @@
use std::path::PathBuf;
use std::sync::Arc;

use crate::codex_message_processor::CodexMessageProcessor;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
@@ -18,11 +24,12 @@ use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_feedback::CodexFeedback;
use codex_protocol::protocol::SessionSource;
use std::sync::Arc;
use toml::Value as TomlValue;

pub(crate) struct MessageProcessor {
    outgoing: Arc<OutgoingMessageSender>,
    codex_message_processor: CodexMessageProcessor,
    config_api: ConfigApi,
    initialized: bool,
}

@@ -33,6 +40,7 @@ impl MessageProcessor {
        outgoing: OutgoingMessageSender,
        codex_linux_sandbox_exe: Option<PathBuf>,
        config: Arc<Config>,
        cli_overrides: Vec<(String, TomlValue)>,
        feedback: CodexFeedback,
    ) -> Self {
        let outgoing = Arc::new(outgoing);
@@ -50,13 +58,16 @@ impl MessageProcessor {
            conversation_manager,
            outgoing.clone(),
            codex_linux_sandbox_exe,
            config,
            Arc::clone(&config),
            cli_overrides.clone(),
            feedback,
        );
        let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);

        Self {
            outgoing,
            codex_message_processor,
            config_api,
            initialized: false,
        }
    }
@@ -134,9 +145,20 @@ impl MessageProcessor {
            }
        }

        self.codex_message_processor
            .process_request(codex_request)
            .await;
        match codex_request {
            ClientRequest::ConfigRead { request_id, params } => {
                self.handle_config_read(request_id, params).await;
            }
            ClientRequest::ConfigValueWrite { request_id, params } => {
                self.handle_config_value_write(request_id, params).await;
            }
            ClientRequest::ConfigBatchWrite { request_id, params } => {
                self.handle_config_batch_write(request_id, params).await;
            }
            other => {
                self.codex_message_processor.process_request(other).await;
            }
        }
    }

    pub(crate) async fn process_notification(&self, notification: JSONRPCNotification) {
@@ -156,4 +178,33 @@ impl MessageProcessor {
    pub(crate) fn process_error(&mut self, err: JSONRPCError) {
        tracing::error!("<- error: {:?}", err);
    }

    async fn handle_config_read(&self, request_id: RequestId, params: ConfigReadParams) {
        match self.config_api.read(params).await {
            Ok(response) => self.outgoing.send_response(request_id, response).await,
            Err(error) => self.outgoing.send_error(request_id, error).await,
        }
    }

    async fn handle_config_value_write(
        &self,
        request_id: RequestId,
        params: ConfigValueWriteParams,
    ) {
        match self.config_api.write_value(params).await {
            Ok(response) => self.outgoing.send_response(request_id, response).await,
            Err(error) => self.outgoing.send_error(request_id, error).await,
        }
    }

    async fn handle_config_batch_write(
        &self,
        request_id: RequestId,
        params: ConfigBatchWriteParams,
    ) {
        match self.config_api.batch_write(params).await {
            Ok(response) => self.outgoing.send_response(request_id, response).await,
            Err(error) => self.outgoing.send_error(request_id, error).await,
        }
    }
}
@@ -1,12 +1,19 @@
use codex_app_server_protocol::AuthMode;
use std::sync::Arc;

use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_common::model_presets::ModelPreset;
use codex_common::model_presets::ReasoningEffortPreset;
use codex_common::model_presets::builtin_model_presets;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;

pub fn supported_models(auth_mode: Option<AuthMode>) -> Vec<Model> {
    builtin_model_presets(auth_mode)
pub async fn supported_models(
    conversation_manager: Arc<ConversationManager>,
    config: &Config,
) -> Vec<Model> {
    conversation_manager
        .list_models(config)
        .await
        .into_iter()
        .map(model_from_preset)
        .collect()
@@ -27,7 +34,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
}

fn reasoning_efforts_from_preset(
    efforts: &'static [ReasoningEffortPreset],
    efforts: Vec<ReasoningEffortPreset>,
) -> Vec<ReasoningEffortOption> {
    efforts
        .iter()
@@ -16,6 +16,9 @@ use tracing::warn;

use crate::error_code::INTERNAL_ERROR_CODE;

#[cfg(test)]
use codex_protocol::account::PlanType;

/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
    next_request_id: AtomicI64,
@@ -230,6 +233,7 @@ mod tests {
            }),
            secondary: None,
            credits: None,
            plan_type: Some(PlanType::Plus),
        },
    });

@@ -245,7 +249,8 @@ mod tests {
                "resetsAt": 123
            },
            "secondary": null,
            "credits": null
            "credits": null,
            "planType": "plus"
        }
    },
}),
@@ -1,7 +1,8 @@
[package]
edition = "2024"
name = "app_test_support"
version = { workspace = true }
version.workspace = true
edition.workspace = true
license.workspace = true

[lib]
path = "lib.rs"
@@ -24,3 +25,5 @@ tokio = { workspace = true, features = [
] }
uuid = { workspace = true }
wiremock = { workspace = true }
core_test_support = { path = "../../../core/tests/common" }
shlex = { workspace = true }
@@ -1,6 +1,7 @@
mod auth_fixtures;
mod mcp_process;
mod mock_model_server;
mod models_cache;
mod responses;
mod rollout;

@@ -9,12 +10,19 @@ pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use core_test_support::format_with_current_shell_display_non_login;
pub use core_test_support::format_with_current_shell_non_login;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use models_cache::write_models_cache;
pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_shell_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
use serde::de::DeserializeOwned;
@@ -18,6 +18,9 @@ use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAuthStatusParams;
@@ -401,6 +404,30 @@ impl McpProcess {
        self.send_request("logoutChatGpt", None).await
    }

    pub async fn send_config_read_request(
        &mut self,
        params: ConfigReadParams,
    ) -> anyhow::Result<i64> {
        let params = Some(serde_json::to_value(params)?);
        self.send_request("config/read", params).await
    }

    pub async fn send_config_value_write_request(
        &mut self,
        params: ConfigValueWriteParams,
    ) -> anyhow::Result<i64> {
        let params = Some(serde_json::to_value(params)?);
        self.send_request("config/value/write", params).await
    }

    pub async fn send_config_batch_write_request(
        &mut self,
        params: ConfigBatchWriteParams,
    ) -> anyhow::Result<i64> {
        let params = Some(serde_json::to_value(params)?);
        self.send_request("config/batchWrite", params).await
    }

    /// Send an `account/logout` JSON-RPC request.
    pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> {
        self.send_request("account/logout", None).await
codex-rs/app-server/tests/common/models_cache.rs: new file (74 lines)

@@ -0,0 +1,74 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::openai_models::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use serde_json::json;
use std::path::Path;

/// Convert a ModelPreset to ModelInfo for cache storage.
fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
    ModelInfo {
        slug: preset.id.clone(),
        display_name: preset.display_name.clone(),
        description: Some(preset.description.clone()),
        default_reasoning_level: preset.default_reasoning_effort,
        supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
        shell_type: ConfigShellToolType::ShellCommand,
        visibility: if preset.show_in_picker {
            ModelVisibility::List
        } else {
            ModelVisibility::Hide
        },
        minimal_client_version: ClientVersion(0, 1, 0),
        supported_in_api: true,
        priority,
        upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
        base_instructions: None,
    }
}

/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
/// Uses the built-in model presets from ModelsManager, converted to ModelInfo format.
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
    // Get all presets and filter for show_in_picker (same as builtin_model_presets does)
    let presets: Vec<&ModelPreset> = all_model_presets()
        .iter()
        .filter(|preset| preset.show_in_picker)
        .collect();
    // Convert presets to ModelInfo, assigning priorities (higher = earlier in list)
    // Priority is used for sorting, so first model gets highest priority
    let models: Vec<ModelInfo> = presets
        .iter()
        .enumerate()
        .map(|(idx, preset)| {
            // Higher priority = earlier in list, so reverse the index
            let priority = (presets.len() - idx) as i32;
            preset_to_info(preset, priority)
        })
        .collect();

    write_models_cache_with_models(codex_home, models)
}

/// Write a models_cache.json file with specific models.
/// Useful when tests need specific models to be available.
pub fn write_models_cache_with_models(
    codex_home: &Path,
    models: Vec<ModelInfo>,
) -> std::io::Result<()> {
    let cache_path = codex_home.join("models_cache.json");
    // DateTime<Utc> serializes to RFC3339 format by default with serde
    let fetched_at: DateTime<Utc> = Utc::now();
    let cache = json!({
        "fetched_at": fetched_at,
        "etag": null,
        "models": models
    });
    std::fs::write(cache_path, serde_json::to_string_pretty(&cache)?)
}
@@ -1,17 +1,18 @@
use serde_json::json;
use std::path::Path;

pub fn create_shell_sse_response(
pub fn create_shell_command_sse_response(
    command: Vec<String>,
    workdir: Option<&Path>,
    timeout_ms: Option<u64>,
    call_id: &str,
) -> anyhow::Result<String> {
    // The `arguments` for the `shell` tool is a serialized JSON object.
    // The `arguments` for the `shell_command` tool is a serialized JSON object.
    let command_str = shlex::try_join(command.iter().map(String::as_str))?;
    let tool_call_arguments = serde_json::to_string(&json!({
        "command": command,
        "command": command_str,
        "workdir": workdir.map(|w| w.to_string_lossy()),
        "timeout": timeout_ms
        "timeout_ms": timeout_ms
    }))?;
    let tool_call = json!({
        "choices": [
@@ -21,7 +22,7 @@ pub fn create_shell_sse_response(
                {
                    "id": call_id,
                    "function": {
                        "name": "shell",
                        "name": "shell_command",
                        "arguments": tool_call_arguments
                    }
                }
@@ -62,10 +63,10 @@ pub fn create_apply_patch_sse_response(
    patch_content: &str,
    call_id: &str,
) -> anyhow::Result<String> {
    // Use shell command to call apply_patch with heredoc format
    let shell_command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
    // Use shell_command to call apply_patch with heredoc format
    let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
    let tool_call_arguments = serde_json::to_string(&json!({
        "command": ["bash", "-lc", shell_command]
        "command": command
    }))?;

    let tool_call = json!({
@@ -76,7 +77,46 @@ pub fn create_apply_patch_sse_response(
                {
                    "id": call_id,
                    "function": {
                        "name": "shell",
                        "name": "shell_command",
                        "arguments": tool_call_arguments
                    }
                }
                ]
            },
            "finish_reason": "tool_calls"
        }
    ]
});

    let sse = format!(
        "data: {}\n\ndata: DONE\n\n",
        serde_json::to_string(&tool_call)?
    );
    Ok(sse)
}

pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String> {
    let (cmd, args) = if cfg!(windows) {
        ("cmd.exe", vec!["/d", "/c", "echo hi"])
    } else {
        ("/bin/sh", vec!["-c", "echo hi"])
    };
    let command = std::iter::once(cmd.to_string())
        .chain(args.into_iter().map(str::to_string))
        .collect::<Vec<_>>();
    let tool_call_arguments = serde_json::to_string(&json!({
        "cmd": command.join(" "),
        "yield_time_ms": 500
    }))?;
    let tool_call = json!({
        "choices": [
            {
                "delta": {
                    "tool_calls": [
                        {
                            "id": call_id,
                            "function": {
                                "name": "exec_command",
                                "arguments": tool_call_arguments
                            }
                        }
@@ -1,6 +1,8 @@
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::protocol::GitInfo;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
@@ -22,6 +24,7 @@ pub fn create_fake_rollout(
    meta_rfc3339: &str,
    preview: &str,
    model_provider: Option<&str>,
    git_info: Option<GitInfo>,
) -> Result<String> {
    let uuid = Uuid::new_v4();
    let uuid_str = uuid.to_string();
@@ -37,7 +40,7 @@ pub fn create_fake_rollout(
    let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));

    // Build JSONL lines
    let payload = serde_json::to_value(SessionMeta {
    let meta = SessionMeta {
        id: conversation_id,
        timestamp: meta_rfc3339.to_string(),
        cwd: PathBuf::from("/"),
@@ -46,6 +49,10 @@ pub fn create_fake_rollout(
        instructions: None,
        source: SessionSource::Cli,
        model_provider: model_provider.map(str::to_string),
    };
    let payload = serde_json::to_value(SessionMetaLine {
        meta,
        git: git_info,
    })?;

    let lines = [
@@ -2,7 +2,8 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -22,10 +23,10 @@ use codex_app_server_protocol::SendUserTurnResponse;
use codex_app_server_protocol::ServerRequest;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
@@ -56,7 +57,7 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
    // Create a mock model server that immediately ends each turn.
    // Two turns are expected: initial session configure + one user message.
    let responses = vec![
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec!["ls".to_string()],
            Some(&working_directory),
            Some(5000),
@@ -175,7 +176,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {

    // Mock server will request a python shell call for the first and second turn, then finish.
    let responses = vec![
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -186,7 +187,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_sse_response(
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -267,14 +268,9 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
        ExecCommandApprovalParams {
            conversation_id,
            call_id: "call1".to_string(),
            command: vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            command: format_with_current_shell("python3 -c 'print(42)'"),
            cwd: working_directory.clone(),
            reason: None,
            risk: None,
            parsed_cmd: vec![ParsedCommand::Unknown {
                cmd: "python3 -c 'print(42)'".to_string()
            }],
@@ -353,23 +349,15 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
    std::fs::create_dir(&second_cwd)?;

    let responses = vec![
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo first turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_sse_response(
            vec![
                "bash".to_string(),
                "-lc".to_string(),
                "echo second turn".to_string(),
            ],
        create_shell_command_sse_response(
            vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-second",
@@ -481,13 +469,9 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
        exec_begin.cwd, second_cwd,
        "exec turn should run from updated cwd"
    );
    let expected_command = format_with_current_shell("echo second turn");
    assert_eq!(
        exec_begin.command,
        vec![
            "bash".to_string(),
            "-lc".to_string(),
            "echo second turn".to_string()
        ],
        exec_begin.command, expected_command,
        "exec turn should run expected command"
    );
@@ -10,10 +10,10 @@ use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_core::protocol::AskForApproval;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::path::Path;
@@ -19,7 +19,7 @@ use tokio::time::timeout;

use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_sse_response;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -56,7 +56,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
    std::fs::create_dir(&working_directory)?;

    // Create mock server with a single SSE response: the long sleep command
    let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
    let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
        shell_command.clone(),
        Some(&working_directory),
        Some(10_000), // 10 seconds timeout in ms
@@ -31,6 +31,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
        "2025-01-02T12:00:00Z",
        "Hello A",
        Some("openai"),
        None,
    )?;
    create_fake_rollout(
        codex_home.path(),
@@ -38,6 +39,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
        "2025-01-01T13:00:00Z",
        "Hello B",
        Some("openai"),
        None,
    )?;
    create_fake_rollout(
        codex_home.path(),
@@ -45,6 +47,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
        "2025-01-01T12:00:00Z",
        "Hello C",
        None,
        None,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -105,6 +108,7 @@ async fn test_list_and_resume_conversations() -> Result<()> {
        "2025-01-01T11:30:00Z",
        "Hello TP",
        Some("test-provider"),
        None,
    )?;

    // Filtering by model provider should return only matching sessions.
@@ -354,3 +358,81 @@ async fn test_list_and_resume_conversations() -> Result<()> {

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn list_conversations_fetches_through_filtered_pages() -> Result<()> {
    let codex_home = TempDir::new()?;

    // Only the last 3 conversations match the provider filter; request 3 and
    // ensure pagination keeps fetching past non-matching pages.
    let cases = [
        (
            "2025-03-04T12-00-00",
            "2025-03-04T12:00:00Z",
            "skip_provider",
        ),
        (
            "2025-03-03T12-00-00",
            "2025-03-03T12:00:00Z",
            "skip_provider",
        ),
        (
            "2025-03-02T12-00-00",
            "2025-03-02T12:00:00Z",
            "target_provider",
        ),
        (
            "2025-03-01T12-00-00",
            "2025-03-01T12:00:00Z",
            "target_provider",
        ),
        (
            "2025-02-28T12-00-00",
            "2025-02-28T12:00:00Z",
            "target_provider",
        ),
    ];

    for (ts_file, ts_rfc, provider) in cases {
        create_fake_rollout(
            codex_home.path(),
            ts_file,
            ts_rfc,
            "Hello",
            Some(provider),
            None,
        )?;
    }

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let req_id = mcp
        .send_list_conversations_request(ListConversationsParams {
            page_size: Some(3),
            cursor: None,
            model_providers: Some(vec!["target_provider".to_string()]),
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
    )
    .await??;
    let ListConversationsResponse { items, next_cursor } =
        to_response::<ListConversationsResponse>(resp)?;

    assert_eq!(
        items.len(),
        3,
        "should fetch across pages to satisfy the limit"
    );
    assert!(
        items
            .iter()
            .all(|item| item.model_provider == "target_provider")
    );
    assert_eq!(next_cursor, None);

    Ok(())
}
@@ -1,8 +1,6 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::CancelLoginChatGptResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::JSONRPCError;
@@ -14,7 +12,6 @@ use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;

@@ -87,48 +84,6 @@ async fn logout_chatgpt_removes_auth() -> Result<()> {
    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_and_cancel_chatgpt() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let login_id = mcp.send_login_chat_gpt_request().await?;
    let login_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(login_id)),
    )
    .await??;
    let login: LoginChatGptResponse = to_response(login_resp)?;

    let cancel_id = mcp
        .send_cancel_login_chat_gpt_request(CancelLoginChatGptParams {
            login_id: login.login_id,
        })
        .await?;
    let cancel_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
    )
    .await??;
    let _ok: CancelLoginChatGptResponse = to_response(cancel_resp)?;

    // Optionally observe the completion notification; do not fail if it races.
    let maybe_note = timeout(
        Duration::from_secs(2),
        mcp.read_stream_until_notification_message("codex/event/login_chat_gpt_complete"),
    )
    .await;
    if maybe_note.is_err() {
        eprintln!("warning: did not observe login_chat_gpt_complete notification after cancel");
    }
    Ok(())
}

fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    let contents = format!(
@@ -272,40 +272,45 @@ async fn read_raw_response_item(
    mcp: &mut McpProcess,
    conversation_id: ConversationId,
) -> ResponseItem {
    let raw_notification: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
    )
    .await
    .expect("codex/event/raw_response_item notification timeout")
    .expect("codex/event/raw_response_item notification resp");
    loop {
        let raw_notification: JSONRPCNotification = timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
        )
        .await
        .expect("codex/event/raw_response_item notification timeout")
        .expect("codex/event/raw_response_item notification resp");

    let serde_json::Value::Object(params) = raw_notification
        .params
        .expect("codex/event/raw_response_item should have params")
    else {
        panic!("codex/event/raw_response_item should have params");
    };
        let serde_json::Value::Object(params) = raw_notification
            .params
            .expect("codex/event/raw_response_item should have params")
        else {
            panic!("codex/event/raw_response_item should have params");
        };

    let conversation_id_value = params
        .get("conversationId")
        .and_then(|value| value.as_str())
        .expect("raw response item should include conversationId");
        let conversation_id_value = params
            .get("conversationId")
            .and_then(|value| value.as_str())
            .expect("raw response item should include conversationId");

    assert_eq!(
        conversation_id_value,
        conversation_id.to_string(),
        "raw response item conversation mismatch"
    );
        assert_eq!(
            conversation_id_value,
            conversation_id.to_string(),
            "raw response item conversation mismatch"
        );

    let msg_value = params
        .get("msg")
        .cloned()
        .expect("raw response item should include msg payload");
        let msg_value = params
            .get("msg")
            .cloned()
            .expect("raw response item should include msg payload");

    let event: RawResponseItemEvent =
        serde_json::from_value(msg_value).expect("deserialize raw response item");
    event.item
        // Ghost snapshots are produced concurrently and may arrive before the model reply.
        let event: RawResponseItemEvent =
            serde_json::from_value(msg_value).expect("deserialize raw response item");
        if !matches!(event.item, ResponseItem::GhostSnapshot { .. }) {
            return event.item;
        }
    }
}

fn assert_instructions_message(item: &ResponseItem) {
@@ -241,7 +241,7 @@ async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> {
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn login_account_chatgpt_start() -> Result<()> {
async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
codex-rs/app-server/tests/suite/v2/config_rpc.rs: new file (411 lines)

@@ -0,0 +1,411 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigEdit;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

fn write_config(codex_home: &TempDir, contents: &str) -> Result<()> {
    Ok(std::fs::write(
        codex_home.path().join("config.toml"),
        contents,
    )?)
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_returns_effective_and_layers() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"
sandbox_mode = "workspace-write"
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    assert_eq!(config.model.as_deref(), Some("gpt-user"));
    assert_eq!(
        origins.get("model").expect("origin").name,
        ConfigLayerName::User
    );
    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[1].name, ConfigLayerName::User);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_tools() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"

[tools]
web_search = true
view_image = false
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    let tools = config.tools.expect("tools present");
    assert_eq!(
        tools,
        ToolsV2 {
            web_search: Some(true),
            view_image: Some(false),
        }
    );
    assert_eq!(
        origins.get("tools.web_search").expect("origin").name,
        ConfigLayerName::User
    );
    assert_eq!(
        origins.get("tools.view_image").expect("origin").name,
        ConfigLayerName::User
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[1].name, ConfigLayerName::User);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"
approval_policy = "on-request"
sandbox_mode = "workspace-write"

[sandbox_workspace_write]
writable_roots = ["/user"]
network_access = true
"#,
    )?;

    let managed_path = codex_home.path().join("managed_config.toml");
    std::fs::write(
        &managed_path,
||||
r#"
|
||||
model = "gpt-system"
|
||||
approval_policy = "never"
|
||||
|
||||
[sandbox_workspace_write]
|
||||
writable_roots = ["/system"]
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let managed_path_str = managed_path.display().to_string();
|
||||
|
||||
let mut mcp = McpProcess::new_with_env(
|
||||
codex_home.path(),
|
||||
&[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))],
|
||||
)
|
||||
.await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let request_id = mcp
|
||||
.send_config_read_request(ConfigReadParams {
|
||||
include_layers: true,
|
||||
})
|
||||
.await?;
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
||||
)
|
||||
.await??;
|
||||
let ConfigReadResponse {
|
||||
config,
|
||||
origins,
|
||||
layers,
|
||||
} = to_response(resp)?;
|
||||
|
||||
assert_eq!(config.model.as_deref(), Some("gpt-system"));
|
||||
assert_eq!(
|
||||
origins.get("model").expect("origin").name,
|
||||
ConfigLayerName::System
|
||||
);
|
||||
|
||||
assert_eq!(config.approval_policy, Some(AskForApproval::Never));
|
||||
assert_eq!(
|
||||
origins.get("approval_policy").expect("origin").name,
|
||||
ConfigLayerName::System
|
||||
);
|
||||
|
||||
assert_eq!(config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
|
||||
assert_eq!(
|
||||
origins.get("sandbox_mode").expect("origin").name,
|
||||
ConfigLayerName::User
|
||||
);
|
||||
|
||||
let sandbox = config
|
||||
.sandbox_workspace_write
|
||||
.as_ref()
|
||||
.expect("sandbox workspace write");
|
||||
assert_eq!(sandbox.writable_roots, vec![PathBuf::from("/system")]);
|
||||
assert_eq!(
|
||||
origins
|
||||
.get("sandbox_workspace_write.writable_roots.0")
|
||||
.expect("origin")
|
||||
.name,
|
||||
ConfigLayerName::System
|
||||
);
|
||||
|
||||
assert!(sandbox.network_access);
|
||||
assert_eq!(
|
||||
origins
|
||||
.get("sandbox_workspace_write.network_access")
|
||||
.expect("origin")
|
||||
.name,
|
||||
ConfigLayerName::User
|
||||
);
|
||||
|
||||
let layers = layers.expect("layers present");
|
||||
assert_eq!(layers.len(), 3);
|
||||
assert_eq!(layers[0].name, ConfigLayerName::System);
|
||||
assert_eq!(layers[1].name, ConfigLayerName::SessionFlags);
|
||||
assert_eq!(layers[2].name, ConfigLayerName::User);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn config_value_write_replaces_value() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
write_config(
|
||||
&codex_home,
|
||||
r#"
|
||||
model = "gpt-old"
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let read_id = mcp
|
||||
.send_config_read_request(ConfigReadParams {
|
||||
include_layers: false,
|
||||
})
|
||||
.await?;
|
||||
let read_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
|
||||
)
|
||||
.await??;
|
||||
let read: ConfigReadResponse = to_response(read_resp)?;
|
||||
let expected_version = read.origins.get("model").map(|m| m.version.clone());
|
||||
|
||||
let write_id = mcp
|
||||
.send_config_value_write_request(ConfigValueWriteParams {
|
||||
file_path: None,
|
||||
key_path: "model".to_string(),
|
||||
value: json!("gpt-new"),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
expected_version,
|
||||
})
|
||||
.await?;
|
||||
let write_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(write_id)),
|
||||
)
|
||||
.await??;
|
||||
let write: ConfigWriteResponse = to_response(write_resp)?;
|
||||
let expected_file_path = codex_home
|
||||
.path()
|
||||
.join("config.toml")
|
||||
.canonicalize()
|
||||
.unwrap()
|
||||
.display()
|
||||
.to_string();
|
||||
|
||||
assert_eq!(write.status, WriteStatus::Ok);
|
||||
assert_eq!(write.file_path, expected_file_path);
|
||||
assert!(write.overridden_metadata.is_none());
|
||||
|
||||
let verify_id = mcp
|
||||
.send_config_read_request(ConfigReadParams {
|
||||
include_layers: false,
|
||||
})
|
||||
.await?;
|
||||
let verify_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(verify_id)),
|
||||
)
|
||||
.await??;
|
||||
let verify: ConfigReadResponse = to_response(verify_resp)?;
|
||||
assert_eq!(verify.config.model.as_deref(), Some("gpt-new"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn config_value_write_rejects_version_conflict() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
write_config(
|
||||
&codex_home,
|
||||
r#"
|
||||
model = "gpt-old"
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let write_id = mcp
|
||||
.send_config_value_write_request(ConfigValueWriteParams {
|
||||
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
|
||||
key_path: "model".to_string(),
|
||||
value: json!("gpt-new"),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
expected_version: Some("sha256:stale".to_string()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let err: JSONRPCError = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_error_message(RequestId::Integer(write_id)),
|
||||
)
|
||||
.await??;
|
||||
let code = err
|
||||
.error
|
||||
.data
|
||||
.as_ref()
|
||||
.and_then(|d| d.get("config_write_error_code"))
|
||||
.and_then(|v| v.as_str());
|
||||
assert_eq!(code, Some("configVersionConflict"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn config_batch_write_applies_multiple_edits() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
write_config(&codex_home, "")?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let batch_id = mcp
|
||||
.send_config_batch_write_request(ConfigBatchWriteParams {
|
||||
file_path: Some(codex_home.path().join("config.toml").display().to_string()),
|
||||
edits: vec![
|
||||
ConfigEdit {
|
||||
key_path: "sandbox_mode".to_string(),
|
||||
value: json!("workspace-write"),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
},
|
||||
ConfigEdit {
|
||||
key_path: "sandbox_workspace_write".to_string(),
|
||||
value: json!({
|
||||
"writable_roots": ["/tmp"],
|
||||
"network_access": false
|
||||
}),
|
||||
merge_strategy: MergeStrategy::Replace,
|
||||
},
|
||||
],
|
||||
expected_version: None,
|
||||
})
|
||||
.await?;
|
||||
let batch_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(batch_id)),
|
||||
)
|
||||
.await??;
|
||||
let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
|
||||
assert_eq!(batch_write.status, WriteStatus::Ok);
|
||||
let expected_file_path = codex_home
|
||||
.path()
|
||||
.join("config.toml")
|
||||
.canonicalize()
|
||||
.unwrap()
|
||||
.display()
|
||||
.to_string();
|
||||
assert_eq!(batch_write.file_path, expected_file_path);
|
||||
|
||||
let read_id = mcp
|
||||
.send_config_read_request(ConfigReadParams {
|
||||
include_layers: false,
|
||||
})
|
||||
.await?;
|
||||
let read_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
|
||||
)
|
||||
.await??;
|
||||
let read: ConfigReadResponse = to_response(read_resp)?;
|
||||
assert_eq!(read.config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
|
||||
let sandbox = read
|
||||
.config
|
||||
.sandbox_workspace_write
|
||||
.as_ref()
|
||||
.expect("sandbox workspace write");
|
||||
assert_eq!(sandbox.writable_roots, vec![PathBuf::from("/tmp")]);
|
||||
assert!(!sandbox.network_access);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
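Taken together, the value-write and conflict tests pin down an optimistic-concurrency contract: a config read reports a per-key version (an opaque sha256: digest above), and a write carrying a stale expected_version fails with configVersionConflict instead of clobbering a concurrent edit. A minimal sketch of the retry shape a caller might layer on top — read_version and write_with_version below are hypothetical stand-ins for the two round-trips, not part of the protocol:

// Illustrative retry loop over an optimistic-concurrency write API.
fn write_with_retry(
    mut read_version: impl FnMut() -> Option<String>,
    mut write_with_version: impl FnMut(Option<String>) -> Result<(), String>,
    mut retries: u32,
) -> Result<(), String> {
    loop {
        let version = read_version(); // version observed at read time
        match write_with_version(version) {
            Ok(()) => return Ok(()),
            // A conflict means another writer got in first: re-read and retry.
            Err(code) if code == "configVersionConflict" && retries > 0 => retries -= 1,
            Err(code) => return Err(code),
        }
    }
}

fn main() {
    let mut calls = 0;
    let outcome = write_with_retry(
        || Some("sha256:v1".to_string()),
        |_version| {
            calls += 1;
            // Simulate losing the race once, then succeeding on the retry.
            if calls == 1 {
                Err("configVersionConflict".to_string())
            } else {
                Ok(())
            }
        },
        3,
    );
    assert!(outcome.is_ok());
}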
@@ -1,4 +1,5 @@
mod account;
+mod config_rpc;
mod model_list;
mod rate_limits;
mod review;

@@ -4,6 +4,7 @@ use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::to_response;
+use app_test_support::write_models_cache;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::Model;
@@ -11,7 +12,7 @@ use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_app_server_protocol::RequestId;
-use codex_protocol::config_types::ReasoningEffort;
+use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -22,6 +23,7 @@ const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
    let codex_home = TempDir::new()?;
+   write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
@@ -114,6 +116,39 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.2".to_string(),
            model: "gpt-5.2".to_string(),
            display_name: "gpt-5.2".to_string(),
            description:
                "Latest frontier model with improvements across knowledge, reasoning and coding"
                    .to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
                    description: "Balances speed with some reasoning; useful for straightforward \
                                  queries and short explanations"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Provides a solid balance of reasoning depth and latency for \
                                  general-purpose tasks"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Maximizes reasoning depth for complex or ambiguous problems"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::XHigh,
                    description: "Extra high reasoning for complex problems".to_string(),
                },
            ],
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.1".to_string(),
            model: "gpt-5.1".to_string(),
@@ -151,6 +186,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
    let codex_home = TempDir::new()?;
+   write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
@@ -240,14 +276,37 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(fourth_response)?;

    assert_eq!(fourth_items.len(), 1);
-   assert_eq!(fourth_items[0].id, "gpt-5.1");
-   assert!(fourth_cursor.is_none());
+   assert_eq!(fourth_items[0].id, "gpt-5.2");
+   let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;

+   let fifth_request = mcp
+       .send_list_models_request(ModelListParams {
+           limit: Some(1),
+           cursor: Some(fifth_cursor.clone()),
+       })
+       .await?;

+   let fifth_response: JSONRPCResponse = timeout(
+       DEFAULT_TIMEOUT,
+       mcp.read_stream_until_response_message(RequestId::Integer(fifth_request)),
+   )
+   .await??;

+   let ModelListResponse {
+       data: fifth_items,
+       next_cursor: fifth_cursor,
+   } = to_response::<ModelListResponse>(fifth_response)?;

+   assert_eq!(fifth_items.len(), 1);
+   assert_eq!(fifth_items[0].id, "gpt-5.1");
+   assert!(fifth_cursor.is_none());
    Ok(())
}

#[tokio::test]
async fn list_models_rejects_invalid_cursor() -> Result<()> {
    let codex_home = TempDir::new()?;
+   write_models_cache(codex_home.path())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;

    timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;

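The extra fifth page turns this test into a complete walk of the cursor chain, and the client-side contract it pins down is simple: pass next_cursor back verbatim until it comes back None. Reduced to a standalone loop — fetch_page below is a stand-in for send_list_models_request plus the response read, not a real helper:

// Drain every page of a cursor-paginated listing.
fn collect_all<T>(
    mut fetch_page: impl FnMut(Option<String>) -> (Vec<T>, Option<String>),
) -> Vec<T> {
    let mut all = Vec::new();
    let mut cursor = None;
    loop {
        let (mut items, next) = fetch_page(cursor);
        all.append(&mut items);
        match next {
            Some(c) => cursor = Some(c), // hand the cursor back unchanged
            None => break,               // last page reached
        }
    }
    all
}

fn main() {
    // Two single-item pages, mirroring the limit = 1 pagination in the test.
    let mut pages = vec![
        (vec!["first"], Some("cursor-1".to_string())),
        (vec!["second"], None),
    ]
    .into_iter();
    let all = collect_all(|_cursor| pages.next().expect("no page past the last cursor"));
    assert_eq!(all, vec!["first", "second"]);
}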
@@ -11,6 +11,7 @@ use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
+use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -153,6 +154,7 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
                resets_at: Some(secondary_reset_timestamp),
            }),
            credits: None,
+           plan_type: Some(AccountPlanType::Pro),
        },
    };
    assert_eq!(received, expected);

@@ -9,12 +9,13 @@ use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
+use codex_app_server_protocol::ReviewDelivery;
use codex_app_server_protocol::ReviewStartParams;
+use codex_app_server_protocol::ReviewStartResponse;
use codex_app_server_protocol::ReviewTarget;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
-use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use serde_json::json;
use tempfile::TempDir;
@@ -59,7 +60,7 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
    let review_req = mcp
        .send_review_start_request(ReviewStartParams {
            thread_id: thread_id.clone(),
-           append_to_original_thread: true,
+           delivery: Some(ReviewDelivery::Inline),
            target: ReviewTarget::Commit {
                sha: "1234567deadbeef".to_string(),
                title: Some("Tidy UI colors".to_string()),
@@ -71,43 +72,43 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
        mcp.read_stream_until_response_message(RequestId::Integer(review_req)),
    )
    .await??;
-   let TurnStartResponse { turn } = to_response::<TurnStartResponse>(review_resp)?;
+   let ReviewStartResponse {
+       turn,
+       review_thread_id,
+   } = to_response::<ReviewStartResponse>(review_resp)?;
+   assert_eq!(review_thread_id, thread_id.clone());
    let turn_id = turn.id.clone();
    assert_eq!(turn.status, TurnStatus::InProgress);
    assert_eq!(turn.items.len(), 1);
    match &turn.items[0] {
        ThreadItem::UserMessage { content, .. } => {
            assert_eq!(content.len(), 1);
            assert!(matches!(
                &content[0],
                codex_app_server_protocol::UserInput::Text { .. }
            ));
        }
        other => panic!("expected user message, got {other:?}"),
    }

    let _started: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await??;
-   let item_started: JSONRPCNotification = timeout(
-       DEFAULT_READ_TIMEOUT,
-       mcp.read_stream_until_notification_message("item/started"),
-   )
-   .await??;
-   let started: ItemStartedNotification =
-       serde_json::from_value(item_started.params.expect("params must be present"))?;
-   match started.item {
-       ThreadItem::CodeReview { id, review } => {
-           assert_eq!(id, turn_id);
-           assert_eq!(review, "commit 1234567");
+   // Confirm we see the EnteredReviewMode marker on the main thread.
+   let mut saw_entered_review_mode = false;
+   for _ in 0..10 {
+       let item_started: JSONRPCNotification = timeout(
+           DEFAULT_READ_TIMEOUT,
+           mcp.read_stream_until_notification_message("item/started"),
+       )
+       .await??;
+       let started: ItemStartedNotification =
+           serde_json::from_value(item_started.params.expect("params must be present"))?;
+       match started.item {
+           ThreadItem::EnteredReviewMode { id, review } => {
+               assert_eq!(id, turn_id);
+               assert_eq!(review, "commit 1234567: Tidy UI colors");
+               saw_entered_review_mode = true;
+               break;
+           }
+           _ => continue,
        }
-       other => panic!("expected code review item, got {other:?}"),
    }
+   assert!(
+       saw_entered_review_mode,
+       "did not observe enteredReviewMode item"
+   );

    // Confirm we see the ExitedReviewMode marker (with review text)
    // on the same turn. Ignore any other items the stream surfaces.
    let mut review_body: Option<String> = None;
-   for _ in 0..5 {
+   for _ in 0..10 {
        let review_notif: JSONRPCNotification = timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_notification_message("item/completed"),
@@ -116,13 +117,12 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
        let completed: ItemCompletedNotification =
            serde_json::from_value(review_notif.params.expect("params must be present"))?;
        match completed.item {
-           ThreadItem::CodeReview { id, review } => {
+           ThreadItem::ExitedReviewMode { id, review } => {
                assert_eq!(id, turn_id);
                review_body = Some(review);
                break;
            }
-           ThreadItem::UserMessage { .. } => continue,
-           other => panic!("unexpected item/completed payload: {other:?}"),
+           _ => continue,
        }
    }

@@ -146,7 +146,7 @@ async fn review_start_rejects_empty_base_branch() -> Result<()> {
    let request_id = mcp
        .send_review_start_request(ReviewStartParams {
            thread_id,
-           append_to_original_thread: true,
+           delivery: Some(ReviewDelivery::Inline),
            target: ReviewTarget::BaseBranch {
                branch: " ".to_string(),
            },
@@ -167,6 +167,56 @@ async fn review_start_rejects_empty_base_branch() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<()> {
    let review_payload = json!({
        "findings": [],
        "overall_correctness": "ok",
        "overall_explanation": "detached review",
        "overall_confidence_score": 0.5
    })
    .to_string();
    let responses = vec![create_final_assistant_message_sse_response(
        &review_payload,
    )?];
    let server = create_mock_chat_completions_server_unchecked(responses).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let thread_id = start_default_thread(&mut mcp).await?;

    let review_req = mcp
        .send_review_start_request(ReviewStartParams {
            thread_id: thread_id.clone(),
            delivery: Some(ReviewDelivery::Detached),
            target: ReviewTarget::Custom {
                instructions: "detached review".to_string(),
            },
        })
        .await?;
    let review_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(review_req)),
    )
    .await??;
    let ReviewStartResponse {
        turn,
        review_thread_id,
    } = to_response::<ReviewStartResponse>(review_resp)?;

    assert_eq!(turn.status, TurnStatus::InProgress);
    assert_ne!(
        review_thread_id, thread_id,
        "detached review should run on a different thread"
    );

    Ok(())
}

#[tokio::test]
async fn review_start_rejects_empty_commit_sha() -> Result<()> {
    let server = create_mock_chat_completions_server_unchecked(vec![]).await;
@@ -180,7 +230,7 @@ async fn review_start_rejects_empty_commit_sha() -> Result<()> {
    let request_id = mcp
        .send_review_start_request(ReviewStartParams {
            thread_id,
-           append_to_original_thread: true,
+           delivery: Some(ReviewDelivery::Inline),
            target: ReviewTarget::Commit {
                sha: "\t".to_string(),
                title: None,
@@ -215,7 +265,7 @@ async fn review_start_rejects_empty_custom_instructions() -> Result<()> {
    let request_id = mcp
        .send_review_start_request(ReviewStartParams {
            thread_id,
-           append_to_original_thread: true,
+           delivery: Some(ReviewDelivery::Inline),
            target: ReviewTarget::Custom {
                instructions: "\n\n".to_string(),
            },

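The rewritten assertions no longer assume the marker item arrives first; they scan a bounded number of notifications and ignore everything else. The same pattern in miniature over plain values — Item here is a made-up enum for illustration, not the protocol's ThreadItem:

#[derive(Debug, PartialEq)]
enum Item {
    EnteredReviewMode(String),
    Other,
}

// Scan at most `max_scan` items for the marker; bounded so a chatty stream
// cannot make the caller loop forever.
fn find_entered_review_mode(
    stream: impl IntoIterator<Item = Item>,
    max_scan: usize,
) -> Option<String> {
    for item in stream.into_iter().take(max_scan) {
        if let Item::EnteredReviewMode(review) = item {
            return Some(review); // first marker wins; everything else is skipped
        }
    }
    None
}

fn main() {
    let stream = vec![
        Item::Other,
        Item::EnteredReviewMode("commit 1234567: Tidy UI colors".into()),
    ];
    assert_eq!(
        find_entered_review_mode(stream, 10),
        Some("commit 1234567: Tidy UI colors".to_string())
    );
}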
@@ -2,37 +2,100 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
+use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
-use codex_app_server_protocol::ThreadListParams;
+use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListResponse;
+use codex_protocol::protocol::GitInfo as CoreGitInfo;
+use std::path::Path;
+use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

async fn init_mcp(codex_home: &Path) -> Result<McpProcess> {
    let mut mcp = McpProcess::new(codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    Ok(mcp)
}

async fn list_threads(
    mcp: &mut McpProcess,
    cursor: Option<String>,
    limit: Option<u32>,
    providers: Option<Vec<String>>,
) -> Result<ThreadListResponse> {
    let request_id = mcp
        .send_thread_list_request(codex_app_server_protocol::ThreadListParams {
            cursor,
            limit,
            model_providers: providers,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    to_response::<ThreadListResponse>(resp)
}

fn create_fake_rollouts<F, G>(
    codex_home: &Path,
    count: usize,
    provider_for_index: F,
    timestamp_for_index: G,
    preview: &str,
) -> Result<Vec<String>>
where
    F: Fn(usize) -> &'static str,
    G: Fn(usize) -> (String, String),
{
    let mut ids = Vec::with_capacity(count);
    for i in 0..count {
        let (ts_file, ts_rfc) = timestamp_for_index(i);
        ids.push(create_fake_rollout(
            codex_home,
            &ts_file,
            &ts_rfc,
            preview,
            Some(provider_for_index(i)),
            None,
        )?);
    }
    Ok(ids)
}

fn timestamp_at(
    year: i32,
    month: u32,
    day: u32,
    hour: u32,
    minute: u32,
    second: u32,
) -> (String, String) {
    (
        format!("{year:04}-{month:02}-{day:02}T{hour:02}-{minute:02}-{second:02}"),
        format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z"),
    )
}

#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

-   let mut mcp = McpProcess::new(codex_home.path()).await?;
-   timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
+   let mut mcp = init_mcp(codex_home.path()).await?;

    // List threads in an empty CODEX_HOME; should return an empty page with nextCursor: null.
-   let list_id = mcp
-       .send_thread_list_request(ThreadListParams {
-           cursor: None,
-           limit: Some(10),
-           model_providers: None,
-       })
-       .await?;
-   let list_resp: JSONRPCResponse = timeout(
-       DEFAULT_READ_TIMEOUT,
-       mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
-   )
-   .await??;
-   let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(list_resp)?;
+   let ThreadListResponse { data, next_cursor } = list_threads(
+       &mut mcp,
+       None,
+       Some(10),
+       Some(vec!["mock_provider".to_string()]),
+   )
+   .await?;
    assert!(data.is_empty());
    assert_eq!(next_cursor, None);

@@ -63,6 +126,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
        "2025-01-02T12:00:00Z",
        "Hello",
        Some("mock_provider"),
+       None,
    )?;
    let _b = create_fake_rollout(
        codex_home.path(),
@@ -70,6 +134,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
        "2025-01-01T13:00:00Z",
        "Hello",
        Some("mock_provider"),
+       None,
    )?;
    let _c = create_fake_rollout(
        codex_home.path(),
@@ -77,58 +142,54 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
        "2025-01-01T12:00:00Z",
        "Hello",
        Some("mock_provider"),
+       None,
    )?;

-   let mut mcp = McpProcess::new(codex_home.path()).await?;
-   timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
+   let mut mcp = init_mcp(codex_home.path()).await?;

    // Page 1: limit 2 → expect next_cursor Some.
-   let page1_id = mcp
-       .send_thread_list_request(ThreadListParams {
-           cursor: None,
-           limit: Some(2),
-           model_providers: Some(vec!["mock_provider".to_string()]),
-       })
-       .await?;
-   let page1_resp: JSONRPCResponse = timeout(
-       DEFAULT_READ_TIMEOUT,
-       mcp.read_stream_until_response_message(RequestId::Integer(page1_id)),
-   )
-   .await??;
    let ThreadListResponse {
        data: data1,
        next_cursor: cursor1,
-   } = to_response::<ThreadListResponse>(page1_resp)?;
+   } = list_threads(
+       &mut mcp,
+       None,
+       Some(2),
+       Some(vec!["mock_provider".to_string()]),
+   )
+   .await?;
    assert_eq!(data1.len(), 2);
    for thread in &data1 {
        assert_eq!(thread.preview, "Hello");
        assert_eq!(thread.model_provider, "mock_provider");
        assert!(thread.created_at > 0);
+       assert_eq!(thread.cwd, PathBuf::from("/"));
+       assert_eq!(thread.cli_version, "0.0.0");
+       assert_eq!(thread.source, SessionSource::Cli);
+       assert_eq!(thread.git_info, None);
    }
    let cursor1 = cursor1.expect("expected nextCursor on first page");

    // Page 2: with cursor → expect next_cursor None when no more results.
-   let page2_id = mcp
-       .send_thread_list_request(ThreadListParams {
-           cursor: Some(cursor1),
-           limit: Some(2),
-           model_providers: Some(vec!["mock_provider".to_string()]),
-       })
-       .await?;
-   let page2_resp: JSONRPCResponse = timeout(
-       DEFAULT_READ_TIMEOUT,
-       mcp.read_stream_until_response_message(RequestId::Integer(page2_id)),
-   )
-   .await??;
    let ThreadListResponse {
        data: data2,
        next_cursor: cursor2,
-   } = to_response::<ThreadListResponse>(page2_resp)?;
+   } = list_threads(
+       &mut mcp,
+       Some(cursor1),
+       Some(2),
+       Some(vec!["mock_provider".to_string()]),
+   )
+   .await?;
    assert!(data2.len() <= 2);
    for thread in &data2 {
        assert_eq!(thread.preview, "Hello");
        assert_eq!(thread.model_provider, "mock_provider");
        assert!(thread.created_at > 0);
        assert_eq!(thread.cwd, PathBuf::from("/"));
        assert_eq!(thread.cli_version, "0.0.0");
        assert_eq!(thread.source, SessionSource::Cli);
        assert_eq!(thread.git_info, None);
    }
    assert_eq!(cursor2, None, "expected nextCursor to be null on last page");

@@ -147,6 +208,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
        "2025-01-02T10:00:00Z",
        "X",
        Some("mock_provider"),
+       None,
    )?; // mock_provider
    let _b = create_fake_rollout(
        codex_home.path(),
@@ -154,25 +216,19 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
        "2025-01-02T11:00:00Z",
        "X",
        Some("other_provider"),
+       None,
    )?;

-   let mut mcp = McpProcess::new(codex_home.path()).await?;
-   timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
+   let mut mcp = init_mcp(codex_home.path()).await?;

    // Filter to only other_provider; expect 1 item, nextCursor None.
-   let list_id = mcp
-       .send_thread_list_request(ThreadListParams {
-           cursor: None,
-           limit: Some(10),
-           model_providers: Some(vec!["other_provider".to_string()]),
-       })
-       .await?;
-   let resp: JSONRPCResponse = timeout(
-       DEFAULT_READ_TIMEOUT,
-       mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
-   )
-   .await??;
-   let ThreadListResponse { data, next_cursor } = to_response::<ThreadListResponse>(resp)?;
+   let ThreadListResponse { data, next_cursor } = list_threads(
+       &mut mcp,
+       None,
+       Some(10),
+       Some(vec!["other_provider".to_string()]),
+   )
+   .await?;
    assert_eq!(data.len(), 1);
    assert_eq!(next_cursor, None);
    let thread = &data[0];
@@ -180,6 +236,196 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
    assert_eq!(thread.model_provider, "other_provider");
    let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
    assert_eq!(thread.created_at, expected_ts);
+   assert_eq!(thread.cwd, PathBuf::from("/"));
+   assert_eq!(thread.cli_version, "0.0.0");
+   assert_eq!(thread.source, SessionSource::Cli);
+   assert_eq!(thread.git_info, None);

    Ok(())
}

#[tokio::test]
async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    // Newest 16 conversations belong to a different provider; the older 8 are the
    // only ones that match the filter. We request 8 so the server must keep
    // paging past the first two pages to reach the desired count.
    create_fake_rollouts(
        codex_home.path(),
        24,
        |i| {
            if i < 16 {
                "skip_provider"
            } else {
                "target_provider"
            }
        },
        |i| timestamp_at(2025, 3, 30 - i as u32, 12, 0, 0),
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    // Request 8 threads for the target provider; the matches only start on the
    // third page so we rely on pagination to reach the limit.
    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(8),
        Some(vec!["target_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        8,
        "should keep paging until the requested count is filled"
    );
    assert!(
        data.iter()
            .all(|thread| thread.model_provider == "target_provider"),
        "all returned threads must match the requested provider"
    );
    assert_eq!(
        next_cursor, None,
        "once the requested count is satisfied on the final page, nextCursor should be None"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_enforces_max_limit() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    create_fake_rollouts(
        codex_home.path(),
        105,
        |_| "mock_provider",
        |i| {
            let month = 5 + (i / 28);
            let day = (i % 28) + 1;
            timestamp_at(2025, month as u32, day as u32, 0, 0, 0)
        },
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(200),
        Some(vec!["mock_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        100,
        "limit should be clamped to the maximum page size"
    );
    assert!(
        next_cursor.is_some(),
        "when more than the maximum exist, nextCursor should continue pagination"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    // Only the last 7 conversations match the provider filter; we ask for 10 to
    // ensure the server exhausts pagination without looping forever.
    create_fake_rollouts(
        codex_home.path(),
        22,
        |i| {
            if i < 15 {
                "skip_provider"
            } else {
                "target_provider"
            }
        },
        |i| timestamp_at(2025, 4, 28 - i as u32, 8, 0, 0),
        "Hello",
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    // Request more threads than exist after filtering; expect all matches to be
    // returned with nextCursor None.
    let ThreadListResponse { data, next_cursor } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["target_provider".to_string()]),
    )
    .await?;
    assert_eq!(
        data.len(),
        7,
        "all available filtered threads should be returned"
    );
    assert!(
        data.iter()
            .all(|thread| thread.model_provider == "target_provider"),
        "results should still respect the provider filter"
    );
    assert_eq!(
        next_cursor, None,
        "when results are exhausted before reaching the limit, nextCursor should be None"
    );

    Ok(())
}

#[tokio::test]
async fn thread_list_includes_git_info() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_minimal_config(codex_home.path())?;

    let git_info = CoreGitInfo {
        commit_hash: Some("abc123".to_string()),
        branch: Some("main".to_string()),
        repository_url: Some("https://example.com/repo.git".to_string()),
    };
    let conversation_id = create_fake_rollout(
        codex_home.path(),
        "2025-02-01T09-00-00",
        "2025-02-01T09:00:00Z",
        "Git info preview",
        Some("mock_provider"),
        Some(git_info),
    )?;

    let mut mcp = init_mcp(codex_home.path()).await?;

    let ThreadListResponse { data, .. } = list_threads(
        &mut mcp,
        None,
        Some(10),
        Some(vec!["mock_provider".to_string()]),
    )
    .await?;
    let thread = data
        .iter()
        .find(|t| t.id == conversation_id)
        .expect("expected thread for created rollout");

    let expected_git = ApiGitInfo {
        sha: Some("abc123".to_string()),
        branch: Some("main".to_string()),
        origin_url: Some("https://example.com/repo.git".to_string()),
    };
    assert_eq!(thread.git_info, Some(expected_git));
    assert_eq!(thread.source, SessionSource::Cli);
    assert_eq!(thread.cwd, PathBuf::from("/"));
    assert_eq!(thread.cli_version, "0.0.0");

    Ok(())
}

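The three new pagination tests (fill-to-limit, max-limit clamp, early exhaustion) jointly describe the server-side filtering loop: clamp the requested limit, then keep consuming underlying pages until enough matches accumulate or the listing runs dry. A standalone sketch of that contract — the index-based cursor is invented for illustration; the real cursor format is opaque here:

const MAX_LIMIT: usize = 100; // the clamp the max-limit test observes

// Returns matching ids plus a cursor only when more matches remain.
fn list_filtered(
    threads: &[(&str, u64)],
    provider: &str,
    limit: usize,
) -> (Vec<u64>, Option<usize>) {
    let limit = limit.min(MAX_LIMIT);
    let mut out = Vec::new();
    for (idx, (p, id)) in threads.iter().enumerate() {
        if *p == provider {
            out.push(*id);
            if out.len() == limit {
                // Limit satisfied: only emit a cursor if a later match exists,
                // so the final page reports next_cursor = None.
                let more = threads[idx + 1..].iter().any(|(p, _)| *p == provider);
                return (out, more.then_some(idx + 1));
            }
        }
    }
    (out, None) // exhausted before the limit: no dangling cursor
}

fn main() {
    // 24 threads, only the oldest 8 match: mirrors thread_list_fetches_until_limit_or_exhausted.
    let threads: Vec<(&str, u64)> = (0u64..24)
        .map(|i| (if i < 16 { "skip_provider" } else { "target_provider" }, i))
        .collect();
    let (items, cursor) = list_filtered(&threads, "target_provider", 8);
    assert_eq!(items.len(), 8);
    assert_eq!(cursor, None);
}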
@@ -5,6 +5,7 @@ use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
+use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
@@ -14,6 +15,7 @@ use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
+use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;

@@ -75,6 +77,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
        "2025-01-05T12:00:00Z",
        preview,
        Some("mock_provider"),
+       None,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -97,6 +100,10 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
    assert_eq!(thread.preview, preview);
    assert_eq!(thread.model_provider, "mock_provider");
    assert!(thread.path.is_absolute());
+   assert_eq!(thread.cwd, PathBuf::from("/"));
+   assert_eq!(thread.cli_version, "0.0.0");
+   assert_eq!(thread.source, SessionSource::Cli);
+   assert_eq!(thread.git_info, None);

    assert_eq!(
        thread.turns.len(),

@@ -3,7 +3,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_chat_completions_server;
-use app_test_support::create_shell_sse_response;
+use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
@@ -41,7 +41,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
    std::fs::create_dir(&working_directory)?;

    // Mock server: long-running shell command then (after abort) nothing else needed.
-   let server = create_mock_chat_completions_server(vec![create_shell_sse_response(
+   let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
        shell_command.clone(),
        Some(&working_directory),
        Some(10_000),
@@ -88,10 +88,11 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
    // Give the command a brief moment to start.
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;

+   let thread_id = thread.id.clone();
    // Interrupt the in-progress turn by id (v2 API).
    let interrupt_id = mcp
        .send_turn_interrupt_request(TurnInterruptParams {
-           thread_id: thread.id,
+           thread_id: thread_id.clone(),
            turn_id: turn.id,
        })
        .await?;
@@ -112,6 +113,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
        .params
        .expect("turn/completed params must be present"),
    )?;
+   assert_eq!(completed.thread_id, thread_id);
    assert_eq!(completed.turn.status, TurnStatus::Interrupted);

    Ok(())
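
The new thread_id binding exists because moving thread.id into TurnInterruptParams would consume the String, leaving nothing to compare against completed.thread_id at the end. The same move-versus-clone shape in isolation — Params and send are invented for the illustration:

struct Params {
    thread_id: String,
}

fn send(_params: Params) {}

fn main() {
    let thread_id = String::from("thread-1");
    let kept = thread_id.clone(); // survives the move below
    send(Params { thread_id });   // thread_id is moved into the request here
    assert_eq!(kept, "thread-1"); // still available for the final assertion
}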
@@ -1,13 +1,17 @@
use anyhow::Result;
use app_test_support::McpProcess;
+use app_test_support::create_apply_patch_sse_response;
+use app_test_support::create_exec_command_sse_response;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
+use app_test_support::create_mock_chat_completions_server_unchecked;
-use app_test_support::create_shell_sse_response;
+use app_test_support::create_shell_command_sse_response;
+use app_test_support::format_with_current_shell_display;
use app_test_support::to_response;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
@@ -26,8 +30,8 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
-use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
+use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
@@ -93,6 +97,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
    .await??;
    let started: TurnStartedNotification =
        serde_json::from_value(notif.params.expect("params must be present"))?;
+   assert_eq!(started.thread_id, thread.id);
    assert_eq!(
        started.turn.status,
        codex_app_server_protocol::TurnStatus::InProgress
@@ -136,6 +141,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
        .params
        .expect("turn/completed params must be present"),
    )?;
+   assert_eq!(completed.thread_id, thread.id);
    assert_eq!(completed.turn.status, TurnStatus::Completed);

    Ok(())
@@ -203,7 +209,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    // Mock server: first turn requests a shell call (elicitation), then completes.
    // Second turn same, but we'll set approval_policy=never to avoid elicitation.
    let responses = vec![
-       create_shell_sse_response(
+       create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -214,7 +220,7 @@
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
-       create_shell_sse_response(
+       create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
@@ -328,6 +334,144 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn turn_start_exec_approval_decline_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let tmp = TempDir::new()?;
    let codex_home = tmp.path().to_path_buf();
    let workspace = tmp.path().join("workspace");
    std::fs::create_dir(&workspace)?;

    let responses = vec![
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            None,
            Some(5000),
            "call-decline",
        )?,
        create_final_assistant_message_sse_response("done")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;

    let mut mcp = McpProcess::new(codex_home.as_path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run python".to_string(),
            }],
            cwd: Some(workspace.clone()),
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;

    let started_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let started_notif = mcp
                .read_stream_until_notification_message("item/started")
                .await?;
            let started: ItemStartedNotification =
                serde_json::from_value(started_notif.params.clone().expect("item/started params"))?;
            if let ThreadItem::CommandExecution { .. } = started.item {
                return Ok::<ThreadItem, anyhow::Error>(started.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution { id, status, .. } = started_command_execution else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(id, "call-decline");
    assert_eq!(status, CommandExecutionStatus::InProgress);

    let server_req = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_request_message(),
    )
    .await??;
    let ServerRequest::CommandExecutionRequestApproval { request_id, params } = server_req else {
        panic!("expected CommandExecutionRequestApproval request")
    };
    assert_eq!(params.item_id, "call-decline");
    assert_eq!(params.thread_id, thread.id);
    assert_eq!(params.turn_id, turn.id);

    mcp.send_response(
        request_id,
        serde_json::to_value(CommandExecutionRequestApprovalResponse {
            decision: ApprovalDecision::Decline,
        })?,
    )
    .await?;

    let completed_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let completed_notif = mcp
                .read_stream_until_notification_message("item/completed")
                .await?;
            let completed: ItemCompletedNotification = serde_json::from_value(
                completed_notif
                    .params
                    .clone()
                    .expect("item/completed params"),
            )?;
            if let ThreadItem::CommandExecution { .. } = completed.item {
                return Ok::<ThreadItem, anyhow::Error>(completed.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution {
        id,
        status,
        exit_code,
        aggregated_output,
        ..
    } = completed_command_execution
    else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(id, "call-decline");
    assert_eq!(status, CommandExecutionStatus::Declined);
    assert!(exit_code.is_none());
    assert!(aggregated_output.is_none());

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    Ok(())
}

#[tokio::test]
async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));
@@ -343,23 +487,15 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
    std::fs::create_dir(&second_cwd)?;

    let responses = vec![
-       create_shell_sse_response(
-           vec![
-               "bash".to_string(),
-               "-lc".to_string(),
-               "echo first turn".to_string(),
-           ],
+       create_shell_command_sse_response(
+           vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
-       create_shell_sse_response(
-           vec![
-               "bash".to_string(),
-               "-lc".to_string(),
-               "echo second turn".to_string(),
-           ],
+       create_shell_command_sse_response(
+           vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-second",
@@ -465,7 +601,8 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(cwd, second_cwd);
-   assert_eq!(command, "bash -lc 'echo second turn'");
+   let expected_command = format_with_current_shell_display("echo second turn");
+   assert_eq!(command, expected_command);
    assert_eq!(status, CommandExecutionStatus::InProgress);

    timeout(
@@ -588,6 +725,26 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
    )
    .await?;

    let output_delta_notif = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("item/fileChange/outputDelta"),
    )
    .await??;
    let output_delta: FileChangeOutputDeltaNotification = serde_json::from_value(
        output_delta_notif
            .params
            .clone()
            .expect("item/fileChange/outputDelta params"),
    )?;
    assert_eq!(output_delta.thread_id, thread.id);
    assert_eq!(output_delta.turn_id, turn.id);
    assert_eq!(output_delta.item_id, "patch-call");
    assert!(
        !output_delta.delta.is_empty(),
        "expected delta to be non-empty, got: {}",
        output_delta.delta
    );

    let completed_file_change = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let completed_notif = mcp
@@ -771,6 +928,134 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()
    Ok(())
}

#[tokio::test]
#[cfg_attr(windows, ignore = "process id reporting differs on Windows")]
async fn command_execution_notifications_include_process_id() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let responses = vec![
        create_exec_command_sse_response("uexec-1")?,
        create_final_assistant_message_sse_response("done")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri(), "never")?;
    let config_toml = codex_home.path().join("config.toml");
    let mut config_contents = std::fs::read_to_string(&config_toml)?;
    config_contents.push_str(
        r#"
[features]
unified_exec = true
"#,
    );
    std::fs::write(&config_toml, config_contents)?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let start_id = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let start_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;

    let turn_id = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "run a command".to_string(),
            }],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
    )
    .await??;
    let TurnStartResponse { turn: _turn } = to_response::<TurnStartResponse>(turn_resp)?;

    let started_command = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let notif = mcp
                .read_stream_until_notification_message("item/started")
                .await?;
            let started: ItemStartedNotification = serde_json::from_value(
                notif
                    .params
                    .clone()
                    .expect("item/started should include params"),
            )?;
            if let ThreadItem::CommandExecution { .. } = started.item {
                return Ok::<ThreadItem, anyhow::Error>(started.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution {
        id,
        process_id: started_process_id,
        status,
        ..
    } = started_command
    else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(id, "uexec-1");
    assert_eq!(status, CommandExecutionStatus::InProgress);
    let started_process_id = started_process_id.expect("process id should be present");

    let completed_command = timeout(DEFAULT_READ_TIMEOUT, async {
        loop {
            let notif = mcp
                .read_stream_until_notification_message("item/completed")
                .await?;
            let completed: ItemCompletedNotification = serde_json::from_value(
                notif
                    .params
                    .clone()
                    .expect("item/completed should include params"),
            )?;
            if let ThreadItem::CommandExecution { .. } = completed.item {
                return Ok::<ThreadItem, anyhow::Error>(completed.item);
            }
        }
    })
    .await??;
    let ThreadItem::CommandExecution {
        id: completed_id,
        process_id: completed_process_id,
        status: completed_status,
        exit_code,
        ..
    } = completed_command
    else {
        unreachable!("loop ensures we break on command execution items");
    };
    assert_eq!(completed_id, "uexec-1");
    assert_eq!(completed_status, CommandExecutionStatus::Completed);
    assert_eq!(exit_code, Some(0));
    assert_eq!(
        completed_process_id.as_deref(),
        Some(started_process_id.as_str())
    );

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    Ok(())
}

// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(
    codex_home: &Path,

@@ -1,7 +1,8 @@
[package]
-edition = "2024"
name = "codex-apply-patch"
-version = { workspace = true }
+version.workspace = true
+edition.workspace = true
+license.workspace = true

[lib]
name = "codex_apply_patch"

@@ -31,6 +31,13 @@ pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_too

const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"];

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ApplyPatchShell {
    Unix,
    PowerShell,
    Cmd,
}

#[derive(Debug, Error, PartialEq)]
pub enum ApplyPatchError {
    #[error(transparent)]
@@ -96,6 +103,57 @@ pub struct ApplyPatchArgs {
    pub workdir: Option<String>,
}

fn classify_shell_name(shell: &str) -> Option<String> {
    std::path::Path::new(shell)
        .file_stem()
        .and_then(|name| name.to_str())
        .map(str::to_ascii_lowercase)
}
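
A quick sketch, not part of the diff: `classify_shell_name` keys off the final path component via `file_stem` and lowercases it, so path and extension spellings normalize to bare shell names.

    #[cfg(test)]
    mod classify_shell_name_sketch {
        use super::classify_shell_name;

        #[test]
        fn strips_paths_and_extensions() {
            assert_eq!(classify_shell_name("/bin/bash"), Some("bash".to_string()));
            assert_eq!(classify_shell_name("pwsh"), Some("pwsh".to_string()));
            assert_eq!(
                classify_shell_name("powershell.exe"),
                Some("powershell".to_string())
            );
        }
    }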

fn classify_shell(shell: &str, flag: &str) -> Option<ApplyPatchShell> {
    classify_shell_name(shell).and_then(|name| match name.as_str() {
        "bash" | "zsh" | "sh" if matches!(flag, "-lc" | "-c") => Some(ApplyPatchShell::Unix),
        "pwsh" | "powershell" if flag.eq_ignore_ascii_case("-command") => {
            Some(ApplyPatchShell::PowerShell)
        }
        "cmd" if flag.eq_ignore_ascii_case("/c") => Some(ApplyPatchShell::Cmd),
        _ => None,
    })
}

fn can_skip_flag(shell: &str, flag: &str) -> bool {
    classify_shell_name(shell).is_some_and(|name| {
        matches!(name.as_str(), "pwsh" | "powershell") && flag.eq_ignore_ascii_case("-noprofile")
    })
}

fn parse_shell_script(argv: &[String]) -> Option<(ApplyPatchShell, &str)> {
    match argv {
        [shell, flag, script] => classify_shell(shell, flag).map(|shell_type| {
            let script = script.as_str();
            (shell_type, script)
        }),
        [shell, skip_flag, flag, script] if can_skip_flag(shell, skip_flag) => {
            classify_shell(shell, flag).map(|shell_type| {
                let script = script.as_str();
                (shell_type, script)
            })
        }
        _ => None,
    }
}
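
To make the accepted shapes concrete, a small sketch (not part of the diff) exercising `parse_shell_script`: the three-element form for Unix shells and `cmd`, plus the four-element PowerShell form where a leading `-NoProfile` is skipped.

    #[cfg(test)]
    mod parse_shell_script_sketch {
        use super::{ApplyPatchShell, parse_shell_script};

        fn argv(parts: &[&str]) -> Vec<String> {
            parts.iter().map(|s| s.to_string()).collect()
        }

        #[test]
        fn recognizes_supported_shell_invocations() {
            let script = "apply_patch <<'EOF'\n...\nEOF";
            assert_eq!(
                parse_shell_script(&argv(&["bash", "-lc", script])).map(|(s, _)| s),
                Some(ApplyPatchShell::Unix)
            );
            assert_eq!(
                parse_shell_script(&argv(&["pwsh", "-NoProfile", "-Command", script]))
                    .map(|(s, _)| s),
                Some(ApplyPatchShell::PowerShell)
            );
            assert_eq!(
                parse_shell_script(&argv(&["cmd.exe", "/C", script])).map(|(s, _)| s),
                Some(ApplyPatchShell::Cmd)
            );
            // An unrecognized shell does not match any shape.
            assert!(parse_shell_script(&argv(&["fish", "-c", script])).is_none());
        }
    }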

fn extract_apply_patch_from_shell(
    shell: ApplyPatchShell,
    script: &str,
) -> std::result::Result<(String, Option<String>), ExtractHeredocError> {
    match shell {
        ApplyPatchShell::Unix | ApplyPatchShell::PowerShell | ApplyPatchShell::Cmd => {
            extract_apply_patch_from_bash(script)
        }
    }
}

pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
    match argv {
        // Direct invocation: apply_patch <patch>
@@ -103,9 +161,9 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
            Ok(source) => MaybeApplyPatch::Body(source),
            Err(e) => MaybeApplyPatch::PatchParseError(e),
        },
-        // Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
-        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
-            match extract_apply_patch_from_bash(script) {
+        // Shell heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ...
+        _ => match parse_shell_script(argv) {
+            Some((shell, script)) => match extract_apply_patch_from_shell(shell, script) {
                Ok((body, workdir)) => match parse_patch(&body) {
                    Ok(mut source) => {
                        source.workdir = workdir;
@@ -117,9 +175,9 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch {
                        MaybeApplyPatch::NotApplyPatch
                    }
                    Err(e) => MaybeApplyPatch::ShellParseError(e),
                }
            }
            _ => MaybeApplyPatch::NotApplyPatch,
            },
            None => MaybeApplyPatch::NotApplyPatch,
        },
    }
}
@@ -214,24 +272,17 @@ impl ApplyPatchAction {
/// cwd must be an absolute path so that we can resolve relative paths in the
/// patch.
pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified {
-    // Detect a raw patch body passed directly as the command or as the body of a bash -lc
+    // Detect a raw patch body passed directly as the command or as the body of a shell
    // script. In these cases, report an explicit error rather than applying the patch.
-    match argv {
-        [body] => {
-            if parse_patch(body).is_ok() {
-                return MaybeApplyPatchVerified::CorrectnessError(
-                    ApplyPatchError::ImplicitInvocation,
-                );
-            }
-        }
-        [bash, flag, script] if bash == "bash" && flag == "-lc" => {
-            if parse_patch(script).is_ok() {
-                return MaybeApplyPatchVerified::CorrectnessError(
-                    ApplyPatchError::ImplicitInvocation,
-                );
-            }
-        }
-        _ => {}
-    }
+    if let [body] = argv
+        && parse_patch(body).is_ok()
+    {
+        return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
+    }
+    if let Some((_, script)) = parse_shell_script(argv)
+        && parse_patch(script).is_ok()
+    {
+        return MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation);
+    }

    match maybe_parse_apply_patch(argv) {
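
As an illustration (not part of the diff; it assumes a Unix-style absolute cwd), both guard clauses reject a bare patch body instead of applying it:

    use std::path::Path;

    // A raw patch body passed as the entire command...
    let direct = vec!["*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string()];
    // ...or as the script of a recognized shell invocation...
    let via_shell = vec![
        "bash".to_string(),
        "-lc".to_string(),
        "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string(),
    ];
    // ...is reported as an explicit error rather than silently applied.
    for argv in [direct, via_shell] {
        assert!(matches!(
            maybe_parse_apply_patch_verified(&argv, Path::new("/work")),
            MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
        ));
    }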
@@ -648,13 +699,7 @@ fn derive_new_contents_from_chunks(
        }
    };

-    let mut original_lines: Vec<String> = original_contents.split('\n').map(String::from).collect();
-
-    // Drop the trailing empty element that results from the final newline so
-    // that line counts match the behaviour of standard `diff`.
-    if original_lines.last().is_some_and(String::is_empty) {
-        original_lines.pop();
-    }
+    let original_lines: Vec<String> = build_lines_from_contents(&original_contents);

    let replacements = compute_replacements(&original_lines, path, chunks)?;
    let new_lines = apply_replacements(original_lines, &replacements);
@@ -662,13 +707,67 @@ fn derive_new_contents_from_chunks(
    if !new_lines.last().is_some_and(String::is_empty) {
        new_lines.push(String::new());
    }
-    let new_contents = new_lines.join("\n");
+    let new_contents = build_contents_from_lines(&original_contents, &new_lines);
    Ok(AppliedPatch {
        original_contents,
        new_contents,
    })
}

// TODO(dylan-hurd-oai): I think we can migrate to just use `contents.lines()`
// across all platforms.
fn build_lines_from_contents(contents: &str) -> Vec<String> {
    if cfg!(windows) {
        contents.lines().map(String::from).collect()
    } else {
        let mut lines: Vec<String> = contents.split('\n').map(String::from).collect();

        // Drop the trailing empty element that results from the final newline so
        // that line counts match the behaviour of standard `diff`.
        if lines.last().is_some_and(String::is_empty) {
            lines.pop();
        }

        lines
    }
}
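
For reference, an illustration (not part of the diff) of the two behaviors this helper reconciles: `str::lines()` strips a trailing `\r` and never yields a trailing empty element, while `split('\n')` does neither, hence the manual `pop` on the non-Windows branch.

    let crlf = "a\r\nb\r\n";

    let via_lines: Vec<&str> = crlf.lines().collect();
    assert_eq!(via_lines, vec!["a", "b"]); // '\r' stripped, no trailing ""

    let via_split: Vec<&str> = crlf.split('\n').collect();
    assert_eq!(via_split, vec!["a\r", "b\r", ""]); // '\r' kept, trailing "" kept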

fn build_contents_from_lines(original_contents: &str, lines: &[String]) -> String {
    if cfg!(windows) {
        // for now, only compute this if we're on Windows.
        let uses_crlf = contents_uses_crlf(original_contents);
        if uses_crlf {
            lines.join("\r\n")
        } else {
            lines.join("\n")
        }
    } else {
        lines.join("\n")
    }
}

/// Detects whether the source file uses Windows CRLF line endings consistently.
/// We only consider a file CRLF-formatted if every newline is part of a
/// CRLF sequence. This avoids rewriting an LF-formatted file that merely
/// contains embedded sequences of "\r\n".
///
/// Returns `true` if the file uses CRLF line endings, `false` otherwise.
fn contents_uses_crlf(contents: &str) -> bool {
    let bytes = contents.as_bytes();
    let mut n_newlines = 0usize;
    let mut n_crlf = 0usize;
    for i in 0..bytes.len() {
        if bytes[i] == b'\n' {
            n_newlines += 1;
            if i > 0 && bytes[i - 1] == b'\r' {
                n_crlf += 1;
            }
        }
    }

    n_newlines > 0 && n_crlf == n_newlines
}
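
A few concrete cases (a sketch, not part of the diff) of the all-or-nothing rule above:

    assert!(contents_uses_crlf("a\r\nb\r\n"));       // every '\n' preceded by '\r'
    assert!(!contents_uses_crlf("a\nb\r\n"));        // mixed endings: treated as LF
    assert!(!contents_uses_crlf("no newline here")); // zero newlines: not CRLF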

/// Compute a list of replacements needed to transform `original_lines` into the
/// new lines, given the patch `chunks`. Each replacement is returned as
/// `(start_index, old_len, new_lines)`.
@@ -863,6 +962,22 @@ mod tests {
        strs_to_strings(&["bash", "-lc", script])
    }

    fn args_powershell(script: &str) -> Vec<String> {
        strs_to_strings(&["powershell.exe", "-Command", script])
    }

    fn args_powershell_no_profile(script: &str) -> Vec<String> {
        strs_to_strings(&["powershell.exe", "-NoProfile", "-Command", script])
    }

    fn args_pwsh(script: &str) -> Vec<String> {
        strs_to_strings(&["pwsh", "-NoProfile", "-Command", script])
    }

    fn args_cmd(script: &str) -> Vec<String> {
        strs_to_strings(&["cmd.exe", "/c", script])
    }

    fn heredoc_script(prefix: &str) -> String {
        format!(
            "{prefix}apply_patch <<'PATCH'\n*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\nPATCH"
@@ -882,8 +997,7 @@ mod tests {
        }]
    }

-    fn assert_match(script: &str, expected_workdir: Option<&str>) {
-        let args = args_bash(script);
+    fn assert_match_args(args: Vec<String>, expected_workdir: Option<&str>) {
        match maybe_parse_apply_patch(&args) {
            MaybeApplyPatch::Body(ApplyPatchArgs { hunks, workdir, .. }) => {
                assert_eq!(workdir.as_deref(), expected_workdir);
@@ -893,6 +1007,11 @@ mod tests {
        }
    }

    fn assert_match(script: &str, expected_workdir: Option<&str>) {
        let args = args_bash(script);
        assert_match_args(args, expected_workdir);
    }

    fn assert_not_match(script: &str) {
        let args = args_bash(script);
        assert_matches!(
@@ -978,6 +1097,13 @@ mod tests {
        assert_match(&heredoc_script(""), None);
    }

    #[test]
    fn test_heredoc_non_login_shell() {
        let script = heredoc_script("");
        let args = strs_to_strings(&["bash", "-c", &script]);
        assert_match_args(args, None);
    }

    #[test]
    fn test_heredoc_applypatch() {
        let args = strs_to_strings(&[
@@ -1006,6 +1132,28 @@ PATCH"#,
        }
    }

    #[test]
    fn test_powershell_heredoc() {
        let script = heredoc_script("");
        assert_match_args(args_powershell(&script), None);
    }

    #[test]
    fn test_powershell_heredoc_no_profile() {
        let script = heredoc_script("");
        assert_match_args(args_powershell_no_profile(&script), None);
    }

    #[test]
    fn test_pwsh_heredoc() {
        let script = heredoc_script("");
        assert_match_args(args_pwsh(&script), None);
    }

    #[test]
    fn test_cmd_heredoc_with_cd() {
        let script = heredoc_script("cd foo && ");
        assert_match_args(args_cmd(&script), Some("foo"));
    }

    #[test]
    fn test_heredoc_with_leading_cd() {
        assert_match(&heredoc_script("cd foo && "), Some("foo"));
@@ -1266,6 +1414,72 @@ PATCH"#,
        assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n");
    }

    /// Ensure CRLF line endings are preserved for updated files on Windows-style inputs.
    #[cfg(windows)]
    #[test]
    fn test_preserve_crlf_line_endings_on_update() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("crlf.txt");

        // Original file uses CRLF (\r\n) endings.
        std::fs::write(&path, b"a\r\nb\r\nc\r\n").unwrap();

        // Replace `b` -> `B` and append `d`.
        let patch = wrap_patch(&format!(
            r#"*** Update File: {}
@@
 a
-b
+B
@@
 c
+d
*** End of File"#,
            path.display()
        ));

        let mut stdout = Vec::new();
        let mut stderr = Vec::new();
        apply_patch(&patch, &mut stdout, &mut stderr).unwrap();

        let out = std::fs::read(&path).unwrap();
        // Expect all CRLF endings; count occurrences of CRLF and ensure there are 4 lines.
        let content = String::from_utf8_lossy(&out);
        assert!(content.contains("\r\n"));
        // No bare LF occurrences immediately preceding a non-CR: the text should not contain "a\nb".
        assert!(!content.contains("a\nb"));
        // Validate exact content sequence with CRLF delimiters.
        assert_eq!(content, "a\r\nB\r\nc\r\nd\r\n");
    }

    /// Ensure CRLF inputs with embedded carriage returns in the content are preserved.
    #[cfg(windows)]
    #[test]
    fn test_preserve_crlf_embedded_carriage_returns_on_append() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("crlf_cr_content.txt");

        // Original file: first line has a literal '\r' in the content before the CRLF terminator.
        std::fs::write(&path, b"foo\r\r\nbar\r\n").unwrap();

        // Append a new line without modifying existing ones.
        let patch = wrap_patch(&format!(
            r#"*** Update File: {}
@@
+BAZ
*** End of File"#,
            path.display()
        ));

        let mut stdout = Vec::new();
        let mut stderr = Vec::new();
        apply_patch(&patch, &mut stdout, &mut stderr).unwrap();

        let out = std::fs::read(&path).unwrap();
        // CRLF endings must be preserved and the extra CR in "foo\r\r" must not be collapsed.
        assert_eq!(out.as_slice(), b"foo\r\r\nbar\r\nBAZ\r\n");
    }

    #[test]
    fn test_pure_addition_chunk_followed_by_removal() {
        let dir = tempdir().unwrap();
@@ -1451,6 +1665,37 @@ PATCH"#,
        assert_eq!(expected, diff);
    }

    /// For LF-only inputs with a trailing newline, ensure that the helper used
    /// on Windows-style builds drops the synthetic trailing empty element so
    /// replacements behave like standard `diff` line numbering.
    #[test]
    fn test_derive_new_contents_lf_trailing_newline() {
        let dir = tempdir().unwrap();
        let path = dir.path().join("lf_trailing_newline.txt");
        fs::write(&path, "foo\nbar\n").unwrap();

        let patch = wrap_patch(&format!(
            r#"*** Update File: {}
@@
 foo
-bar
+BAR
"#,
            path.display()
        ));

        let patch = parse_patch(&patch).unwrap();
        let chunks = match patch.hunks.as_slice() {
            [Hunk::UpdateFile { chunks, .. }] => chunks,
            _ => panic!("Expected a single UpdateFile hunk"),
        };

        let AppliedPatch { new_contents, .. } =
            derive_new_contents_from_chunks(&path, chunks).unwrap();

        assert_eq!(new_contents, "foo\nBAR\n");
    }

    #[test]
    fn test_unified_diff_insert_at_eof() {
        // Insert a new line at end-of-file.

codex-rs/apply-patch/tests/fixtures/scenarios/.gitattributes (vendored, new file, 1 line)
@@ -0,0 +1 @@
** text eol=lf

codex-rs/apply-patch/tests/fixtures/scenarios/001_add_file/expected/bar.md (vendored, new file, 1 line)
@@ -0,0 +1 @@
This is a new file

codex-rs/apply-patch/tests/fixtures/scenarios/001_add_file/patch.txt (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
*** Begin Patch
*** Add File: bar.md
+This is a new file
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/002_multiple_operations/expected/modify.txt (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
line1
changed

@@ -0,0 +1 @@
created

codex-rs/apply-patch/tests/fixtures/scenarios/002_multiple_operations/input/delete.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
obsolete

codex-rs/apply-patch/tests/fixtures/scenarios/002_multiple_operations/input/modify.txt (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
line1
line2

codex-rs/apply-patch/tests/fixtures/scenarios/002_multiple_operations/patch.txt (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
*** Begin Patch
*** Add File: nested/new.txt
+created
*** Delete File: delete.txt
*** Update File: modify.txt
@@
-line2
+changed
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/003_multiple_chunks/expected/multi.txt (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
line1
changed2
line3
changed4

codex-rs/apply-patch/tests/fixtures/scenarios/003_multiple_chunks/input/multi.txt (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
line1
line2
line3
line4

codex-rs/apply-patch/tests/fixtures/scenarios/003_multiple_chunks/patch.txt (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
*** Begin Patch
*** Update File: multi.txt
@@
-line2
+changed2
@@
-line4
+changed4
*** End Patch

@@ -0,0 +1 @@
unrelated file

@@ -0,0 +1 @@
new content

codex-rs/apply-patch/tests/fixtures/scenarios/004_move_to_new_directory/input/old/name.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
old content

codex-rs/apply-patch/tests/fixtures/scenarios/004_move_to_new_directory/input/old/other.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
unrelated file

codex-rs/apply-patch/tests/fixtures/scenarios/004_move_to_new_directory/patch.txt (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-old content
+new content
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/005_rejects_empty_patch/patch.txt (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*** Begin Patch
*** End Patch

@@ -0,0 +1,2 @@
line1
line2

codex-rs/apply-patch/tests/fixtures/scenarios/006_rejects_missing_context/input/modify.txt (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
line1
line2

codex-rs/apply-patch/tests/fixtures/scenarios/006_rejects_missing_context/patch.txt (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
*** Begin Patch
*** Update File: modify.txt
@@
-missing
+changed
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/007_rejects_missing_file_delete/patch.txt (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
*** Begin Patch
*** Delete File: missing.txt
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/008_rejects_empty_update_hunk/patch.txt (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
*** Begin Patch
*** Update File: foo.txt
*** End Patch

@@ -0,0 +1,6 @@
*** Begin Patch
*** Update File: missing.txt
@@
-old
+new
*** End Patch

@@ -0,0 +1 @@
unrelated file

@@ -0,0 +1 @@
new

@@ -0,0 +1 @@
from

@@ -0,0 +1 @@
unrelated file

@@ -0,0 +1 @@
existing

@@ -0,0 +1,7 @@
*** Begin Patch
*** Update File: old/name.txt
*** Move to: renamed/dir/name.txt
@@
-from
+new
*** End Patch

@@ -0,0 +1 @@
new content

@@ -0,0 +1 @@
old content

codex-rs/apply-patch/tests/fixtures/scenarios/011_add_overwrites_existing_file/patch.txt (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
*** Begin Patch
*** Add File: duplicate.txt
+new content
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/012_delete_directory_fails/patch.txt (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
*** Begin Patch
*** Delete File: dir
*** End Patch

codex-rs/apply-patch/tests/fixtures/scenarios/013_rejects_invalid_hunk_header/patch.txt (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
*** Begin Patch
*** Frobnicate File: foo
*** End Patch

@@ -0,0 +1,2 @@
first line
second line

@@ -0,0 +1 @@
no newline at end
Some files were not shown because too many files have changed in this diff.