Mirror of https://github.com/openai/codex.git, synced 2026-02-02 06:57:03 +00:00

Compare commits: daniel/tes...jif/infty (185 commits)
| SHA1 |
| --- |
| ce9347388a |
| d6515aa010 |
| 37b3807f96 |
| 7b8533fdbe |
| 5f47ab64c4 |
| 6915ba2100 |
| 50f53e7071 |
| 40fba1bb4c |
| bdda762deb |
| da5492694b |
| a5d48a775b |
| 78f2785595 |
| fc1723f131 |
| ed5b0bfeb3 |
| 4b01f0f50a |
| ac2b3ec2bb |
| c052b89333 |
| b424ca93ab |
| 32bd302d80 |
| 39c72b3151 |
| 2cdfd38c24 |
| fc79a46c7a |
| 010dfa7751 |
| 54b9436699 |
| af3bf801ce |
| 5fb6cbbcca |
| 7bdf63a009 |
| 119dabd272 |
| c0baaa171b |
| b45c204109 |
| 0139f6780c |
| 86ba270926 |
| c146585cdb |
| 5fa7844ad7 |
| 84c9b574f9 |
| 272e13dd90 |
| 18d00e36b9 |
| 17550fee9e |
| 995f5c3614 |
| 9b53a306e3 |
| 0016346dfb |
| f38ad65254 |
| 774892c6d7 |
| 897d4d5f17 |
| 8a281cd1f4 |
| e8863b233b |
| 8fed0b53c4 |
| 00debb6399 |
| 0a0a10d8b3 |
| 13035561cd |
| 9be704a934 |
| f7b4e29609 |
| d6c5df9a0a |
| 8662162f45 |
| 57584d6f34 |
| b70dcd80a2 |
| c0f8a49e3e |
| 268a10f917 |
| 87362d6ebd |
| f073bc5ccf |
| 9320565658 |
| 4de5b25c52 |
| 90b2f096c3 |
| f3c57ab888 |
| 43ee0dfd19 |
| c9d9a40c98 |
| ab3d607be4 |
| f7d8e12ae0 |
| a8278b5423 |
| cb99d71f57 |
| f72e9da7c5 |
| 732c435345 |
| 5346cc422d |
| f5e055ae36 |
| 8245a4f53b |
| 26f7c46856 |
| 90af046c5c |
| 961ed31901 |
| 85e7357973 |
| f98fa85b44 |
| ddcaf3dccd |
| 56296cad82 |
| 95b41dd7f1 |
| bf82353f45 |
| 0308febc23 |
| 7b4a4c2219 |
| 3ddd4d47d0 |
| ca6a0358de |
| 0026b12615 |
| 4300236681 |
| ec238a2c39 |
| b6165aee0c |
| f4bc03d7c0 |
| 3c5e12e2a4 |
| c89229db97 |
| d3820f4782 |
| e896db1180 |
| 96acb8a74e |
| 687a13bbe5 |
| fe8122e514 |
| 876d4f450a |
| f52320be86 |
| a43ae86b6c |
| 496cb801e1 |
| abd517091f |
| b8b04514bc |
| 0e5d72cc57 |
| 60f9e85c16 |
| b016a3e7d8 |
| a0d56541cf |
| 226215f36d |
| 338c2c873c |
| 4b0f5eb6a8 |
| 75176dae70 |
| 12fd2b4160 |
| f2555422b9 |
| 27f169bb91 |
| b16c985ed2 |
| 35a770e871 |
| b09f62a1c3 |
| 5833508a17 |
| d73055c5b1 |
| 7e3a272b29 |
| 661663c98a |
| 721003c552 |
| 36f1cca1b1 |
| d3e1beb26c |
| c264ae6021 |
| 8cd882c4bd |
| 90fe5e4a7e |
| a90a58f7a1 |
| b2d81a7cac |
| 77a8b7fdeb |
| 7fa5e95c1f |
| 191d620707 |
| 53504a38d2 |
| 5c42419b02 |
| aecbe0f333 |
| a30a902db5 |
| f3b4a26f32 |
| dc3c6bf62a |
| 3203862167 |
| 06853d94f0 |
| cc2f4aafd7 |
| 356ea6ea34 |
| 4764fc1ee7 |
| 90ef94d3b3 |
| 6c2969d22d |
| 0ad1b0782b |
| d7acd146fb |
| c5465aed60 |
| a95605a867 |
| 848058f05b |
| a4f1c9d67e |
| 665341c9b1 |
| fae0e6c52c |
| 1b4a79f03c |
| 640192ac3d |
| 205c36e393 |
| d13ee79c41 |
| bde468ff8d |
| e292d1ed21 |
| de8d77274a |
| a5b7675e42 |
| 9823de3cc6 |
| c32e9cfe86 |
| 1d17ca1fa3 |
| bfe3328129 |
| e0b38bd7a2 |
| 153338c20f |
| 3495a7dc37 |
| 042d4d55d9 |
| 5af08e0719 |
| 33d3ecbccc |
| 69cb72f842 |
| 69ac5153d4 |
| 16b6951648 |
| 231c36f8d3 |
| 1e4541b982 |
| 7be3b484ad |
| 9617b69c8a |
| 1d94b9111c |
| 2d6cd6951a |
| 310e3c32e5 |
| 37786593a0 |
.github/ISSUE_TEMPLATE/2-bug-report.yml (vendored, 22 changed lines)

```diff
@@ -20,6 +20,14 @@ body:
     attributes:
       label: What version of Codex is running?
       description: Copy the output of `codex --version`
+    validations:
+      required: true
+  - type: input
+    id: plan
+    attributes:
+      label: What subscription do you have?
+    validations:
+      required: true
   - type: input
     id: model
     attributes:
@@ -32,11 +40,18 @@ body:
       description: |
         For MacOS and Linux: copy the output of `uname -mprs`
        For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
+  - type: textarea
+    id: actual
+    attributes:
+      label: What issue are you seeing?
+      description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
+    validations:
+      required: true
   - type: textarea
     id: steps
     attributes:
       label: What steps can reproduce the bug?
-      description: Explain the bug and provide a code snippet that can reproduce it.
+      description: Explain the bug and provide a code snippet that can reproduce it. Please include session id, token limit usage, context window usage if applicable.
     validations:
       required: true
   - type: textarea
@@ -44,11 +59,6 @@ body:
     attributes:
       label: What is the expected behavior?
       description: If possible, please provide text instead of a screenshot.
-  - type: textarea
-    id: actual
-    attributes:
-      label: What do you see instead?
-      description: If possible, please provide text instead of a screenshot.
   - type: textarea
     id: notes
     attributes:
```
.github/ISSUE_TEMPLATE/4-feature-request.yml (vendored, 6 changed lines)

```diff
@@ -2,7 +2,6 @@ name: 🎁 Feature Request
 description: Propose a new feature for Codex
 labels:
   - enhancement
-  - needs triage
 body:
   - type: markdown
     attributes:
@@ -19,11 +18,6 @@ body:
       label: What feature would you like to see?
     validations:
       required: true
-  - type: textarea
-    id: author
-    attributes:
-      label: Are you interested in implementing this feature?
-      description: Please wait for acknowledgement before implementing or opening a PR.
   - type: textarea
     id: notes
     attributes:
```
.github/ISSUE_TEMPLATE/5-vs-code-extension.yml (vendored, 24 changed lines)

```diff
@@ -14,11 +14,21 @@ body:
     id: version
     attributes:
       label: What version of the VS Code extension are you using?
+    validations:
+      required: true
+  - type: input
+    id: plan
+    attributes:
+      label: What subscription do you have?
+    validations:
+      required: true
   - type: input
     id: ide
     attributes:
       label: Which IDE are you using?
       description: Like `VS Code`, `Cursor`, `Windsurf`, etc.
+    validations:
+      required: true
   - type: input
     id: platform
     attributes:
@@ -26,11 +36,18 @@ body:
       description: |
         For MacOS and Linux: copy the output of `uname -mprs`
        For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
+  - type: textarea
+    id: actual
+    attributes:
+      label: What issue are you seeing?
+      description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
+    validations:
+      required: true
   - type: textarea
     id: steps
     attributes:
       label: What steps can reproduce the bug?
-      description: Explain the bug and provide a code snippet that can reproduce it.
+      description: Explain the bug and provide a code snippet that can reproduce it. Please include session id, token limit usage, context window usage if applicable.
     validations:
       required: true
   - type: textarea
@@ -38,11 +55,6 @@ body:
     attributes:
       label: What is the expected behavior?
       description: If possible, please provide text instead of a screenshot.
-  - type: textarea
-    id: actual
-    attributes:
-      label: What do you see instead?
-      description: If possible, please provide text instead of a screenshot.
   - type: textarea
     id: notes
     attributes:
```
.github/workflows/ci.yml (vendored, 3 changed lines)

```diff
@@ -60,3 +60,6 @@ jobs:
         run: ./scripts/asciicheck.py codex-cli/README.md
       - name: Check codex-cli/README ToC
         run: python3 scripts/readme_toc.py codex-cli/README.md
+
+      - name: Prettier (run `pnpm run format:fix` to fix)
+        run: pnpm run format
```
.github/workflows/issue-deduplicator.yml (vendored, 63 changed lines)

```diff
@@ -3,7 +3,7 @@ name: Issue Deduplicator
 on:
   issues:
     types:
-      # - opened - disabled while testing
+      - opened
       - labeled

 jobs:
@@ -14,7 +14,7 @@ jobs:
     permissions:
       contents: read
     outputs:
-      codex_output: ${{ steps.codex.outputs.final_message }}
+      codex_output: ${{ steps.codex.outputs.final-message }}
     steps:
       - uses: actions/checkout@v4

@@ -44,10 +44,38 @@ jobs:
       - id: codex
         uses: openai/codex-action@main
         with:
-          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
-          prompt_file: .github/prompts/issue-deduplicator.txt
-          require_repo_write: false
-          codex_version: 0.43.0-alpha.16
+          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
+          allow-users: "*"
+          model: gpt-5
+          prompt: |
+            You are an assistant that triages new GitHub issues by identifying potential duplicates.
+
+            You will receive the following JSON files located in the current working directory:
+            - `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
+            - `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).
+
+            Instructions:
+            - Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
+            - Focus on the underlying intent and context of each issue—such as reported symptoms, feature requests, reproduction steps, or error messages—rather than relying solely on string similarity or synthetic metrics.
+            - After your analysis, validate your results in 1-2 lines explaining your decision to return the selected matches.
+            - When unsure, prefer returning fewer matches.
+            - Include at most five numbers.
+
+          output-schema: |
+            {
+              "type": "object",
+              "properties": {
+                "issues": {
+                  "type": "array",
+                  "items": {
+                    "type": "string"
+                  }
+                },
+                "reason": { "type": "string" }
+              },
+              "required": ["issues", "reason"],
+              "additionalProperties": false
+            }

   comment-on-issue:
     name: Comment with potential duplicates
@@ -65,20 +93,35 @@ jobs:
         with:
           github-token: ${{ github.token }}
           script: |
-            let numbers;
+            const raw = process.env.CODEX_OUTPUT ?? '';
+            let parsed;
             try {
-              numbers = JSON.parse(process.env.CODEX_OUTPUT);
+              parsed = JSON.parse(raw);
             } catch (error) {
+              core.info(`Codex output was not valid JSON. Raw output: ${raw}`);
+              core.info(`Parse error: ${error.message}`);
               return;
             }

-            if (numbers.length === 0) {
+            const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
+            const currentIssueNumber = String(context.payload.issue.number);
+
+            console.log(`Current issue number: ${currentIssueNumber}`);
+            console.log(issues);
+
+            const filteredIssues = issues.filter((value) => String(value) !== currentIssueNumber);
+
+            if (filteredIssues.length === 0) {
               core.info('Codex reported no potential duplicates.');
               return;
             }

-            const lines = ['Potential duplicates detected:', ...numbers.map((value) => `- #${value}`)];
+            const lines = [
+              'Potential duplicates detected. Please review them and close your issue if it is a duplicate.',
+              '',
+              ...filteredIssues.map((value) => `- #${String(value)}`),
+              '',
+              '*Powered by [Codex Action](https://github.com/openai/codex-action)*'];

             await github.rest.issues.createComment({
               owner: context.repo.owner,
```
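The `output-schema` above pins Codex's final message to a small JSON object, and the commenting job bails out when the payload does not parse. For readers building similar automation, here is a minimal Rust sketch of consuming that same contract; the struct name and the `main` harness are illustrative assumptions, not part of the workflow:

```rust
use serde::Deserialize;

// Mirrors the `output-schema` above: candidate issue numbers (as strings)
// plus a short justification.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)] // corresponds to "additionalProperties": false
struct DedupOutput {
    issues: Vec<String>,
    reason: String,
}

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical model output, for illustration only.
    let raw = r#"{"issues": ["101", "205"], "reason": "Both describe the same login hang."}"#;
    let parsed: DedupOutput = serde_json::from_str(raw)?;
    // Drop the issue that triggered the run, as the workflow script does.
    let current = "101";
    let dupes: Vec<&str> = parsed
        .issues
        .iter()
        .map(String::as_str)
        .filter(|n| *n != current)
        .collect();
    println!("potential duplicates: {dupes:?} ({})", parsed.reason);
    Ok(())
}
```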
.github/workflows/issue-labeler.yml (vendored, 65 changed lines)

```diff
@@ -3,7 +3,7 @@ name: Issue Labeler
 on:
   issues:
     types:
-      # - opened - disabled while testing
+      - opened
       - labeled

 jobs:
@@ -13,23 +13,60 @@ jobs:
     runs-on: ubuntu-latest
     permissions:
       contents: read
-    env:
-      ISSUE_NUMBER: ${{ github.event.issue.number }}
-      ISSUE_TITLE: ${{ github.event.issue.title }}
-      ISSUE_BODY: ${{ github.event.issue.body }}
-      REPO_FULL_NAME: ${{ github.repository }}
     outputs:
-      codex_output: ${{ steps.codex.outputs.final_message }}
+      codex_output: ${{ steps.codex.outputs.final-message }}
     steps:
       - uses: actions/checkout@v4

       - id: codex
         uses: openai/codex-action@main
         with:
-          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
-          prompt_file: .github/prompts/issue-labeler.txt
-          require_repo_write: false
-          codex_version: 0.43.0-alpha.16
+          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
+          allow-users: "*"
+          prompt: |
+            You are an assistant that reviews GitHub issues for the repository.
+
+            Your job is to choose the most appropriate existing labels for the issue described later in this prompt.
+            Follow these rules:
+            - Only pick labels out of the list below.
+            - Prefer a small set of precise labels over many broad ones.
+
+            Labels to apply:
+            1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
+            2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
+            3. extension — VS Code (or other IDE) extension-specific issues.
+            4. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
+            5. mcp — Topics involving Model Context Protocol servers/clients.
+            6. codex-web — Issues targeting the Codex web UI/Cloud experience.
+            8. azure — Problems or requests tied to Azure OpenAI deployments.
+            9. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
+            10. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.
+
+            Issue number: ${{ github.event.issue.number }}
+
+            Issue title:
+            ${{ github.event.issue.title }}
+
+            Issue body:
+            ${{ github.event.issue.body }}
+
+            Repository full name:
+            ${{ github.repository }}
+
+          output-schema: |
+            {
+              "type": "object",
+              "properties": {
+                "labels": {
+                  "type": "array",
+                  "items": {
+                    "type": "string"
+                  }
+                }
+              },
+              "required": ["labels"],
+              "additionalProperties": false
+            }

   apply-labels:
     name: Apply labels from Codex output
@@ -53,12 +90,12 @@ jobs:
             exit 0
           fi

-          if ! printf '%s' "$json" | jq -e 'type == "array"' >/dev/null 2>&1; then
-            echo "Codex output was not a JSON array. Raw output: $json"
+          if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
+            echo "Codex output did not include a labels array. Raw output: $json"
             exit 0
           fi

-          labels=$(printf '%s' "$json" | jq -r '.[] | tostring')
+          labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
           if [ -z "$labels" ]; then
             echo "Codex returned an empty array. Nothing to do."
             exit 0
```
.github/workflows/rust-ci.yml (vendored, 42 changed lines)

```diff
@@ -148,15 +148,26 @@ jobs:
           targets: ${{ matrix.target }}
           components: clippy

-      - uses: actions/cache@v4
+      # Explicit cache restore: split cargo home vs target, so we can
+      # avoid caching the large target dir on the gnu-dev job.
+      - name: Restore cargo home cache
+        id: cache_cargo_home_restore
+        uses: actions/cache/restore@v4
         with:
           path: |
             ~/.cargo/bin/
             ~/.cargo/registry/index/
             ~/.cargo/registry/cache/
             ~/.cargo/git/db/
-            ${{ github.workspace }}/codex-rs/target/
-          key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Restore target cache (except gnu-dev)
+        id: cache_target_restore
+        if: ${{ !(matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release') }}
+        uses: actions/cache/restore@v4
+        with:
+          path: ${{ github.workspace }}/codex-rs/target/
+          key: cargo-target-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}

       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
         name: Install musl build tools
@@ -194,6 +205,31 @@ jobs:
         env:
           RUST_BACKTRACE: 1

+      # Save caches explicitly; make non-fatal so cache packaging
+      # never fails the overall job. Only save when key wasn't hit.
+      - name: Save cargo home cache
+        if: always() && !cancelled() && steps.cache_cargo_home_restore.outputs.cache-hit != 'true'
+        continue-on-error: true
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            ~/.cargo/bin/
+            ~/.cargo/registry/index/
+            ~/.cargo/registry/cache/
+            ~/.cargo/git/db/
+          key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Save target cache (except gnu-dev)
+        if: >-
+          always() && !cancelled() &&
+          (steps.cache_target_restore.outputs.cache-hit != 'true') &&
+          !(matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release')
+        continue-on-error: true
+        uses: actions/cache/save@v4
+        with:
+          path: ${{ github.workspace }}/codex-rs/target/
+          key: cargo-target-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}

       # Fail the job if any of the previous steps failed.
       - name: verify all steps passed
         if: |
```
.github/workflows/rust-release.yml (vendored, 197 changed lines)

```diff
@@ -47,7 +47,7 @@ jobs:

   build:
     needs: tag-check
-    name: ${{ matrix.runner }} - ${{ matrix.target }}
+    name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
     runs-on: ${{ matrix.runner }}
     timeout-minutes: 30
     defaults:
@@ -94,11 +94,181 @@ jobs:
       - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
         name: Install musl build tools
         run: |
-          sudo apt install -y musl-tools pkg-config
+          sudo apt-get update
+          sudo apt-get install -y musl-tools pkg-config

       - name: Cargo build
         run: cargo build --target ${{ matrix.target }} --release --bin codex --bin codex-responses-api-proxy

+      - if: ${{ matrix.runner == 'macos-14' }}
+        name: Configure Apple code signing
+        shell: bash
+        env:
+          KEYCHAIN_PASSWORD: actions
+          APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE_P12 }}
+          APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
+        run: |
+          set -euo pipefail
+
+          if [[ -z "${APPLE_CERTIFICATE:-}" ]]; then
+            echo "APPLE_CERTIFICATE is required for macOS signing"
+            exit 1
+          fi
+
+          if [[ -z "${APPLE_CERTIFICATE_PASSWORD:-}" ]]; then
+            echo "APPLE_CERTIFICATE_PASSWORD is required for macOS signing"
+            exit 1
+          fi
+
+          cert_path="${RUNNER_TEMP}/apple_signing_certificate.p12"
+          echo "$APPLE_CERTIFICATE" | base64 -d > "$cert_path"
+
+          keychain_path="${RUNNER_TEMP}/codex-signing.keychain-db"
+          security create-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
+          security set-keychain-settings -lut 21600 "$keychain_path"
+          security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$keychain_path"
+
+          keychain_args=()
+          cleanup_keychain() {
+            if ((${#keychain_args[@]} > 0)); then
+              security list-keychains -s "${keychain_args[@]}" || true
+              security default-keychain -s "${keychain_args[0]}" || true
+            else
+              security list-keychains -s || true
+            fi
+            if [[ -f "$keychain_path" ]]; then
+              security delete-keychain "$keychain_path" || true
+            fi
+          }
+
+          while IFS= read -r keychain; do
+            [[ -n "$keychain" ]] && keychain_args+=("$keychain")
+          done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
+
+          if ((${#keychain_args[@]} > 0)); then
+            security list-keychains -s "$keychain_path" "${keychain_args[@]}"
+          else
+            security list-keychains -s "$keychain_path"
+          fi
+
+          security default-keychain -s "$keychain_path"
+          security import "$cert_path" -k "$keychain_path" -P "$APPLE_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/security
+          security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" "$keychain_path" > /dev/null
+
+          codesign_hashes=()
+          while IFS= read -r hash; do
+            [[ -n "$hash" ]] && codesign_hashes+=("$hash")
+          done < <(security find-identity -v -p codesigning "$keychain_path" \
+            | sed -n 's/.*\([0-9A-F]\{40\}\).*/\1/p' \
+            | sort -u)
+
+          if ((${#codesign_hashes[@]} == 0)); then
+            echo "No signing identities found in $keychain_path"
+            cleanup_keychain
+            rm -f "$cert_path"
+            exit 1
+          fi
+
+          if ((${#codesign_hashes[@]} > 1)); then
+            echo "Multiple signing identities found in $keychain_path:"
+            printf '  %s\n' "${codesign_hashes[@]}"
+            cleanup_keychain
+            rm -f "$cert_path"
+            exit 1
+          fi
+
+          APPLE_CODESIGN_IDENTITY="${codesign_hashes[0]}"
+
+          rm -f "$cert_path"
+
+          echo "APPLE_CODESIGN_IDENTITY=$APPLE_CODESIGN_IDENTITY" >> "$GITHUB_ENV"
+          echo "APPLE_CODESIGN_KEYCHAIN=$keychain_path" >> "$GITHUB_ENV"
+          echo "::add-mask::$APPLE_CODESIGN_IDENTITY"
+
+      - if: ${{ matrix.runner == 'macos-14' }}
+        name: Sign macOS binaries
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          if [[ -z "${APPLE_CODESIGN_IDENTITY:-}" ]]; then
+            echo "APPLE_CODESIGN_IDENTITY is required for macOS signing"
+            exit 1
+          fi
+
+          keychain_args=()
+          if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" && -f "${APPLE_CODESIGN_KEYCHAIN}" ]]; then
+            keychain_args+=(--keychain "${APPLE_CODESIGN_KEYCHAIN}")
+          fi
+
+          for binary in codex codex-responses-api-proxy; do
+            path="target/${{ matrix.target }}/release/${binary}"
+            codesign --force --options runtime --timestamp --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
+          done
+
+      - if: ${{ matrix.runner == 'macos-14' }}
+        name: Notarize macOS binaries
+        shell: bash
+        env:
+          APPLE_NOTARIZATION_KEY_P8: ${{ secrets.APPLE_NOTARIZATION_KEY_P8 }}
+          APPLE_NOTARIZATION_KEY_ID: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
+          APPLE_NOTARIZATION_ISSUER_ID: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}
+        run: |
+          set -euo pipefail
+
+          for var in APPLE_NOTARIZATION_KEY_P8 APPLE_NOTARIZATION_KEY_ID APPLE_NOTARIZATION_ISSUER_ID; do
+            if [[ -z "${!var:-}" ]]; then
+              echo "$var is required for notarization"
+              exit 1
+            fi
+          done
+
+          notary_key_path="${RUNNER_TEMP}/notarytool.key.p8"
+          echo "$APPLE_NOTARIZATION_KEY_P8" | base64 -d > "$notary_key_path"
+          cleanup_notary() {
+            rm -f "$notary_key_path"
+          }
+          trap cleanup_notary EXIT
+
+          notarize_binary() {
+            local binary="$1"
+            local source_path="target/${{ matrix.target }}/release/${binary}"
+            local archive_path="${RUNNER_TEMP}/${binary}.zip"
+
+            if [[ ! -f "$source_path" ]]; then
+              echo "Binary $source_path not found"
+              exit 1
+            fi
+
+            rm -f "$archive_path"
+            ditto -c -k --keepParent "$source_path" "$archive_path"
+
+            submission_json=$(xcrun notarytool submit "$archive_path" \
+              --key "$notary_key_path" \
+              --key-id "$APPLE_NOTARIZATION_KEY_ID" \
+              --issuer "$APPLE_NOTARIZATION_ISSUER_ID" \
+              --output-format json \
+              --wait)
+
+            status=$(printf '%s\n' "$submission_json" | jq -r '.status // "Unknown"')
+            submission_id=$(printf '%s\n' "$submission_json" | jq -r '.id // ""')
+
+            if [[ -z "$submission_id" ]]; then
+              echo "Failed to retrieve submission ID for $binary"
+              exit 1
+            fi
+
+            echo "::notice title=Notarization::$binary submission ${submission_id} completed with status ${status}"
+
+            if [[ "$status" != "Accepted" ]]; then
+              echo "Notarization failed for ${binary} (submission ${submission_id}, status ${status})"
+              exit 1
+            fi
+          }
+
+          notarize_binary "codex"
+          notarize_binary "codex-responses-api-proxy"
+
       - name: Stage artifacts
         shell: bash
         run: |
@@ -157,6 +327,29 @@ jobs:
             zstd -T0 -19 --rm "$dest/$base"
           done

+      - name: Remove signing keychain
+        if: ${{ always() && matrix.runner == 'macos-14' }}
+        shell: bash
+        env:
+          APPLE_CODESIGN_KEYCHAIN: ${{ env.APPLE_CODESIGN_KEYCHAIN }}
+        run: |
+          set -euo pipefail
+          if [[ -n "${APPLE_CODESIGN_KEYCHAIN:-}" ]]; then
+            keychain_args=()
+            while IFS= read -r keychain; do
+              [[ "$keychain" == "$APPLE_CODESIGN_KEYCHAIN" ]] && continue
+              [[ -n "$keychain" ]] && keychain_args+=("$keychain")
+            done < <(security list-keychains | sed 's/^[[:space:]]*//;s/[[:space:]]*$//;s/"//g')
+            if ((${#keychain_args[@]} > 0)); then
+              security list-keychains -s "${keychain_args[@]}"
+              security default-keychain -s "${keychain_args[0]}"
+            fi
+
+            if [[ -f "$APPLE_CODESIGN_KEYCHAIN" ]]; then
+              security delete-keychain "$APPLE_CODESIGN_KEYCHAIN"
+            fi
+          fi
+
       - uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.target }}
```
.gitignore (vendored, 1 changed line)

```diff
@@ -30,6 +30,7 @@ result
 # cli tools
 CLAUDE.md
 .claude/
+AGENTS.override.md

 # caches
 .cache/
```
AGENTS.md (36 changed lines)

````diff
@@ -8,11 +8,17 @@ In the codex-rs folder where the rust code lives:
 - Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
 - You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
 - Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
+- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
+- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
+- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
+- When writing tests, prefer comparing the equality of entire objects over fields one by one.
+- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.

 Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:

 1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
 2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.

 When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.

 ## TUI style conventions

@@ -28,6 +34,7 @@ See `codex-rs/tui/styles.md`.
 - Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]

 ### TUI Styling (ratatui)

 - Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible.
 - Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text).
+- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable).
@@ -39,6 +46,7 @@ See `codex-rs/tui/styles.md`.
 - Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines.

 ### Text wrapping

 - Always use textwrap::wrap to wrap plain strings.
 - If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line.
+- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic.
@@ -60,8 +68,34 @@ This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to va
 - `cargo insta accept -p codex-tui`

 If you don’t have the tool:

 - `cargo install cargo-insta`

+### Test assertions
+
+- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
+
+### Integration tests (core)
+
+- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
+
+  - All `mount_sse*` helpers return a `ResponseMock`; hold onto it so you can assert against outbound `/responses` POST bodies.
+  - Use `ResponseMock::single_request()` when a test should only issue one POST, or `ResponseMock::requests()` to inspect every captured `ResponsesRequest`.
+  - `ResponsesRequest` exposes helpers (`body_json`, `input`, `function_call_output`, `custom_tool_call_output`, `call_output`, `header`, `path`, `query_param`) so assertions can target structured payloads instead of manual JSON digging.
+  - Build SSE payloads with the provided `ev_*` constructors and the `sse(...)` helper.
+
+- Typical pattern:
+
+  ```rust
+  let mock = responses::mount_sse_once(&server, responses::sse(vec![
+      responses::ev_response_created("resp-1"),
+      responses::ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
+      responses::ev_completed("resp-1"),
+  ])).await;
+
+  codex.submit(Op::UserTurn { ... }).await?;
+
+  // Assert request body if needed.
+  let request = mock.single_request();
+  // assert using request.function_call_output(call_id) or request.json_body() or other helpers.
+  ```
````
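The three Clippy links in the guidance above describe mechanical rewrites. A small illustrative sketch (not taken from the repository) that satisfies all three lints at once:

```rust
fn describe(name: &str, enabled: bool, items: &[&str]) -> String {
    // collapsible_if: one collapsed `if` instead of two nested ones.
    if enabled && !items.is_empty() {
        // uninlined_format_args: `name` is inlined directly into the format string.
        return format!("{name}: {} items", items.len());
    }
    // redundant_closure_for_method_calls: `str::len` instead of `|s| s.len()`.
    let total: usize = items.iter().copied().map(str::len).sum();
    format!("{name}: {total} chars")
}

fn main() {
    let items = ["a", "bc"];
    println!("{}", describe("demo", true, &items));
    println!("{}", describe("demo", false, &items));
}
```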
README.md (10 changed lines)

```diff
@@ -1,4 +1,3 @@
 <p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>

 <p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
@@ -62,8 +61,7 @@ You can also use Codex with an API key, but this requires [additional setup](./d

 ### Model Context Protocol (MCP)

-Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp). Enable by adding an `mcp_servers` section to your `~/.codex/config.toml`.
+Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).

 ### Configuration

@@ -83,9 +81,11 @@ Codex CLI supports a rich set of configuration options, with preferences stored
 - [**Authentication**](./docs/authentication.md)
   - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
   - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
-- [**Non-interactive mode**](./docs/exec.md)
+- **Automating Codex**
+  - [GitHub Action](https://github.com/openai/codex-action)
+  - [TypeScript SDK](./sdk/typescript/README.md)
+  - [Non-interactive mode (`codex exec`)](./docs/exec.md)
 - [**Advanced**](./docs/advanced.md)
   - [Non-interactive / CI mode](./docs/advanced.md#non-interactive--ci-mode)
   - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
   - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
 - [**Zero data retention (ZDR)**](./docs/zdr.md)
```
codex-cli/bin/codex.js (executable file → normal file, 35 changed lines)

```diff
@@ -80,6 +80,32 @@ function getUpdatedPath(newDirs) {
   return updatedPath;
 }

+/**
+ * Use heuristics to detect the package manager that was used to install Codex
+ * in order to give the user a hint about how to update it.
+ */
+function detectPackageManager() {
+  const userAgent = process.env.npm_config_user_agent || "";
+  if (/\bbun\//.test(userAgent)) {
+    return "bun";
+  }
+
+  const execPath = process.env.npm_execpath || "";
+  if (execPath.includes("bun")) {
+    return "bun";
+  }
+
+  if (
+    process.env.BUN_INSTALL ||
+    process.env.BUN_INSTALL_GLOBAL_DIR ||
+    process.env.BUN_INSTALL_BIN_DIR
+  ) {
+    return "bun";
+  }
+
+  return userAgent ? "npm" : null;
+}
+
 const additionalDirs = [];
 const pathDir = path.join(archRoot, "path");
 if (existsSync(pathDir)) {
@@ -87,9 +113,16 @@ if (existsSync(pathDir)) {
 }
 const updatedPath = getUpdatedPath(additionalDirs);

+const env = { ...process.env, PATH: updatedPath };
+const packageManagerEnvVar =
+  detectPackageManager() === "bun"
+    ? "CODEX_MANAGED_BY_BUN"
+    : "CODEX_MANAGED_BY_NPM";
+env[packageManagerEnvVar] = "1";
+
 const child = spawn(binaryPath, process.argv.slice(2), {
   stdio: "inherit",
-  env: { ...process.env, PATH: updatedPath, CODEX_MANAGED_BY_NPM: "1" },
+  env,
 });

 child.on("error", (err) => {
```
codex-rs/Cargo.lock (generated, 1756 changed lines): diff suppressed because it is too large.

codex-rs/Cargo.toml (workspace manifest)

```diff
@@ -6,6 +6,7 @@ members = [
     "app-server-protocol",
     "apply-patch",
     "arg0",
+    "codex-infty",
     "codex-backend-openapi-models",
     "cloud-tasks",
     "cloud-tasks-client",
@@ -14,6 +15,7 @@ members = [
     "core",
     "exec",
     "execpolicy",
+    "feedback",
     "file-search",
     "git-tooling",
     "linux-sandbox",
@@ -32,6 +34,7 @@ members = [
     "git-apply",
     "utils/json-to-toml",
     "utils/readiness",
+    "utils/string",
 ]
 resolver = "2"

@@ -55,6 +58,7 @@ codex-chatgpt = { path = "chatgpt" }
 codex-common = { path = "common" }
 codex-core = { path = "core" }
 codex-exec = { path = "exec" }
+codex-feedback = { path = "feedback" }
 codex-file-search = { path = "file-search" }
 codex-git-tooling = { path = "git-tooling" }
 codex-linux-sandbox = { path = "linux-sandbox" }
@@ -71,6 +75,7 @@ codex-rmcp-client = { path = "rmcp-client" }
 codex-tui = { path = "tui" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
 codex-utils-readiness = { path = "utils/readiness" }
+codex-utils-string = { path = "utils/string" }
 core_test_support = { path = "core/tests/common" }
 mcp-types = { path = "mcp-types" }
 mcp_test_support = { path = "mcp-server/tests/common" }
@@ -82,9 +87,11 @@ anyhow = "1"
 arboard = "3"
 askama = "0.12"
 assert_cmd = "2"
+assert_matches = "1.5.0"
 async-channel = "2.3.1"
+async-stream = "0.3.6"
 async-trait = "0.1.89"
 axum = { version = "0.8", default-features = false }
 base64 = "0.22.1"
 bytes = "1.10.1"
 chrono = "0.4.42"
@@ -102,7 +109,7 @@ env-flags = "0.1.1"
 env_logger = "0.11.5"
 escargot = "0.5"
 eventsource-stream = "0.2.3"
-futures = "0.3"
+futures = { version = "0.3", default-features = false }
 icu_decimal = "2.0.0"
 icu_locale_core = "2.0.0"
 ignore = "0.4.23"
@@ -110,6 +117,7 @@ image = { version = "^0.25.8", default-features = false }
 indexmap = "2.6.0"
 insta = "1.43.2"
 itertools = "0.14.0"
+keyring = "3.6"
 landlock = "0.4.1"
 lazy_static = "1"
 libc = "0.2.175"
@@ -117,6 +125,7 @@ log = "0.4"
 maplit = "1.0.2"
 mime_guess = "2.0.5"
 multimap = "0.10.0"
+notify = "8.2.0"
 nucleo-matcher = "0.3.1"
 openssl-sys = "*"
 opentelemetry = "0.30.0"
@@ -138,11 +147,14 @@ rand = "0.9"
 ratatui = "0.29.0"
 regex-lite = "0.1.7"
 reqwest = "0.12"
+rmcp = { version = "0.8.0", default-features = false }
 schemars = "0.8.22"
 seccompiler = "0.5.0"
+sentry = "0.34.0"
 serde = "1"
 serde_json = "1"
 serde_with = "3.14"
+serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
 shlex = "1.3.0"
@@ -168,8 +180,9 @@ tracing = "0.1.41"
 tracing-appender = "0.2.3"
 tracing-subscriber = "0.3.20"
 tracing-test = "0.2.5"
-tree-sitter = "0.25.9"
-tree-sitter-bash = "0.25.0"
+tree-sitter = "0.25.10"
+tree-sitter-bash = "0.25"
+tree-sitter-highlight = "0.25.10"
 ts-rs = "11"
 unicode-segmentation = "1.12.0"
 unicode-width = "0.2"
@@ -237,5 +250,9 @@ strip = "symbols"
 codegen-units = 1

 [patch.crates-io]
 # Uncomment to debug local changes.
 # ratatui = { path = "../../ratatui" }
+ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
+
+# Uncomment to debug local changes.
+# rmcp = { path = "../../rust-sdk/crates/rmcp" }
```
````diff
@@ -23,9 +23,15 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses

 ### Model Context Protocol Support

-Codex CLI functions as an MCP client that can connect to MCP servers on startup. See the [`mcp_servers`](../docs/config.md#mcp_servers) section in the configuration documentation for details.
+#### MCP client

-It is still experimental, but you can also launch Codex as an MCP _server_ by running `codex mcp-server`. Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:
+Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
+
+#### MCP server (experimental)
+
+Codex can be launched as an MCP _server_ by running `codex mcp-server`. This allows _other_ MCP clients to use Codex as a tool for another agent.
+
+Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:

 ```shell
 npx @modelcontextprotocol/inspector codex mcp-server
@@ -71,9 +77,13 @@ To test to see what happens when a command is run under the sandbox provided by

 ```
 # macOS
-codex debug seatbelt [--full-auto] [COMMAND]...
+codex sandbox macos [--full-auto] [COMMAND]...

 # Linux
+codex sandbox linux [--full-auto] [COMMAND]...
+
+# Legacy aliases
+codex debug seatbelt [--full-auto] [COMMAND]...
 codex debug landlock [--full-auto] [COMMAND]...
 ```
````
```diff
@@ -3,11 +3,30 @@ use ansi_to_tui::IntoText;
 use ratatui::text::Line;
 use ratatui::text::Text;

+// Expand tabs in a best-effort way for transcript rendering.
+// Tabs can interact poorly with left-gutter prefixes in our TUI and CLI
+// transcript views (e.g., `nl` separates line numbers from content with a tab).
+// Replacing tabs with spaces avoids odd visual artifacts without changing
+// semantics for our use cases.
+fn expand_tabs(s: &str) -> std::borrow::Cow<'_, str> {
+    if s.contains('\t') {
+        // Keep it simple: replace each tab with 4 spaces.
+        // We do not try to align to tab stops since most usages (like `nl`)
+        // look acceptable with a fixed substitution and this avoids stateful math
+        // across spans.
+        std::borrow::Cow::Owned(s.replace('\t', "    "))
+    } else {
+        std::borrow::Cow::Borrowed(s)
+    }
+}
+
 /// This function should be used when the contents of `s` are expected to match
 /// a single line. If multiple lines are found, a warning is logged and only the
 /// first line is returned.
 pub fn ansi_escape_line(s: &str) -> Line<'static> {
-    let text = ansi_escape(s);
+    // Normalize tabs to spaces to avoid odd gutter collisions in transcript mode.
+    let s = expand_tabs(s);
+    let text = ansi_escape(&s);
     match text.lines.as_slice() {
         [] => "".into(),
         [only] => only.clone(),
```
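The fixed-width substitution above is easy to sanity-check in isolation. A standalone sketch under the same assumptions as the diff, with `expand_tabs` copied from the change and an illustrative `main` harness:

```rust
use std::borrow::Cow;

// Same logic as the diff above: a fixed 4-space substitution per tab.
fn expand_tabs(s: &str) -> Cow<'_, str> {
    if s.contains('\t') {
        Cow::Owned(s.replace('\t', "    "))
    } else {
        Cow::Borrowed(s) // no allocation on the common tab-free path
    }
}

fn main() {
    // `nl`-style output: line number, tab, content.
    assert_eq!(expand_tabs("     1\tfn main() {"), "     1    fn main() {");
    // Tab-free input is borrowed, not copied.
    assert!(matches!(expand_tabs("plain"), Cow::Borrowed(_)));
    println!("ok");
}
```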
```diff
@@ -9,6 +9,7 @@ use codex_protocol::config_types::ReasoningEffort;
 use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
 use codex_protocol::config_types::Verbosity;
+use codex_protocol::parse_command::ParsedCommand;
 use codex_protocol::protocol::AskForApproval;
 use codex_protocol::protocol::EventMsg;
 use codex_protocol::protocol::FileChange;
@@ -697,6 +698,7 @@ pub struct ExecCommandApprovalParams {
     pub cwd: PathBuf,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub reason: Option<String>,
+    pub parsed_cmd: Vec<ParsedCommand>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
@@ -725,6 +727,7 @@ pub struct FuzzyFileSearchParams {
 pub struct FuzzyFileSearchResult {
     pub root: String,
     pub path: String,
+    pub file_name: String,
     pub score: u32,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub indices: Option<Vec<u32>>,
@@ -903,6 +906,9 @@ mod tests {
         command: vec!["echo".to_string(), "hello".to_string()],
         cwd: PathBuf::from("/tmp"),
         reason: Some("because tests".to_string()),
+        parsed_cmd: vec![ParsedCommand::Unknown {
+            cmd: "echo hello".to_string(),
+        }],
     };
     let request = ServerRequest::ExecCommandApproval {
         request_id: RequestId::Integer(7),
@@ -919,6 +925,12 @@ mod tests {
             "command": ["echo", "hello"],
             "cwd": "/tmp",
             "reason": "because tests",
+            "parsedCmd": [
+                {
+                    "type": "unknown",
+                    "cmd": "echo hello"
+                }
+            ]
         }
     }),
     serde_json::to_value(&request)?,
```
codex-rs/app-server/README.md (new file, 15 lines)

````diff
@@ -0,0 +1,15 @@
+# codex-app-server
+
+`codex app-server` is the harness Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.
+
+## Protocol
+
+Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted.
+
+## Message Schema
+
+Currently, you can dump a TypeScript version of the schema using `codex generate-ts`. It is specific to the version of Codex you used to run `generate-ts`, so the two are guaranteed to be compatible.
+
+```
+codex generate-ts --out DIR
+```
````
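To make the framing in that README concrete: each message is one line of JSON shaped like a JSON-RPC 2.0 request, minus the `"jsonrpc"` field. A minimal Rust sketch of emitting such a message over stdout; the struct and the method name are illustrative assumptions, not the actual schema (use `codex generate-ts` for the real types):

```rust
use serde::Serialize;

// One JSONL message: JSON-RPC 2.0 shaped, but with the
// `"jsonrpc": "2.0"` header omitted, per the README above.
#[derive(Serialize)]
struct Request {
    id: u64,
    method: String,
    params: serde_json::Value,
}

fn main() -> serde_json::Result<()> {
    let req = Request {
        id: 1,
        // Hypothetical method name, for illustration only.
        method: "newConversation".to_string(),
        params: serde_json::json!({ "cwd": "/tmp/project" }),
    };
    // One message per line: serialize compactly, then newline-terminate.
    println!("{}", serde_json::to_string(&req)?);
    Ok(())
}
```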
```diff
@@ -500,7 +500,7 @@ impl CodexMessageProcessor {
     }

     async fn get_user_saved_config(&self, request_id: RequestId) {
-        let toml_value = match load_config_as_toml(&self.config.codex_home) {
+        let toml_value = match load_config_as_toml(&self.config.codex_home).await {
             Ok(val) => val,
             Err(err) => {
                 let error = JSONRPCErrorError {
@@ -653,18 +653,19 @@ impl CodexMessageProcessor {
     }

     async fn process_new_conversation(&self, request_id: RequestId, params: NewConversationParams) {
-        let config = match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()) {
-            Ok(config) => config,
-            Err(err) => {
-                let error = JSONRPCErrorError {
-                    code: INVALID_REQUEST_ERROR_CODE,
-                    message: format!("error deriving config: {err}"),
-                    data: None,
-                };
-                self.outgoing.send_error(request_id, error).await;
-                return;
-            }
-        };
+        let config =
+            match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()).await {
+                Ok(config) => config,
+                Err(err) => {
+                    let error = JSONRPCErrorError {
+                        code: INVALID_REQUEST_ERROR_CODE,
+                        message: format!("error deriving config: {err}"),
+                        data: None,
+                    };
+                    self.outgoing.send_error(request_id, error).await;
+                    return;
+                }
+            };

         match self.conversation_manager.new_conversation(config).await {
             Ok(conversation_id) => {
@@ -752,7 +753,7 @@ impl CodexMessageProcessor {
         // Derive a Config using the same logic as new conversation, honoring overrides if provided.
         let config = match params.overrides {
             Some(overrides) => {
-                derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone())
+                derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone()).await
             }
             None => Ok(self.config.as_ref().clone()),
         };
@@ -1283,6 +1284,7 @@ async fn apply_bespoke_event_handling(
             command,
             cwd,
             reason,
+            parsed_cmd,
         }) => {
             let params = ExecCommandApprovalParams {
                 conversation_id,
@@ -1290,6 +1292,7 @@ async fn apply_bespoke_event_handling(
                 command,
                 cwd,
                 reason,
+                parsed_cmd,
             };
             let rx = outgoing
                 .send_request(ServerRequestPayload::ExecCommandApproval(params))
@@ -1320,7 +1323,7 @@ async fn apply_bespoke_event_handling(
     }
 }

-fn derive_config_from_params(
+async fn derive_config_from_params(
     params: NewConversationParams,
     codex_linux_sandbox_exe: Option<PathBuf>,
 ) -> std::io::Result<Config> {
@@ -1358,7 +1361,7 @@ fn derive_config_from_params(
         .map(|(k, v)| (k, json_to_toml(v)))
         .collect();

-    Config::load_with_cli_overrides(cli_overrides, overrides)
+    Config::load_with_cli_overrides(cli_overrides, overrides).await
 }

 async fn on_patch_approval_response(
```
```diff
@@ -1,5 +1,6 @@
 use std::num::NonZero;
 use std::num::NonZeroUsize;
+use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -56,9 +57,16 @@ pub(crate) async fn run_fuzzy_file_search(
     match res {
         Ok(Ok((root, res))) => {
             for m in res.matches {
+                let path = m.path;
+                //TODO(shijie): Move file name generation to file_search lib.
+                let file_name = Path::new(&path)
+                    .file_name()
+                    .map(|name| name.to_string_lossy().into_owned())
+                    .unwrap_or_else(|| path.clone());
                 let result = FuzzyFileSearchResult {
                     root: root.clone(),
-                    path: m.path,
+                    path,
+                    file_name,
                     score: m.score,
                     indices: m.indices,
                 };
```
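The `file_name` derivation added above has a fallback worth noting: when a path has no final component, the full path is reused. A standalone sketch of the same logic, with an illustrative `main` harness:

```rust
use std::path::Path;

// Same derivation as the handler above: take the final path component,
// falling back to the full path when there is none.
fn file_name_for(path: &str) -> String {
    Path::new(path)
        .file_name()
        .map(|name| name.to_string_lossy().into_owned())
        .unwrap_or_else(|| path.to_string())
}

fn main() {
    assert_eq!(file_name_for("codex-rs/tui/src/app.rs"), "app.rs");
    // `..` has no final component, so the fallback kicks in.
    assert_eq!(file_name_for(".."), "..");
    println!("ok");
}
```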
```diff
@@ -81,6 +81,7 @@ pub async fn run_main(
         )
     })?;
     let config = Config::load_with_cli_overrides(cli_kv_overrides, ConfigOverrides::default())
+        .await
         .map_err(|e| {
             std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
         })?;
```
```diff
@@ -1,3 +1,4 @@
+use std::collections::VecDeque;
 use std::path::Path;
 use std::process::Stdio;
 use std::sync::atomic::AtomicI64;
@@ -47,6 +48,7 @@ pub struct McpProcess {
     process: Child,
     stdin: ChildStdin,
     stdout: BufReader<ChildStdout>,
+    pending_user_messages: VecDeque<JSONRPCNotification>,
 }

 impl McpProcess {
@@ -117,6 +119,7 @@ impl McpProcess {
             process,
             stdin,
             stdout,
+            pending_user_messages: VecDeque::new(),
         })
     }

@@ -375,8 +378,9 @@ impl McpProcess {
         let message = self.read_jsonrpc_message().await?;

         match message {
-            JSONRPCMessage::Notification(_) => {
-                eprintln!("notification: {message:?}");
+            JSONRPCMessage::Notification(notification) => {
+                eprintln!("notification: {notification:?}");
+                self.enqueue_user_message(notification);
             }
             JSONRPCMessage::Request(jsonrpc_request) => {
                 return jsonrpc_request.try_into().with_context(
@@ -402,8 +406,9 @@ impl McpProcess {
         loop {
             let message = self.read_jsonrpc_message().await?;
             match message {
-                JSONRPCMessage::Notification(_) => {
-                    eprintln!("notification: {message:?}");
+                JSONRPCMessage::Notification(notification) => {
+                    eprintln!("notification: {notification:?}");
+                    self.enqueue_user_message(notification);
                 }
                 JSONRPCMessage::Request(_) => {
                     anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
@@ -427,8 +432,9 @@ impl McpProcess {
         loop {
             let message = self.read_jsonrpc_message().await?;
             match message {
-                JSONRPCMessage::Notification(_) => {
-                    eprintln!("notification: {message:?}");
+                JSONRPCMessage::Notification(notification) => {
+                    eprintln!("notification: {notification:?}");
+                    self.enqueue_user_message(notification);
                 }
                 JSONRPCMessage::Request(_) => {
                     anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
@@ -451,6 +457,10 @@ impl McpProcess {
     ) -> anyhow::Result<JSONRPCNotification> {
         eprintln!("in read_stream_until_notification_message({method})");

+        if let Some(notification) = self.take_pending_notification_by_method(method) {
+            return Ok(notification);
+        }
+
         loop {
             let message = self.read_jsonrpc_message().await?;
             match message {
@@ -458,6 +468,7 @@ impl McpProcess {
                     if notification.method == method {
                         return Ok(notification);
                     }
+                    self.enqueue_user_message(notification);
                 }
                 JSONRPCMessage::Request(_) => {
                     anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
@@ -471,4 +482,21 @@ impl McpProcess {
             }
         }
     }
+
+    fn take_pending_notification_by_method(&mut self, method: &str) -> Option<JSONRPCNotification> {
+        if let Some(pos) = self
+            .pending_user_messages
+            .iter()
+            .position(|notification| notification.method == method)
+        {
+            return self.pending_user_messages.remove(pos);
+        }
+        None
+    }
+
+    fn enqueue_user_message(&mut self, notification: JSONRPCNotification) {
+        if notification.method == "codex/event/user_message" {
+            self.pending_user_messages.push_back(notification);
+        }
+    }
 }
```
@@ -8,6 +8,7 @@ use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
@@ -25,6 +26,11 @@ use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningEffort;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::InputMessageKind;
use pretty_assertions::assert_eq;
use std::env;
use tempfile::TempDir;
@@ -306,6 +312,9 @@ async fn test_send_user_turn_changes_approval_policy_behavior() {
],
cwd: working_directory.clone(),
reason: None,
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}],
},
params
);
@@ -367,6 +376,234 @@ async fn test_send_user_turn_changes_approval_policy_behavior() {
}

// Helper: minimal config.toml pointing at mock provider.

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() {
if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}

let tmp = TempDir::new().expect("tmp dir");
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home).expect("create codex home dir");
let workspace_root = tmp.path().join("workspace");
std::fs::create_dir(&workspace_root).expect("create workspace root");
let first_cwd = workspace_root.join("turn1");
let second_cwd = workspace_root.join("turn2");
std::fs::create_dir(&first_cwd).expect("create first cwd");
std::fs::create_dir(&second_cwd).expect("create second cwd");

let responses = vec![
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo first turn".to_string(),
],
None,
Some(5000),
"call-first",
)
.expect("create first shell response"),
create_final_assistant_message_sse_response("done first")
.expect("create first final assistant message"),
create_shell_sse_response(
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string(),
],
None,
Some(5000),
"call-second",
)
.expect("create second shell response"),
create_final_assistant_message_sse_response("done second")
.expect("create second final assistant message"),
];
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri()).expect("write config");

let mut mcp = McpProcess::new(&codex_home)
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
cwd: Some(first_cwd.to_string_lossy().into_owned()),
approval_policy: Some(AskForApproval::Never),
sandbox: Some(SandboxMode::WorkspaceWrite),
..Default::default()
})
.await
.expect("send newConversation");
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await
.expect("newConversation timeout")
.expect("newConversation resp");
let NewConversationResponse {
conversation_id,
model,
..
} = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");

let add_listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams { conversation_id })
.await
.expect("send addConversationListener");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
)
.await
.expect("addConversationListener timeout")
.expect("addConversationListener resp");

let first_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "first turn".to_string(),
}],
cwd: first_cwd.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::WorkspaceWrite {
writable_roots: vec![first_cwd.clone()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
},
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await
.expect("send first sendUserTurn");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
)
.await
.expect("sendUserTurn 1 timeout")
.expect("sendUserTurn 1 resp");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await
.expect("task_complete 1 timeout")
.expect("task_complete 1 notification");

let second_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "second turn".to_string(),
}],
cwd: second_cwd.clone(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
})
.await
.expect("send second sendUserTurn");
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
)
.await
.expect("sendUserTurn 2 timeout")
.expect("sendUserTurn 2 resp");

let mut env_message: Option<String> = None;
let second_cwd_str = second_cwd.to_string_lossy().into_owned();
for _ in 0..10 {
let notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/user_message"),
)
.await
.expect("user_message timeout")
.expect("user_message notification");
let params = notification
.params
.clone()
.expect("user_message should include params");
let event: Event = serde_json::from_value(params).expect("deserialize user_message event");
if let EventMsg::UserMessage(user) = event.msg
&& matches!(user.kind, Some(InputMessageKind::EnvironmentContext))
&& user.message.contains(&second_cwd_str)
{
env_message = Some(user.message);
break;
}
}
let env_message = env_message.expect("expected environment context update");
assert!(
env_message.contains("<sandbox_mode>danger-full-access</sandbox_mode>"),
"env context should reflect new sandbox mode: {env_message}"
);
assert!(
env_message.contains("<network_access>enabled</network_access>"),
"env context should enable network access for danger-full-access policy: {env_message}"
);
assert!(
env_message.contains(&second_cwd_str),
"env context should include updated cwd: {env_message}"
);

let exec_begin_notification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
)
.await
.expect("exec_command_begin timeout")
.expect("exec_command_begin notification");
let params = exec_begin_notification
.params
.clone()
.expect("exec_command_begin params");
let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
let exec_begin = match event.msg {
EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
other => panic!("expected ExecCommandBegin event, got {other:?}"),
};
assert_eq!(
exec_begin.cwd, second_cwd,
"exec turn should run from updated cwd"
);
assert_eq!(
exec_begin.command,
vec![
"bash".to_string(),
"-lc".to_string(),
"echo second turn".to_string()
],
"exec turn should run expected command"
);

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await
.expect("task_complete 2 timeout")
.expect("task_complete 2 notification");
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(

@@ -1,3 +1,5 @@
use anyhow::Context;
use anyhow::Result;
use app_test_support::McpProcess;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
@@ -9,30 +11,41 @@ use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_sorts_and_includes_indices() {
async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
// Prepare a temporary Codex home and a separate root with test files.
let codex_home = TempDir::new().expect("create temp codex home");
let root = TempDir::new().expect("create temp search root");
let codex_home = TempDir::new().context("create temp codex home")?;
let root = TempDir::new().context("create temp search root")?;

// Create files designed to have deterministic ordering for query "abc".
std::fs::write(root.path().join("abc"), "x").expect("write file abc");
std::fs::write(root.path().join("abcde"), "x").expect("write file abcx");
std::fs::write(root.path().join("abexy"), "x").expect("write file abcx");
std::fs::write(root.path().join("zzz.txt"), "x").expect("write file zzz");
// Create files designed to have deterministic ordering for query "abe".
std::fs::write(root.path().join("abc"), "x").context("write file abc")?;
std::fs::write(root.path().join("abcde"), "x").context("write file abcde")?;
std::fs::write(root.path().join("abexy"), "x").context("write file abexy")?;
std::fs::write(root.path().join("zzz.txt"), "x").context("write file zzz")?;
let sub_dir = root.path().join("sub");
std::fs::create_dir_all(&sub_dir).context("create sub dir")?;
let sub_abce_path = sub_dir.join("abce");
std::fs::write(&sub_abce_path, "x").context("write file sub/abce")?;
let sub_abce_rel = sub_abce_path
.strip_prefix(root.path())
.context("strip root prefix from sub/abce")?
.to_string_lossy()
.to_string();

// Start MCP server and initialize.
let mut mcp = McpProcess::new(codex_home.path()).await.expect("spawn mcp");
let mut mcp = McpProcess::new(codex_home.path())
.await
.context("spawn mcp")?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
.context("init timeout")?
.context("init failed")?;

let root_path = root.path().to_string_lossy().to_string();
// Send fuzzyFileSearch request.
let request_id = mcp
.send_fuzzy_file_search_request("abe", vec![root_path.clone()], None)
.await
.expect("send fuzzyFileSearch");
.context("send fuzzyFileSearch")?;

// Read response and verify shape and ordering.
let resp: JSONRPCResponse = timeout(
@@ -40,39 +53,65 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() {
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("fuzzyFileSearch timeout")
.expect("fuzzyFileSearch resp");
.context("fuzzyFileSearch timeout")?
.context("fuzzyFileSearch resp")?;

let value = resp.result;
// The path separator on Windows affects the score.
let expected_score = if cfg!(windows) { 69 } else { 72 };

assert_eq!(
value,
json!({
"files": [
{ "root": root_path.clone(), "path": "abexy", "score": 88, "indices": [0, 1, 2] },
{ "root": root_path.clone(), "path": "abcde", "score": 74, "indices": [0, 1, 4] },
{
"root": root_path.clone(),
"path": "abexy",
"file_name": "abexy",
"score": 88,
"indices": [0, 1, 2],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 74,
"indices": [0, 1, 4],
},
{
"root": root_path.clone(),
"path": sub_abce_rel,
"file_name": "abce",
"score": expected_score,
"indices": [4, 5, 7],
},
]
})
);

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_fuzzy_file_search_accepts_cancellation_token() {
let codex_home = TempDir::new().expect("create temp codex home");
let root = TempDir::new().expect("create temp search root");
async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
let codex_home = TempDir::new().context("create temp codex home")?;
let root = TempDir::new().context("create temp search root")?;

std::fs::write(root.path().join("alpha.txt"), "contents").expect("write alpha");
std::fs::write(root.path().join("alpha.txt"), "contents").context("write alpha")?;

let mut mcp = McpProcess::new(codex_home.path()).await.expect("spawn mcp");
let mut mcp = McpProcess::new(codex_home.path())
.await
.context("spawn mcp")?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
.context("init timeout")?
.context("init failed")?;

let root_path = root.path().to_string_lossy().to_string();
let request_id = mcp
.send_fuzzy_file_search_request("alp", vec![root_path.clone()], None)
.await
.expect("send fuzzyFileSearch");
.context("send fuzzyFileSearch")?;

let request_id_2 = mcp
.send_fuzzy_file_search_request(
@@ -81,24 +120,27 @@ async fn test_fuzzy_file_search_accepts_cancellation_token() {
Some(request_id.to_string()),
)
.await
.expect("send fuzzyFileSearch");
.context("send fuzzyFileSearch")?;

let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id_2)),
)
.await
.expect("fuzzyFileSearch timeout")
.expect("fuzzyFileSearch resp");
.context("fuzzyFileSearch timeout")?
.context("fuzzyFileSearch resp")?;

let files = resp
.result
.get("files")
.and_then(|value| value.as_array())
.cloned()
.expect("files array");
.context("files key missing")?
.as_array()
.context("files not array")?
.clone();

assert_eq!(files.len(), 1);
assert_eq!(files[0]["root"], root_path);
assert_eq!(files[0]["path"], "alpha.txt");

Ok(())
}

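The cancellation test works because the third argument of send_fuzzy_file_search_request carries an optional cancellation token: the second request names the first request's id, superseding the in-flight search, and only the second request's response is then read. A sketch of that call shape (the query strings here are hypothetical):

let first_id = mcp
    .send_fuzzy_file_search_request("alp", vec![root_path.clone()], None)
    .await?;
// Naming the first request's id cancels that search;
// the response is read for the second request only.
let second_id = mcp
    .send_fuzzy_file_search_request("alph", vec![root_path.clone()], Some(first_id.to_string()))
    .await?;
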
@@ -23,5 +23,6 @@ tree-sitter-bash = { workspace = true }

[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

@@ -843,6 +843,7 @@ pub fn print_summary(
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use pretty_assertions::assert_eq;
use std::fs;
use std::string::ToString;
@@ -894,10 +895,10 @@ mod tests {

fn assert_not_match(script: &str) {
let args = args_bash(script);
assert!(matches!(
assert_matches!(
maybe_parse_apply_patch(&args),
MaybeApplyPatch::NotApplyPatch
));
);
}

#[test]
@@ -905,10 +906,10 @@ mod tests {
let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string();
let args = vec![patch];
let dir = tempdir().unwrap();
assert!(matches!(
assert_matches!(
maybe_parse_apply_patch_verified(&args, dir.path()),
MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
));
);
}

#[test]
@@ -916,10 +917,10 @@ mod tests {
let script = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch";
let args = args_bash(script);
let dir = tempdir().unwrap();
assert!(matches!(
assert_matches!(
maybe_parse_apply_patch_verified(&args, dir.path()),
MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation)
));
);
}

#[test]

@@ -29,7 +29,8 @@ pub async fn run_apply_command(
.parse_overrides()
.map_err(anyhow::Error::msg)?,
ConfigOverrides::default(),
)?;
)
.await?;

init_chatgpt_token_from_auth(&config.codex_home).await?;


@@ -32,8 +32,10 @@ codex-app-server-protocol = { workspace = true }
codex-protocol-ts = { workspace = true }
codex-responses-api-proxy = { workspace = true }
codex-tui = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-cloud-tasks = { path = "../cloud-tasks" }
ctor = { workspace = true }
crossterm = { workspace = true }
owo-colors = { workspace = true }
serde_json = { workspace = true }
supports-color = { workspace = true }
@@ -44,8 +46,16 @@ tokio = { workspace = true, features = [
"rt-multi-thread",
"signal",
] }
codex-infty = { path = "../codex-infty" }
chrono = { workspace = true }
serde = { workspace = true, features = ["derive"] }
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
textwrap = { workspace = true }

[dev-dependencies]
assert_matches = { workspace = true }
assert_cmd = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }

@@ -73,7 +73,8 @@ async fn run_command_under_sandbox(
codex_linux_sandbox_exe,
..Default::default()
},
)?;
)
.await?;

// In practice, this should be `std::env::current_dir()` because this CLI
// does not support `--cwd`, but let's use the config value for consistency.

115
codex-rs/cli/src/infty/args.rs
Normal file
@@ -0,0 +1,115 @@
use std::path::PathBuf;

use anyhow::Result;
use clap::Parser;
use clap::Subcommand;
use codex_common::CliConfigOverrides;
use codex_protocol::config_types::ReasoningEffort;

use super::commands;

#[derive(Debug, Parser)]
pub struct InftyCli {
#[clap(flatten)]
pub config_overrides: CliConfigOverrides,

/// Override the default runs root (`~/.codex/infty`).
#[arg(long = "runs-root", value_name = "DIR")]
pub runs_root: Option<PathBuf>,

#[command(subcommand)]
command: InftyCommand,
}

#[derive(Debug, Subcommand)]
enum InftyCommand {
/// Create a new run store and spawn solver/director sessions.
Create(CreateArgs),

/// List stored runs.
List(ListArgs),

/// Show metadata for a stored run.
Show(ShowArgs),
// resumable runs are disabled; Drive command removed
}

#[derive(Debug, Parser)]
pub(crate) struct CreateArgs {
/// Explicit run id. If omitted, a timestamp-based id is generated.
#[arg(long = "run-id", value_name = "RUN_ID")]
pub run_id: Option<String>,

/// Optional objective to send to the solver immediately after creation.
#[arg(long)]
pub objective: Option<String>,

/// Timeout in seconds when waiting for the solver reply to --objective.
#[arg(long = "timeout-secs", default_value_t = super::commands::DEFAULT_TIMEOUT_SECS)]
pub timeout_secs: u64,

/// Override only the Director's model (solver and verifiers keep defaults).
#[arg(long = "director-model", value_name = "MODEL")]
pub director_model: Option<String>,

/// Override only the Director's reasoning effort (minimal|low|medium|high).
#[arg(
long = "director-effort",
value_name = "LEVEL",
value_parser = parse_reasoning_effort
)]
pub director_effort: Option<ReasoningEffort>,
}

#[derive(Debug, Parser)]
pub(crate) struct ListArgs {
/// Emit JSON describing the stored runs.
#[arg(long)]
pub json: bool,
}

#[derive(Debug, Parser)]
pub(crate) struct ShowArgs {
/// Run id to display.
#[arg(value_name = "RUN_ID")]
pub run_id: String,

/// Emit JSON metadata instead of human-readable text.
#[arg(long)]
pub json: bool,
}

// resumable runs are disabled; DriveArgs removed

impl InftyCli {
pub async fn run(self) -> Result<()> {
let InftyCli {
config_overrides,
runs_root,
command,
} = self;

match command {
InftyCommand::Create(args) => {
commands::run_create(config_overrides, runs_root, args).await?;
}
InftyCommand::List(args) => commands::run_list(runs_root, args)?,
InftyCommand::Show(args) => commands::run_show(runs_root, args)?,
// Drive removed
}

Ok(())
}
}

fn parse_reasoning_effort(s: &str) -> Result<ReasoningEffort, String> {
match s.trim().to_ascii_lowercase().as_str() {
"minimal" => Ok(ReasoningEffort::Minimal),
"low" => Ok(ReasoningEffort::Low),
"medium" => Ok(ReasoningEffort::Medium),
"high" => Ok(ReasoningEffort::High),
_ => Err(format!(
"invalid reasoning effort: {s}. Expected one of: minimal|low|medium|high"
)),
}
}

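Read together, the clap definitions above imply invocations along these lines (a sketch; the run id, objective text, and model name are made up):

codex infty create --run-id demo-run --objective "triage the flaky tests" \
    --director-model gpt-5 --director-effort high --timeout-secs 600
codex infty list --json
codex infty show demo-run --json
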
438
codex-rs/cli/src/infty/commands.rs
Normal file
@@ -0,0 +1,438 @@
use std::fs;
use std::fs::OpenOptions;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use chrono::SecondsFormat;
use chrono::Utc;
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::read_codex_api_key_from_env;
use codex_core::auth::read_openai_api_key_from_env;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_infty::InftyOrchestrator;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use codex_infty::RunStore;
use owo_colors::OwoColorize;
use serde::Serialize;
use std::sync::OnceLock;
use supports_color::Stream;
use tracing_appender::non_blocking;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::prelude::*;

use super::args::CreateArgs;
use super::args::ListArgs;
use super::args::ShowArgs;
use super::progress::TerminalProgressReporter;
use super::summary::print_run_summary_box;

const DEFAULT_VERIFIER_ROLES: [&str; 3] = ["verifier-alpha", "verifier-beta", "verifier-gamma"];

pub(crate) const DEFAULT_TIMEOUT_SECS: u64 = 6000;

#[derive(Debug, Serialize)]
struct RunSummary {
run_id: String,
path: String,
created_at: String,
updated_at: String,
roles: Vec<String>,
}

pub(crate) async fn run_create(
config_overrides: CliConfigOverrides,
runs_root_override: Option<PathBuf>,
args: CreateArgs,
) -> Result<()> {
let config = load_config(config_overrides).await?;
init_infty_logging(&config)?;
let auth = load_auth(&config)?;
let runs_root = resolve_runs_root(runs_root_override)?;
let color_enabled = supports_color::on(Stream::Stdout).is_some();

let mut run_id = if let Some(id) = args.run_id {
id
} else {
generate_run_id()
};
run_id = run_id.trim().to_string();
validate_run_id(&run_id)?;

let run_path = runs_root.join(&run_id);
if run_path.exists() {
bail!("run {run_id} already exists at {}", run_path.display());
}

let orchestrator = InftyOrchestrator::with_runs_root(auth, runs_root).with_progress(Arc::new(
TerminalProgressReporter::with_color(color_enabled),
));
let verifiers: Vec<RoleConfig> = DEFAULT_VERIFIER_ROLES
.iter()
.map(|role| RoleConfig::new(role.to_string(), config.clone()))
.collect();
let mut director_config = config.clone();
if let Some(model) = args.director_model.as_deref() {
director_config.model = model.to_string();
}
if let Some(effort) = args.director_effort {
director_config.model_reasoning_effort = Some(effort);
}
let run_params = RunParams {
run_id: run_id.clone(),
run_root: Some(run_path.clone()),
solver: RoleConfig::new("solver", config.clone()),
director: RoleConfig::new("director", director_config),
verifiers,
};

if let Some(objective) = args.objective {
let timeout = Duration::from_secs(args.timeout_secs);
let options = RunExecutionOptions {
objective: Some(objective),
director_timeout: timeout,
verifier_timeout: timeout,
};

let start = Instant::now();
let start_header = format!("Starting run {run_id}");
if color_enabled {
println!("{}", start_header.blue().bold());
} else {
println!("{start_header}");
}
let location_line = format!(" run directory: {}", run_path.display());
if color_enabled {
println!("{}", location_line.dimmed());
} else {
println!("{location_line}");
}
if let Some(objective_text) = options.objective.as_deref()
&& !objective_text.trim().is_empty()
{
let objective_line = format!(" objective: {objective_text}");
if color_enabled {
println!("{}", objective_line.dimmed());
} else {
println!("{objective_line}");
}
}
println!();

let objective_snapshot = options.objective.clone();
let outcome = orchestrator
.execute_new_run(run_params, options)
.await
.with_context(|| format!("failed to execute run {run_id}"))?;
let duration = start.elapsed();
print_run_summary_box(
color_enabled,
&run_id,
&run_path,
&outcome.deliverable_path,
outcome.summary.as_deref(),
objective_snapshot.as_deref(),
duration,
);
} else {
let sessions = orchestrator
.spawn_run(run_params)
.await
.with_context(|| format!("failed to create run {run_id}"))?;

println!(
"Created run {run_id} at {}",
sessions.store.path().display()
);
}

Ok(())
}

pub(crate) fn run_list(runs_root_override: Option<PathBuf>, args: ListArgs) -> Result<()> {
// Initialize logging using default Codex home discovery.
let _ = init_infty_logging_from_home();
let runs_root = resolve_runs_root(runs_root_override)?;
let listings = collect_run_summaries(&runs_root)?;

if args.json {
println!("{}", serde_json::to_string_pretty(&listings)?);
return Ok(());
}

if listings.is_empty() {
println!("No runs found under {}", runs_root.display());
return Ok(());
}

println!("Runs in {}", runs_root.display());
for summary in listings {
println!(
"{}\t{}\t{}",
summary.run_id, summary.updated_at, summary.path
);
}

Ok(())
}

pub(crate) fn run_show(runs_root_override: Option<PathBuf>, args: ShowArgs) -> Result<()> {
validate_run_id(&args.run_id)?;
let _ = init_infty_logging_from_home();
let runs_root = resolve_runs_root(runs_root_override)?;
let run_path = runs_root.join(&args.run_id);
let store =
RunStore::load(&run_path).with_context(|| format!("failed to load run {}", args.run_id))?;
let metadata = store.metadata();

let summary = RunSummary {
run_id: metadata.run_id.clone(),
path: run_path.display().to_string(),
created_at: metadata
.created_at
.to_rfc3339_opts(SecondsFormat::Secs, true),
updated_at: metadata
.updated_at
.to_rfc3339_opts(SecondsFormat::Secs, true),
roles: metadata
.roles
.iter()
.map(|role| role.role.clone())
.collect(),
};

if args.json {
println!("{}", serde_json::to_string_pretty(&summary)?);
return Ok(());
}

println!("Run: {}", summary.run_id);
println!("Path: {}", summary.path);
println!("Created: {}", summary.created_at);
println!("Updated: {}", summary.updated_at);
println!("Roles: {}", summary.roles.join(", "));

Ok(())
}

// resumable runs are disabled; run_drive removed

fn generate_run_id() -> String {
let timestamp = Utc::now().format("run-%Y%m%d-%H%M%S");
format!("{timestamp}")
}

pub(crate) fn validate_run_id(run_id: &str) -> Result<()> {
if run_id.is_empty() {
bail!("run id must not be empty");
}
if run_id.starts_with('.') || run_id.ends_with('.') {
bail!("run id must not begin or end with '.'");
}
if run_id
.chars()
.any(|c| !(c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.')))
{
bail!("run id may only contain ASCII alphanumerics, '-', '_', or '.'");
}
Ok(())
}

async fn load_config(cli_overrides: CliConfigOverrides) -> Result<Config> {
let overrides = cli_overrides
.parse_overrides()
.map_err(|err| anyhow!("failed to parse -c overrides: {err}"))?;
Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load Codex configuration")
}

fn load_auth(config: &Config) -> Result<CodexAuth> {
if let Some(auth) =
CodexAuth::from_codex_home(&config.codex_home).context("failed to read auth.json")?
{
return Ok(auth);
}
if let Some(api_key) = read_codex_api_key_from_env() {
return Ok(CodexAuth::from_api_key(&api_key));
}
if let Some(api_key) = read_openai_api_key_from_env() {
return Ok(CodexAuth::from_api_key(&api_key));
}
bail!("no Codex authentication found. Run `codex login` or set OPENAI_API_KEY.");
}

fn resolve_runs_root(override_path: Option<PathBuf>) -> Result<PathBuf> {
if let Some(path) = override_path {
return Ok(path);
}
codex_infty::default_runs_root()
}

fn collect_run_summaries(root: &Path) -> Result<Vec<RunSummary>> {
let mut summaries = Vec::new();
let iter = match fs::read_dir(root) {
Ok(read_dir) => read_dir,
Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(summaries),
Err(err) => {
return Err(
anyhow!(err).context(format!("failed to read runs root {}", root.display()))
);
}
};

for entry in iter {
let entry = entry?;
if !entry.file_type()?.is_dir() {
continue;
}
let run_path = entry.path();
let store = match RunStore::load(&run_path) {
Ok(store) => store,
Err(err) => {
eprintln!(
"Skipping {}: failed to load run metadata: {err}",
run_path.display()
);
continue;
}
};
let metadata = store.metadata();
summaries.push(RunSummary {
run_id: metadata.run_id.clone(),
path: run_path.display().to_string(),
created_at: metadata
.created_at
.to_rfc3339_opts(SecondsFormat::Secs, true),
updated_at: metadata
.updated_at
.to_rfc3339_opts(SecondsFormat::Secs, true),
roles: metadata
.roles
.iter()
.map(|role| role.role.clone())
.collect(),
});
}

summaries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
Ok(summaries)
}

fn init_infty_logging(config: &codex_core::config::Config) -> std::io::Result<()> {
let log_dir = codex_core::config::log_dir(config)?;
std::fs::create_dir_all(&log_dir)?;

let mut log_file_opts = OpenOptions::new();
log_file_opts.create(true).append(true);

#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
log_file_opts.mode(0o600);
}

let log_file = log_file_opts.open(log_dir.join("codex-infty.log"))?;
let (non_blocking, guard) = non_blocking(log_file);
static INFTY_LOG_GUARD: OnceLock<tracing_appender::non_blocking::WorkerGuard> = OnceLock::new();
let _ = INFTY_LOG_GUARD.set(guard);

// Use RUST_LOG if set, otherwise default to info for common codex crates
let env_filter = || {
EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new("codex_core=info,codex_infty=info,codex_cli=info"))
};

let file_layer = tracing_subscriber::fmt::layer()
.with_writer(non_blocking)
.with_target(false)
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
.with_filter(env_filter());

// Initialize once; subsequent calls are no-ops.
let _ = tracing_subscriber::registry().with(file_layer).try_init();
Ok(())
}

fn init_infty_logging_from_home() -> std::io::Result<()> {
let mut log_dir = codex_core::config::find_codex_home()?;
log_dir.push("log");
std::fs::create_dir_all(&log_dir)?;

let mut log_file_opts = OpenOptions::new();
log_file_opts.create(true).append(true);

#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
log_file_opts.mode(0o600);
}

let log_file = log_file_opts.open(log_dir.join("codex-infty.log"))?;
let (non_blocking, guard) = non_blocking(log_file);
static INFTY_LOG_GUARD: OnceLock<tracing_appender::non_blocking::WorkerGuard> = OnceLock::new();
let _ = INFTY_LOG_GUARD.set(guard);

let env_filter = || {
EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new("codex_core=info,codex_infty=info,codex_cli=info"))
};

let file_layer = tracing_subscriber::fmt::layer()
.with_writer(non_blocking)
.with_target(false)
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
.with_filter(env_filter());

let _ = tracing_subscriber::registry().with(file_layer).try_init();
Ok(())
}

#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;

#[test]
fn default_verifier_roles_are_stable() {
assert_eq!(
DEFAULT_VERIFIER_ROLES,
["verifier-alpha", "verifier-beta", "verifier-gamma"]
);
}

#[test]
fn validates_run_ids() {
assert!(validate_run_id("run-20241030-123000").is_ok());
assert!(validate_run_id("run.alpha").is_ok());
assert!(validate_run_id("").is_err());
assert!(validate_run_id("..bad").is_err());
assert!(validate_run_id("bad/value").is_err());
}

#[test]
fn generates_timestamped_run_id() {
let run_id = generate_run_id();
assert!(run_id.starts_with("run-"));
assert_eq!(run_id.len(), "run-YYYYMMDD-HHMMSS".len());
}

#[test]
fn collect_summaries_returns_empty_for_missing_root() {
let temp = TempDir::new().expect("temp dir");
let missing = temp.path().join("not-present");
let summaries = collect_run_summaries(&missing).expect("collect");
assert!(summaries.is_empty());
}
}

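Given the Serialize derive on RunSummary, `codex infty list --json` should print an array shaped like the following (values are illustrative; timestamps come from to_rfc3339_opts(SecondsFormat::Secs, true)):

[
  {
    "run_id": "run-20241030-123000",
    "path": "/home/user/.codex/infty/run-20241030-123000",
    "created_at": "2024-10-30T12:30:00Z",
    "updated_at": "2024-10-30T12:45:10Z",
    "roles": ["solver", "director", "verifier-alpha", "verifier-beta", "verifier-gamma"]
  }
]
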
6
codex-rs/cli/src/infty/mod.rs
Normal file
@@ -0,0 +1,6 @@
mod args;
mod commands;
mod progress;
mod summary;

pub use args::InftyCli;
194
codex-rs/cli/src/infty/progress.rs
Normal file
@@ -0,0 +1,194 @@
use chrono::Local;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;
use codex_infty::AggregatedVerifierVerdict;
use codex_infty::DirectiveResponse;
use codex_infty::ProgressReporter;
use codex_infty::VerifierDecision;
use codex_infty::VerifierVerdict;
use crossterm::style::Stylize;
use std::path::Path;
use supports_color::Stream;

#[derive(Debug, Default, Clone)]
pub(crate) struct TerminalProgressReporter;

impl TerminalProgressReporter {
pub(crate) fn with_color(_color_enabled: bool) -> Self {
Self
}

fn format_role_label(&self, role: &str) -> String {
let lower = role.to_ascii_lowercase();
if lower == "solver" {
return "[solver]".magenta().bold().to_string();
}
if lower == "director" {
return "[director]".blue().bold().to_string();
}
if lower == "user" {
return "[user]".cyan().bold().to_string();
}
if lower.contains("verifier") {
return format!("[{role}]").green().bold().to_string();
}
format!("[{role}]").magenta().bold().to_string()
}

fn timestamp(&self) -> String {
let timestamp = Local::now().format("%H:%M:%S");
let display = format!("[{timestamp}]");
if supports_color::on(Stream::Stdout).is_some() {
format!("{}", display.dim())
} else {
display
}
}

fn print_exchange(
&self,
from_role: &str,
to_role: &str,
lines: Vec<String>,
trailing_blank_line: bool,
) {
let header = format!(
"{} ----> {}",
self.format_role_label(from_role),
self.format_role_label(to_role)
);
println!("{} {header}", self.timestamp());
for line in lines {
println!("{line}");
}
if trailing_blank_line {
println!();
}
}

fn format_decision(&self, decision: VerifierDecision) -> String {
match decision {
VerifierDecision::Pass => "pass".green().bold().to_string(),
VerifierDecision::Fail => "fail".red().bold().to_string(),
}
}
}

impl ProgressReporter for TerminalProgressReporter {
fn objective_posted(&self, objective: &str) {
let objective_line = format!("{}", format!("→ objective: {objective}").dim());
self.print_exchange("user", "solver", vec![objective_line], true);
}

fn solver_event(&self, event: &EventMsg) {
match serde_json::to_string_pretty(event) {
Ok(json) => {
tracing::debug!("[solver:event]\n{json}");
}
Err(err) => {
tracing::warn!("[solver:event] (failed to serialize: {err}) {event:?}");
}
}
}

fn role_event(&self, role: &str, event: &EventMsg) {
match serde_json::to_string_pretty(event) {
Ok(json) => {
tracing::debug!("[{role}:event]\n{json}");
}
Err(err) => {
tracing::warn!("[{role}:event] (failed to serialize: {err}) {event:?}");
}
}
}

fn solver_agent_message(&self, agent_msg: &AgentMessageEvent) {
tracing::info!("Agent Message: {agent_msg:?}");
}

fn invalid_solver_signal(&self, raw_message: &str) {
let heading = "Warning".yellow().bold();
let body = format!(
"solver reply did not match expected JSON signal; got: {}",
raw_message
);
println!("{} {} {}", self.timestamp(), heading, body);
}

fn direction_request(&self, prompt: &str) {
let prompt_line = format!("{}", prompt.yellow());
self.print_exchange("solver", "director", vec![prompt_line], true);
}

fn director_response(&self, directive: &DirectiveResponse) {
let suffix = directive
.rationale
.as_deref()
.filter(|rationale| !rationale.is_empty())
.map(|rationale| format!(" (rationale: {rationale})"))
.unwrap_or_default();
let directive_line = format!("{}{}", directive.directive, suffix);
self.print_exchange("director", "solver", vec![directive_line], true);
}

fn verification_request(&self, claim_path: &str, notes: Option<&str>) {
let mut lines = Vec::new();
let path_line = format!("→ path: {claim_path}");
lines.push(format!("{}", path_line.dim()));
if let Some(notes) = notes.filter(|notes| !notes.is_empty()) {
let note_line = format!("→ note: {notes}");
lines.push(format!("{}", note_line.dim()));
}
self.print_exchange("solver", "verifier", lines, true);
}

fn verifier_verdict(&self, role: &str, verdict: &VerifierVerdict) {
let decision = self.format_decision(verdict.verdict);
let mut lines = Vec::new();
lines.push(format!("verdict: {decision}"));
if !verdict.reasons.is_empty() {
let reasons = verdict.reasons.join("; ");
let reason_line = format!("→ reasons: {reasons}");
lines.push(format!("{}", reason_line.dim()));
}
if !verdict.suggestions.is_empty() {
let suggestions = verdict.suggestions.join("; ");
let suggestion_line = format!("→ suggestions: {suggestions}");
lines.push(format!("{}", suggestion_line.dim()));
}
self.print_exchange(role, "solver", lines, false);
}

fn verification_summary(&self, summary: &AggregatedVerifierVerdict) {
let decision = self.format_decision(summary.overall);
let heading = "Verification summary".bold();
let summary_line = format!("{heading}: {decision}");
self.print_exchange("verifier", "solver", vec![summary_line], true);
}

fn final_delivery(&self, deliverable_path: &Path, summary: Option<&str>) {
let delivery_line = format!(
"{}",
format!("→ path: {}", deliverable_path.display()).dim()
);
let summary_line = format!(
"{}",
format!("→ summary: {}", summary.unwrap_or("<none>")).dim()
);
self.print_exchange(
"solver",
"verifier",
vec![delivery_line, summary_line],
true,
);
}

fn run_interrupted(&self) {
println!(
"{}",
"Run interrupted by Ctrl+C. Shutting down sessions…"
.red()
.bold(),
);
}
}

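With color disabled, print_exchange output takes roughly this shape (timestamps and message text are illustrative):

[14:02:11] [solver] ----> [director]
How should I package the deliverable?

[14:02:13] [director] ----> [solver]
Draft the outline first (rationale: smaller review units)
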
123
codex-rs/cli/src/infty/summary.rs
Normal file
@@ -0,0 +1,123 @@
use std::path::Path;
use std::time::Duration;

use codex_common::elapsed::format_duration;
use crossterm::terminal;
use owo_colors::OwoColorize;
use textwrap::Options as WrapOptions;
use textwrap::wrap;

pub(crate) fn print_run_summary_box(
color_enabled: bool,
run_id: &str,
run_path: &Path,
deliverable_path: &Path,
summary: Option<&str>,
objective: Option<&str>,
duration: Duration,
) {
let mut items = Vec::new();
items.push(("Run ID".to_string(), run_id.to_string()));
items.push(("Run Directory".to_string(), run_path.display().to_string()));
if let Some(objective) = objective
&& !objective.trim().is_empty()
{
items.push(("Objective".to_string(), objective.trim().to_string()));
}
items.push((
"Deliverable".to_string(),
deliverable_path.display().to_string(),
));
items.push(("Total Time".to_string(), format_duration(duration)));
if let Some(summary) = summary {
let trimmed = summary.trim();
if !trimmed.is_empty() {
items.push(("Summary".to_string(), trimmed.to_string()));
}
}

let label_width = items
.iter()
.map(|(label, _)| label.len())
.max()
.unwrap_or(0)
.max(12);

const DEFAULT_MAX_WIDTH: usize = 84;
const MIN_VALUE_WIDTH: usize = 20;
let label_padding = label_width + 7;
let min_total_width = label_padding + MIN_VALUE_WIDTH;
let available_width = terminal::size()
.ok()
.map(|(cols, _)| usize::from(cols).saturating_sub(2))
.unwrap_or(DEFAULT_MAX_WIDTH);
let max_width = available_width.min(DEFAULT_MAX_WIDTH);
let lower_bound = min_total_width.min(available_width);
let mut total_width = max_width.max(lower_bound).max(label_padding + 1);
let mut value_width = total_width.saturating_sub(label_padding);
if value_width < MIN_VALUE_WIDTH {
value_width = MIN_VALUE_WIDTH;
total_width = label_padding + value_width;
}

let inner_width = total_width.saturating_sub(4);
let top_border = format!("+{}+", "=".repeat(total_width.saturating_sub(2)));
let separator = format!("+{}+", "-".repeat(total_width.saturating_sub(2)));
let title_line = format!(
"| {:^inner_width$} |",
"Run Summary",
inner_width = inner_width
);

println!();
println!("{top_border}");
if color_enabled {
println!("{}", title_line.bold());
} else {
println!("{title_line}");
}
println!("{separator}");

for (index, (label, value)) in items.iter().enumerate() {
let mut rows = Vec::new();
for (idx, paragraph) in value.split('\n').enumerate() {
let trimmed = paragraph.trim();
if trimmed.is_empty() {
if idx > 0 {
rows.push(String::new());
}
continue;
}
let wrapped = wrap(trimmed, WrapOptions::new(value_width).break_words(false));
if wrapped.is_empty() {
rows.push(String::new());
} else {
rows.extend(wrapped.into_iter().map(std::borrow::Cow::into_owned));
}
}
if rows.is_empty() {
rows.push(String::new());
}

for (line_idx, line) in rows.iter().enumerate() {
let label_cell = if line_idx == 0 { label.as_str() } else { "" };
let row_line = format!("| {label_cell:<label_width$} | {line:<value_width$} |");
if color_enabled {
match label.as_str() {
"Deliverable" => println!("{}", row_line.green()),
"Summary" => println!("{}", row_line.bold()),
_ => println!("{row_line}"),
}
} else {
println!("{row_line}");
}
}

if index + 1 < items.len() {
println!("{separator}");
}
}

println!("{top_border}");
println!();
}

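Without color, the box renders roughly as follows (column widths and values are illustrative; the real widths adapt to the terminal and the exact duration format comes from format_duration):

+================================================+
|                  Run Summary                   |
+------------------------------------------------+
| Run ID        | demo-run                       |
+------------------------------------------------+
| Deliverable   | /tmp/infty/demo-run/answer.md  |
+------------------------------------------------+
| Total Time    | 3m 12s                         |
+================================================+
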
@@ -9,6 +9,8 @@ use codex_core::config::ConfigOverrides;
|
||||
use codex_login::ServerOptions;
|
||||
use codex_login::run_device_code_login;
|
||||
use codex_login::run_login_server;
|
||||
use std::io::IsTerminal;
|
||||
use std::io::Read;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
|
||||
@@ -24,7 +26,7 @@ pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
|
||||
}
|
||||
|
||||
pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
|
||||
let config = load_config_or_exit(cli_config_overrides);
|
||||
let config = load_config_or_exit(cli_config_overrides).await;
|
||||
|
||||
match login_with_chatgpt(config.codex_home).await {
|
||||
Ok(_) => {
|
||||
@@ -42,7 +44,7 @@ pub async fn run_login_with_api_key(
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
api_key: String,
|
||||
) -> ! {
|
||||
let config = load_config_or_exit(cli_config_overrides);
|
||||
let config = load_config_or_exit(cli_config_overrides).await;
|
||||
|
||||
match login_with_api_key(&config.codex_home, &api_key) {
|
||||
Ok(_) => {
|
||||
@@ -56,13 +58,40 @@ pub async fn run_login_with_api_key(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_api_key_from_stdin() -> String {
|
||||
let mut stdin = std::io::stdin();
|
||||
|
||||
if stdin.is_terminal() {
|
||||
eprintln!(
|
||||
"--with-api-key expects the API key on stdin. Try piping it, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`."
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
eprintln!("Reading API key from stdin...");
|
||||
|
||||
let mut buffer = String::new();
|
||||
if let Err(err) = stdin.read_to_string(&mut buffer) {
|
||||
eprintln!("Failed to read API key from stdin: {err}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
let api_key = buffer.trim().to_string();
|
||||
if api_key.is_empty() {
|
||||
eprintln!("No API key provided via stdin.");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
api_key
|
||||
}
|
||||
|
||||
/// Login using the OAuth device code flow.
|
||||
pub async fn run_login_with_device_code(
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
issuer_base_url: Option<String>,
|
||||
client_id: Option<String>,
|
||||
) -> ! {
|
||||
let config = load_config_or_exit(cli_config_overrides);
|
||||
let config = load_config_or_exit(cli_config_overrides).await;
|
||||
let mut opts = ServerOptions::new(
|
||||
config.codex_home,
|
||||
client_id.unwrap_or(CLIENT_ID.to_string()),
|
||||
@@ -83,7 +112,7 @@ pub async fn run_login_with_device_code(
|
||||
}
|
||||
|
||||
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
|
||||
let config = load_config_or_exit(cli_config_overrides);
|
||||
let config = load_config_or_exit(cli_config_overrides).await;
|
||||
|
||||
match CodexAuth::from_codex_home(&config.codex_home) {
|
||||
Ok(Some(auth)) => match auth.mode {
|
||||
@@ -114,7 +143,7 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
|
||||
}
|
||||
|
||||
pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! {
|
||||
let config = load_config_or_exit(cli_config_overrides);
|
||||
let config = load_config_or_exit(cli_config_overrides).await;
|
||||
|
||||
match logout(&config.codex_home) {
|
||||
Ok(true) => {
|
||||
@@ -132,7 +161,7 @@ pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! {
|
||||
}
|
||||
}
|
||||
|
||||
fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
|
||||
async fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
|
||||
let cli_overrides = match cli_config_overrides.parse_overrides() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
@@ -142,7 +171,7 @@ fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
|
||||
};
|
||||
|
||||
let config_overrides = ConfigOverrides::default();
|
||||
match Config::load_with_cli_overrides(cli_overrides, config_overrides) {
|
||||
match Config::load_with_cli_overrides(cli_overrides, config_overrides).await {
|
||||
Ok(config) => config,
|
||||
Err(e) => {
|
||||
eprintln!("Error loading configuration: {e}");
|
||||
|
||||
@@ -7,6 +7,7 @@ use codex_chatgpt::apply_command::ApplyCommand;
|
||||
use codex_chatgpt::apply_command::run_apply_command;
|
||||
use codex_cli::LandlockCommand;
|
||||
use codex_cli::SeatbeltCommand;
|
||||
use codex_cli::login::read_api_key_from_stdin;
|
||||
use codex_cli::login::run_login_status;
|
||||
use codex_cli::login::run_login_with_api_key;
|
||||
use codex_cli::login::run_login_with_chatgpt;
|
||||
@@ -18,13 +19,18 @@ use codex_exec::Cli as ExecCli;
|
||||
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
|
||||
use codex_tui::AppExitInfo;
|
||||
use codex_tui::Cli as TuiCli;
|
||||
use codex_tui::UpdateAction;
|
||||
use owo_colors::OwoColorize;
|
||||
use std::path::PathBuf;
|
||||
use supports_color::Stream;
|
||||
|
||||
mod infty;
|
||||
mod mcp_cmd;
|
||||
|
||||
use crate::infty::InftyCli;
|
||||
use crate::mcp_cmd::McpCli;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
|
||||
/// Codex CLI
|
||||
///
|
||||
@@ -44,6 +50,9 @@ struct MultitoolCli {
|
||||
#[clap(flatten)]
|
||||
pub config_overrides: CliConfigOverrides,
|
||||
|
||||
#[clap(flatten)]
|
||||
pub feature_toggles: FeatureToggles,
|
||||
|
||||
#[clap(flatten)]
|
||||
interactive: TuiCli,
|
||||
|
||||
@@ -75,8 +84,9 @@ enum Subcommand {
|
||||
/// Generate shell completion scripts.
|
||||
Completion(CompletionCommand),
|
||||
|
||||
/// Internal debugging commands.
|
||||
Debug(DebugArgs),
|
||||
/// Run commands within a Codex-provided sandbox.
#[clap(visible_alias = "debug")]
Sandbox(SandboxArgs),

/// Apply the latest diff produced by the Codex agent as a `git apply` to your local working tree.
#[clap(visible_alias = "a")]
@@ -95,6 +105,13 @@ enum Subcommand {
/// Internal: run the responses API proxy.
#[clap(hide = true)]
ResponsesApiProxy(ResponsesApiProxyArgs),

/// Inspect feature flags.
Features(FeaturesCli),

/// [experimental] Manage Codex Infty long-running task runs.
#[clap(name = "infty")]
Infty(InftyCli),
}

#[derive(Debug, Parser)]
@@ -120,18 +137,20 @@ struct ResumeCommand {
}

#[derive(Debug, Parser)]
struct DebugArgs {
struct SandboxArgs {
#[command(subcommand)]
cmd: DebugCommand,
cmd: SandboxCommand,
}

#[derive(Debug, clap::Subcommand)]
enum DebugCommand {
enum SandboxCommand {
/// Run a command under Seatbelt (macOS only).
Seatbelt(SeatbeltCommand),
#[clap(visible_alias = "seatbelt")]
Macos(SeatbeltCommand),

/// Run a command under Landlock+seccomp (Linux only).
Landlock(LandlockCommand),
#[clap(visible_alias = "landlock")]
Linux(LandlockCommand),
}

#[derive(Debug, Parser)]
@@ -139,12 +158,21 @@ struct LoginCommand {
#[clap(skip)]
config_overrides: CliConfigOverrides,

#[arg(long = "api-key", value_name = "API_KEY")]
#[arg(
long = "with-api-key",
help = "Read the API key from stdin (e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`)"
)]
with_api_key: bool,

#[arg(
long = "api-key",
value_name = "API_KEY",
help = "(deprecated) Previously accepted the API key directly; now exits with guidance to use --with-api-key",
hide = true
)]
api_key: Option<String>,

/// EXPERIMENTAL: Use device code flow (not yet supported)
/// This feature is experimental and may change in future releases.
#[arg(long = "experimental_use-device-code", hide = true)]
#[arg(long = "device-auth")]
use_device_code: bool,

/// EXPERIMENTAL: Use custom OAuth issuer base URL (advanced)
@@ -187,6 +215,7 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
let AppExitInfo {
token_usage,
conversation_id,
..
} = exit_info;

if token_usage.is_zero() {
@@ -211,11 +240,79 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
lines
}

fn print_exit_messages(exit_info: AppExitInfo) {
/// Handle the app exit and print the results. Optionally run the update action.
fn handle_app_exit(exit_info: AppExitInfo) -> anyhow::Result<()> {
let update_action = exit_info.update_action;
let color_enabled = supports_color::on(Stream::Stdout).is_some();
for line in format_exit_messages(exit_info, color_enabled) {
println!("{line}");
}
if let Some(action) = update_action {
run_update_action(action)?;
}
Ok(())
}

/// Run the update action and print the result.
fn run_update_action(action: UpdateAction) -> anyhow::Result<()> {
println!();
let (cmd, args) = action.command_args();
let cmd_str = action.command_str();
println!("Updating Codex via `{cmd_str}`...");
let status = std::process::Command::new(cmd).args(args).status()?;
if !status.success() {
anyhow::bail!("`{cmd_str}` failed with status {status}");
}
println!();
println!("🎉 Update ran successfully! Please restart Codex.");
Ok(())
}

#[derive(Debug, Default, Parser, Clone)]
struct FeatureToggles {
/// Enable a feature (repeatable). Equivalent to `-c features.<name>=true`.
#[arg(long = "enable", value_name = "FEATURE", action = clap::ArgAction::Append, global = true)]
enable: Vec<String>,

/// Disable a feature (repeatable). Equivalent to `-c features.<name>=false`.
#[arg(long = "disable", value_name = "FEATURE", action = clap::ArgAction::Append, global = true)]
disable: Vec<String>,
}

impl FeatureToggles {
fn to_overrides(&self) -> Vec<String> {
let mut v = Vec::new();
for k in &self.enable {
v.push(format!("features.{k}=true"));
}
for k in &self.disable {
v.push(format!("features.{k}=false"));
}
v
}
}

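For reference, the toggle folding above means `--enable foo --disable bar` behaves exactly like `-c features.foo=true -c features.bar=false`. A minimal test-style sketch of that mapping (hypothetical test, relying only on the fields shown above):

```rust
#[test]
fn feature_toggles_expand_to_config_overrides() {
    // `--enable foo --disable bar` should expand to raw `-c` overrides.
    let toggles = FeatureToggles {
        enable: vec!["foo".to_string()],
        disable: vec!["bar".to_string()],
    };
    assert_eq!(
        toggles.to_overrides(),
        vec![
            "features.foo=true".to_string(),
            "features.bar=false".to_string(),
        ]
    );
}
```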
#[derive(Debug, Parser)]
struct FeaturesCli {
#[command(subcommand)]
sub: FeaturesSubcommand,
}

#[derive(Debug, Parser)]
enum FeaturesSubcommand {
/// List known features with their stage and effective state.
List,
}

fn stage_str(stage: codex_core::features::Stage) -> &'static str {
use codex_core::features::Stage;
match stage {
Stage::Experimental => "experimental",
Stage::Beta => "beta",
Stage::Stable => "stable",
Stage::Deprecated => "deprecated",
Stage::Removed => "removed",
}
}

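A quick illustration of the mapping (hypothetical test using only the `Stage` variants listed above):

```rust
#[test]
fn stage_str_maps_each_variant() {
    use codex_core::features::Stage;
    // Each stage renders as its lowercase name.
    assert_eq!(stage_str(Stage::Experimental), "experimental");
    assert_eq!(stage_str(Stage::Deprecated), "deprecated");
}
```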
/// As early as possible in the process lifecycle, apply hardening measures. We
@@ -235,11 +332,17 @@ fn main() -> anyhow::Result<()> {

async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
let MultitoolCli {
config_overrides: root_config_overrides,
config_overrides: mut root_config_overrides,
feature_toggles,
mut interactive,
subcommand,
} = MultitoolCli::parse();

// Fold --enable/--disable into config overrides so they flow to all subcommands.
root_config_overrides
.raw_overrides
.extend(feature_toggles.to_overrides());

match subcommand {
None => {
prepend_config_flags(
@@ -247,7 +350,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
root_config_overrides.clone(),
);
let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
print_exit_messages(exit_info);
handle_app_exit(exit_info)?;
}
Some(Subcommand::Exec(mut exec_cli)) => {
prepend_config_flags(
@@ -279,7 +382,8 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
last,
config_overrides,
);
codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?;
handle_app_exit(exit_info)?;
}
Some(Subcommand::Login(mut login_cli)) => {
prepend_config_flags(
@@ -298,7 +402,13 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
login_cli.client_id,
)
.await;
} else if let Some(api_key) = login_cli.api_key {
} else if login_cli.api_key.is_some() {
eprintln!(
"The --api-key flag is no longer supported. Pipe the key instead, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`."
);
std::process::exit(1);
} else if login_cli.with_api_key {
let api_key = read_api_key_from_stdin();
run_login_with_api_key(login_cli.config_overrides, api_key).await;
} else {
run_login_with_chatgpt(login_cli.config_overrides).await;
@@ -323,8 +433,15 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
);
codex_cloud_tasks::run_main(cloud_cli, codex_linux_sandbox_exe).await?;
}
Some(Subcommand::Debug(debug_args)) => match debug_args.cmd {
DebugCommand::Seatbelt(mut seatbelt_cli) => {
Some(Subcommand::Infty(mut infty_cli)) => {
prepend_config_flags(
&mut infty_cli.config_overrides,
root_config_overrides.clone(),
);
infty_cli.run().await?;
}
Some(Subcommand::Sandbox(sandbox_args)) => match sandbox_args.cmd {
SandboxCommand::Macos(mut seatbelt_cli) => {
prepend_config_flags(
&mut seatbelt_cli.config_overrides,
root_config_overrides.clone(),
@@ -335,7 +452,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
)
.await?;
}
DebugCommand::Landlock(mut landlock_cli) => {
SandboxCommand::Linux(mut landlock_cli) => {
prepend_config_flags(
&mut landlock_cli.config_overrides,
root_config_overrides.clone(),
@@ -361,6 +478,30 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
Some(Subcommand::GenerateTs(gen_cli)) => {
codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?;
}
Some(Subcommand::Features(FeaturesCli { sub })) => match sub {
FeaturesSubcommand::List => {
// Respect root-level `-c` overrides plus top-level flags like `--profile`.
let cli_kv_overrides = root_config_overrides
.parse_overrides()
.map_err(|e| anyhow::anyhow!(e))?;

// Thread through relevant top-level flags (at minimum, `--profile`).
// Also honor `--search` since it maps to a feature toggle.
let overrides = ConfigOverrides {
config_profile: interactive.config_profile.clone(),
tools_web_search_request: interactive.web_search.then_some(true),
..Default::default()
};

let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides).await?;
for def in codex_core::features::FEATURES.iter() {
let name = def.key;
let stage = stage_str(def.stage);
let enabled = config.features.enabled(def.id);
println!("{name}\t{stage}\t{enabled}");
}
}
},
}

Ok(())
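`codex features list` emits one tab-separated `name<TAB>stage<TAB>enabled` row per feature. A hedged sketch of consuming that output, assuming exactly the three columns printed above:

```rust
// Parse the TSV rows emitted by `codex features list`.
fn parse_features(output: &str) -> Vec<(String, String, bool)> {
    output
        .lines()
        .filter_map(|line| {
            let mut cols = line.split('\t');
            Some((
                cols.next()?.to_string(),        // feature name
                cols.next()?.to_string(),        // stage
                cols.next()? == "true",          // effective state
            ))
        })
        .collect()
}
```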
@@ -454,6 +595,7 @@ fn print_completion(cmd: CompletionCommand) {
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use codex_core::protocol::TokenUsage;
use codex_protocol::ConversationId;

@@ -463,6 +605,7 @@ mod tests {
interactive,
config_overrides: root_overrides,
subcommand,
feature_toggles: _,
} = cli;

let Subcommand::Resume(ResumeCommand {
@@ -488,6 +631,7 @@ mod tests {
conversation_id: conversation
.map(ConversationId::from_string)
.map(Result::unwrap),
update_action: None,
}
}

@@ -496,6 +640,7 @@ mod tests {
let exit_info = AppExitInfo {
token_usage: TokenUsage::default(),
conversation_id: None,
update_action: None,
};
let lines = format_exit_messages(exit_info, false);
assert!(lines.is_empty());
@@ -586,14 +731,14 @@ mod tests {
assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
assert!(interactive.oss);
assert_eq!(interactive.config_profile.as_deref(), Some("my-profile"));
assert!(matches!(
assert_matches!(
interactive.sandbox_mode,
Some(codex_common::SandboxModeCliArg::WorkspaceWrite)
));
assert!(matches!(
);
assert_matches!(
interactive.approval_policy,
Some(codex_common::ApprovalModeCliArg::OnRequest)
));
);
assert!(interactive.full_auto);
assert_eq!(
interactive.cwd.as_deref(),

@@ -4,7 +4,9 @@ use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use clap::ArgGroup;
use codex_common::CliConfigOverrides;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
@@ -12,6 +14,12 @@ use codex_core::config::load_global_mcp_servers;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerConfig;
use codex_core::config_types::McpServerTransportConfig;
use codex_core::features::Feature;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;
use codex_rmcp_client::perform_oauth_login;
use codex_rmcp_client::supports_oauth_login;

/// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
///
@@ -43,6 +51,14 @@ pub enum McpSubcommand {

/// [experimental] Remove a global MCP server entry.
Remove(RemoveArgs),

/// [experimental] Authenticate with a configured MCP server via OAuth.
/// Requires experimental_use_rmcp_client = true in config.toml.
Login(LoginArgs),

/// [experimental] Remove stored OAuth credentials for a server.
/// Requires experimental_use_rmcp_client = true in config.toml.
Logout(LogoutArgs),
}

#[derive(Debug, clap::Parser)]
@@ -67,13 +83,61 @@ pub struct AddArgs {
/// Name for the MCP server configuration.
pub name: String,

/// Environment variables to set when launching the server.
#[arg(long, value_parser = parse_env_pair, value_name = "KEY=VALUE")]
pub env: Vec<(String, String)>,
#[command(flatten)]
pub transport_args: AddMcpTransportArgs,
}

#[derive(Debug, clap::Args)]
#[command(
group(
ArgGroup::new("transport")
.args(["command", "url"])
.required(true)
.multiple(false)
)
)]
pub struct AddMcpTransportArgs {
#[command(flatten)]
pub stdio: Option<AddMcpStdioArgs>,

#[command(flatten)]
pub streamable_http: Option<AddMcpStreamableHttpArgs>,
}
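The `ArgGroup` above is what makes `--command` and `--url` exactly-one-required and mutually exclusive. A self-contained sketch of the same clap pattern, with simplified illustrative field types:

```rust
use clap::{ArgGroup, Parser};

#[derive(Debug, Parser)]
#[command(group(
    ArgGroup::new("transport")
        .args(["command", "url"])
        .required(true)
        .multiple(false)
))]
struct Demo {
    /// Launch command for a stdio server.
    #[arg(long)]
    command: Option<String>,

    /// URL for a streamable HTTP server.
    #[arg(long)]
    url: Option<String>,
}

fn main() {
    // Passing both flags, or neither, fails to parse.
    assert!(Demo::try_parse_from(["demo", "--command", "echo", "--url", "x"]).is_err());
    assert!(Demo::try_parse_from(["demo"]).is_err());
    assert!(Demo::try_parse_from(["demo", "--url", "https://example.com"]).is_ok());
}
```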

#[derive(Debug, clap::Args)]
pub struct AddMcpStdioArgs {
/// Command to launch the MCP server.
#[arg(trailing_var_arg = true, num_args = 1..)]
/// Use --url for a streamable HTTP server.
#[arg(
trailing_var_arg = true,
num_args = 0..,
)]
pub command: Vec<String>,

/// Environment variables to set when launching the server.
/// Only valid with stdio servers.
#[arg(
long,
value_parser = parse_env_pair,
value_name = "KEY=VALUE",
)]
pub env: Vec<(String, String)>,
}

#[derive(Debug, clap::Args)]
pub struct AddMcpStreamableHttpArgs {
/// URL for a streamable HTTP MCP server.
#[arg(long)]
pub url: String,

/// Optional environment variable to read for a bearer token.
/// Only valid with streamable HTTP servers.
#[arg(
long = "bearer-token-env-var",
value_name = "ENV_VAR",
requires = "url"
)]
pub bearer_token_env_var: Option<String>,
}

#[derive(Debug, clap::Parser)]
@@ -82,6 +146,18 @@ pub struct RemoveArgs {
pub name: String,
}

#[derive(Debug, clap::Parser)]
pub struct LoginArgs {
/// Name of the MCP server to authenticate with OAuth.
pub name: String,
}

#[derive(Debug, clap::Parser)]
pub struct LogoutArgs {
/// Name of the MCP server to deauthenticate.
pub name: String,
}

impl McpCli {
pub async fn run(self) -> Result<()> {
let McpCli {
@@ -91,16 +167,22 @@ impl McpCli {

match subcommand {
McpSubcommand::List(args) => {
run_list(&config_overrides, args)?;
run_list(&config_overrides, args).await?;
}
McpSubcommand::Get(args) => {
run_get(&config_overrides, args)?;
run_get(&config_overrides, args).await?;
}
McpSubcommand::Add(args) => {
run_add(&config_overrides, args)?;
run_add(&config_overrides, args).await?;
}
McpSubcommand::Remove(args) => {
run_remove(&config_overrides, args)?;
run_remove(&config_overrides, args).await?;
}
McpSubcommand::Login(args) => {
run_login(&config_overrides, args).await?;
}
McpSubcommand::Logout(args) => {
run_logout(&config_overrides, args).await?;
}
}

@@ -108,40 +190,67 @@ impl McpCli {
}
}

fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
// Validate any provided overrides even though they are not currently applied.
config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

let AddArgs { name, env, command } = add_args;
let AddArgs {
name,
transport_args,
} = add_args;

validate_server_name(&name)?;

let mut command_parts = command.into_iter();
let command_bin = command_parts
.next()
.ok_or_else(|| anyhow!("command is required"))?;
let command_args: Vec<String> = command_parts.collect();

let env_map = if env.is_empty() {
None
} else {
let mut map = HashMap::new();
for (key, value) in env {
map.insert(key, value);
}
Some(map)
};

let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
let mut servers = load_global_mcp_servers(&codex_home)
.await
.with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

let new_entry = McpServerConfig {
transport: McpServerTransportConfig::Stdio {
command: command_bin,
args: command_args,
env: env_map,
let transport = match transport_args {
AddMcpTransportArgs {
stdio: Some(stdio), ..
} => {
let mut command_parts = stdio.command.into_iter();
let command_bin = command_parts
.next()
.ok_or_else(|| anyhow!("command is required"))?;
let command_args: Vec<String> = command_parts.collect();

let env_map = if stdio.env.is_empty() {
None
} else {
Some(stdio.env.into_iter().collect::<HashMap<_, _>>())
};
McpServerTransportConfig::Stdio {
command: command_bin,
args: command_args,
env: env_map,
env_vars: Vec::new(),
cwd: None,
}
}
AddMcpTransportArgs {
streamable_http:
Some(AddMcpStreamableHttpArgs {
url,
bearer_token_env_var,
}),
..
} => McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers: None,
env_http_headers: None,
},
AddMcpTransportArgs { .. } => bail!("exactly one of --command or --url must be provided"),
};

let new_entry = McpServerConfig {
transport: transport.clone(),
enabled: true,
startup_timeout_sec: None,
tool_timeout_sec: None,
};
@@ -153,10 +262,30 @@ fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<(

println!("Added global MCP server '{name}'.");

if let McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var: None,
http_headers,
env_http_headers,
} = transport
&& matches!(supports_oauth_login(&url).await, Ok(true))
{
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers.clone(),
env_http_headers.clone(),
)
.await?;
println!("Successfully logged in.");
}

Ok(())
}
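The stdio branch reduces env handling to a `Vec<(String, String)>` to `Option<HashMap>` conversion, so an empty list serializes as an absent key. A minimal equivalent:

```rust
use std::collections::HashMap;

fn env_map(pairs: Vec<(String, String)>) -> Option<HashMap<String, String>> {
    if pairs.is_empty() {
        None // omit the `env` key from config.toml entirely
    } else {
        Some(pairs.into_iter().collect())
    }
}
```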

fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

let RemoveArgs { name } = remove_args;
@@ -165,6 +294,7 @@ fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) ->

let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
let mut servers = load_global_mcp_servers(&codex_home)
.await
.with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

let removed = servers.remove(&name).is_some();
@@ -183,36 +313,129 @@ fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) ->
Ok(())
}

fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

if !config.features.enabled(Feature::RmcpClient) {
bail!(
"OAuth login is only supported when experimental_use_rmcp_client is true in config.toml."
);
}

let LoginArgs { name } = login_args;

let Some(server) = config.mcp_servers.get(&name) else {
bail!("No MCP server named '{name}' found.");
};

let (url, http_headers, env_http_headers) = match &server.transport {
McpServerTransportConfig::StreamableHttp {
url,
http_headers,
env_http_headers,
..
} => (url.clone(), http_headers.clone(), env_http_headers.clone()),
_ => bail!("OAuth login is only supported for streamable HTTP servers."),
};

perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers,
env_http_headers,
)
.await?;
println!("Successfully logged in to MCP server '{name}'.");
Ok(())
}

async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

let LogoutArgs { name } = logout_args;

let server = config
.mcp_servers
.get(&name)
.ok_or_else(|| anyhow!("No MCP server named '{name}' found in configuration."))?;

let url = match &server.transport {
McpServerTransportConfig::StreamableHttp { url, .. } => url.clone(),
_ => bail!("OAuth logout is only supported for streamable_http transports."),
};

match delete_oauth_tokens(&name, &url, config.mcp_oauth_credentials_store_mode) {
Ok(true) => println!("Removed OAuth credentials for '{name}'."),
Ok(false) => println!("No OAuth credentials stored for '{name}'."),
Err(err) => return Err(anyhow!("failed to delete OAuth credentials: {err}")),
}

Ok(())
}

async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

let mut entries: Vec<_> = config.mcp_servers.iter().collect();
entries.sort_by(|(a, _), (b, _)| a.cmp(b));
let auth_statuses = compute_auth_statuses(
config.mcp_servers.iter(),
config.mcp_oauth_credentials_store_mode,
)
.await;

if list_args.json {
let json_entries: Vec<_> = entries
.into_iter()
.map(|(name, cfg)| {
let auth_status = auth_statuses
.get(name.as_str())
.copied()
.unwrap_or(McpAuthStatus::Unsupported);
let transport = match &cfg.transport {
McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => serde_json::json!({
"type": "stdio",
"command": command,
"args": args,
"env": env,
"env_vars": env_vars,
"cwd": cwd,
}),
McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
serde_json::json!({
"type": "streamable_http",
"url": url,
"bearer_token": bearer_token,
"bearer_token_env_var": bearer_token_env_var,
"http_headers": http_headers,
"env_http_headers": env_http_headers,
})
}
};

serde_json::json!({
"name": name,
"enabled": cfg.enabled,
"transport": transport,
"startup_timeout_sec": cfg
.startup_timeout_sec
@@ -220,6 +443,7 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
"tool_timeout_sec": cfg
.tool_timeout_sec
.map(|timeout| timeout.as_secs_f64()),
"auth_status": auth_status,
})
})
.collect();
@@ -233,45 +457,85 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
return Ok(());
}

let mut stdio_rows: Vec<[String; 4]> = Vec::new();
let mut http_rows: Vec<[String; 3]> = Vec::new();
let mut stdio_rows: Vec<[String; 7]> = Vec::new();
let mut http_rows: Vec<[String; 5]> = Vec::new();

for (name, cfg) in entries {
match &cfg.transport {
McpServerTransportConfig::Stdio { command, args, env } => {
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => {
let args_display = if args.is_empty() {
"-".to_string()
} else {
args.join(" ")
};
let env_display = match env.as_ref() {
None => "-".to_string(),
Some(map) if map.is_empty() => "-".to_string(),
Some(map) => {
let mut pairs: Vec<_> = map.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
pairs
.into_iter()
.map(|(k, v)| format!("{k}={v}"))
.collect::<Vec<_>>()
.join(", ")
}
};
stdio_rows.push([name.clone(), command.clone(), args_display, env_display]);
}
McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
let has_bearer = if bearer_token.is_some() {
"True"
let env_display = format_env_display(env.as_ref(), env_vars);
let cwd_display = cwd
.as_ref()
.map(|path| path.display().to_string())
.filter(|value| !value.is_empty())
.unwrap_or_else(|| "-".to_string());
let status = if cfg.enabled {
"enabled".to_string()
} else {
"False"
"disabled".to_string()
};
http_rows.push([name.clone(), url.clone(), has_bearer.into()]);
let auth_status = auth_statuses
.get(name.as_str())
.copied()
.unwrap_or(McpAuthStatus::Unsupported)
.to_string();
stdio_rows.push([
name.clone(),
command.clone(),
args_display,
env_display,
cwd_display,
status,
auth_status,
]);
}
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
..
} => {
let status = if cfg.enabled {
"enabled".to_string()
} else {
"disabled".to_string()
};
let auth_status = auth_statuses
.get(name.as_str())
.copied()
.unwrap_or(McpAuthStatus::Unsupported)
.to_string();
http_rows.push([
name.clone(),
url.clone(),
bearer_token_env_var.clone().unwrap_or("-".to_string()),
status,
auth_status,
]);
}
}
}

if !stdio_rows.is_empty() {
let mut widths = ["Name".len(), "Command".len(), "Args".len(), "Env".len()];
let mut widths = [
"Name".len(),
"Command".len(),
"Args".len(),
"Env".len(),
"Cwd".len(),
"Status".len(),
"Auth".len(),
];
for row in &stdio_rows {
for (i, cell) in row.iter().enumerate() {
widths[i] = widths[i].max(cell.len());
@@ -279,28 +543,40 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
}

println!(
"{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
"Name",
"Command",
"Args",
"Env",
"{name:<name_w$} {command:<cmd_w$} {args:<args_w$} {env:<env_w$} {cwd:<cwd_w$} {status:<status_w$} {auth:<auth_w$}",
name = "Name",
command = "Command",
args = "Args",
env = "Env",
cwd = "Cwd",
status = "Status",
auth = "Auth",
name_w = widths[0],
cmd_w = widths[1],
args_w = widths[2],
env_w = widths[3],
cwd_w = widths[4],
status_w = widths[5],
auth_w = widths[6],
);

for row in &stdio_rows {
println!(
"{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}",
row[0],
row[1],
row[2],
row[3],
"{name:<name_w$} {command:<cmd_w$} {args:<args_w$} {env:<env_w$} {cwd:<cwd_w$} {status:<status_w$} {auth:<auth_w$}",
name = row[0].as_str(),
command = row[1].as_str(),
args = row[2].as_str(),
env = row[3].as_str(),
cwd = row[4].as_str(),
status = row[5].as_str(),
auth = row[6].as_str(),
name_w = widths[0],
cmd_w = widths[1],
args_w = widths[2],
env_w = widths[3],
cwd_w = widths[4],
status_w = widths[5],
auth_w = widths[6],
);
}
}
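Both tables use the same two-pass alignment trick with Rust's runtime width arguments (`{value:<width$}`): first grow each column width to the widest cell, then left-pad. The pattern in isolation:

```rust
fn print_aligned(headers: [&str; 2], rows: &[[String; 2]]) {
    // Pass 1: widths start at the header width and grow to the widest cell.
    let mut widths = [headers[0].len(), headers[1].len()];
    for row in rows {
        for (i, cell) in row.iter().enumerate() {
            widths[i] = widths[i].max(cell.len());
        }
    }
    // Pass 2: `{:<w$}` left-aligns each cell to the computed width.
    println!("{:<w0$} {:<w1$}", headers[0], headers[1], w0 = widths[0], w1 = widths[1]);
    for row in rows {
        println!("{:<w0$} {:<w1$}", row[0], row[1], w0 = widths[0], w1 = widths[1]);
    }
}
```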
@@ -310,7 +586,13 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
}

if !http_rows.is_empty() {
let mut widths = ["Name".len(), "Url".len(), "Has Bearer Token".len()];
let mut widths = [
"Name".len(),
"Url".len(),
"Bearer Token Env Var".len(),
"Status".len(),
"Auth".len(),
];
for row in &http_rows {
for (i, cell) in row.iter().enumerate() {
widths[i] = widths[i].max(cell.len());
@@ -318,24 +600,32 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
}

println!(
"{:<name_w$} {:<url_w$} {:<token_w$}",
"Name",
"Url",
"Has Bearer Token",
"{name:<name_w$} {url:<url_w$} {token:<token_w$} {status:<status_w$} {auth:<auth_w$}",
name = "Name",
url = "Url",
token = "Bearer Token Env Var",
status = "Status",
auth = "Auth",
name_w = widths[0],
url_w = widths[1],
token_w = widths[2],
status_w = widths[3],
auth_w = widths[4],
);

for row in &http_rows {
println!(
"{:<name_w$} {:<url_w$} {:<token_w$}",
row[0],
row[1],
row[2],
"{name:<name_w$} {url:<url_w$} {token:<token_w$} {status:<status_w$} {auth:<auth_w$}",
name = row[0].as_str(),
url = row[1].as_str(),
token = row[2].as_str(),
status = row[3].as_str(),
auth = row[4].as_str(),
name_w = widths[0],
url_w = widths[1],
token_w = widths[2],
status_w = widths[3],
auth_w = widths[4],
);
}
}
@@ -343,9 +633,10 @@ fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Resul
Ok(())
}

fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
.await
.context("failed to load configuration")?;

let Some(server) = config.mcp_servers.get(&get_args.name) else {
@@ -354,20 +645,36 @@ fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<(

if get_args.json {
let transport = match &server.transport {
McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => serde_json::json!({
"type": "stdio",
"command": command,
"args": args,
"env": env,
"env_vars": env_vars,
"cwd": cwd,
}),
McpServerTransportConfig::StreamableHttp { url, bearer_token } => serde_json::json!({
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => serde_json::json!({
"type": "streamable_http",
"url": url,
"bearer_token": bearer_token,
"bearer_token_env_var": bearer_token_env_var,
"http_headers": http_headers,
"env_http_headers": env_http_headers,
}),
};
let output = serde_json::to_string_pretty(&serde_json::json!({
"name": get_args.name,
"enabled": server.enabled,
"transport": transport,
"startup_timeout_sec": server
.startup_timeout_sec
@@ -381,8 +688,15 @@ fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<(
}

println!("{}", get_args.name);
println!(" enabled: {}", server.enabled);
match &server.transport {
McpServerTransportConfig::Stdio { command, args, env } => {
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => {
println!(" transport: stdio");
println!(" command: {command}");
let args_display = if args.is_empty() {
@@ -391,10 +705,27 @@ fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<(
args.join(" ")
};
println!(" args: {args_display}");
let env_display = match env.as_ref() {
None => "-".to_string(),
Some(map) if map.is_empty() => "-".to_string(),
Some(map) => {
let cwd_display = cwd
.as_ref()
.map(|path| path.display().to_string())
.filter(|value| !value.is_empty())
.unwrap_or_else(|| "-".to_string());
println!(" cwd: {cwd_display}");
let env_display = format_env_display(env.as_ref(), env_vars);
println!(" env: {env_display}");
}
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
println!(" transport: streamable_http");
println!(" url: {url}");
let env_var = bearer_token_env_var.as_deref().unwrap_or("-");
println!(" bearer_token_env_var: {env_var}");
let headers_display = match http_headers {
Some(map) if !map.is_empty() => {
let mut pairs: Vec<_> = map.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
pairs
@@ -403,14 +734,22 @@ fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<(
.collect::<Vec<_>>()
.join(", ")
}
_ => "-".to_string(),
};
println!(" env: {env_display}");
}
McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
println!(" transport: streamable_http");
println!(" url: {url}");
let bearer = bearer_token.as_deref().unwrap_or("-");
println!(" bearer_token: {bearer}");
println!(" http_headers: {headers_display}");
let env_headers_display = match env_http_headers {
Some(map) if !map.is_empty() => {
let mut pairs: Vec<_> = map.iter().collect();
pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
pairs
.into_iter()
.map(|(k, v)| format!("{k}={v}"))
.collect::<Vec<_>>()
.join(", ")
}
_ => "-".to_string(),
};
println!(" env_http_headers: {env_headers_display}");
}
}
if let Some(timeout) = server.startup_timeout_sec {

@@ -13,8 +13,8 @@ fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
Ok(cmd)
}

#[test]
fn add_and_remove_server_updates_global_config() -> Result<()> {
#[tokio::test]
async fn add_and_remove_server_updates_global_config() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
@@ -24,17 +24,26 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
.success()
.stdout(contains("Added global MCP server 'docs'."));

let servers = load_global_mcp_servers(codex_home.path())?;
let servers = load_global_mcp_servers(codex_home.path()).await?;
assert_eq!(servers.len(), 1);
let docs = servers.get("docs").expect("server should exist");
match &docs.transport {
McpServerTransportConfig::Stdio { command, args, env } => {
McpServerTransportConfig::Stdio {
command,
args,
env,
env_vars,
cwd,
} => {
assert_eq!(command, "echo");
assert_eq!(args, &vec!["hello".to_string()]);
assert!(env.is_none());
assert!(env_vars.is_empty());
assert!(cwd.is_none());
}
other => panic!("unexpected transport: {other:?}"),
}
assert!(docs.enabled);

let mut remove_cmd = codex_command(codex_home.path())?;
remove_cmd
@@ -43,7 +52,7 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
.success()
.stdout(contains("Removed global MCP server 'docs'."));

let servers = load_global_mcp_servers(codex_home.path())?;
let servers = load_global_mcp_servers(codex_home.path()).await?;
assert!(servers.is_empty());

let mut remove_again_cmd = codex_command(codex_home.path())?;
@@ -53,14 +62,14 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
.success()
.stdout(contains("No MCP server named 'docs' found."));

let servers = load_global_mcp_servers(codex_home.path())?;
let servers = load_global_mcp_servers(codex_home.path()).await?;
assert!(servers.is_empty());

Ok(())
}

#[test]
fn add_with_env_preserves_key_order_and_values() -> Result<()> {
#[tokio::test]
async fn add_with_env_preserves_key_order_and_values() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
@@ -80,7 +89,7 @@ fn add_with_env_preserves_key_order_and_values() -> Result<()> {
.assert()
.success();

let servers = load_global_mcp_servers(codex_home.path())?;
let servers = load_global_mcp_servers(codex_home.path()).await?;
let envy = servers.get("envy").expect("server should exist");
let env = match &envy.transport {
McpServerTransportConfig::Stdio { env: Some(env), .. } => env,
@@ -90,6 +99,130 @@ fn add_with_env_preserves_key_order_and_values() -> Result<()> {
assert_eq!(env.len(), 2);
assert_eq!(env.get("FOO"), Some(&"bar".to_string()));
assert_eq!(env.get("ALPHA"), Some(&"beta".to_string()));
assert!(envy.enabled);

Ok(())
}

#[tokio::test]
async fn add_streamable_http_without_manual_token() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
add_cmd
.args(["mcp", "add", "github", "--url", "https://example.com/mcp"])
.assert()
.success();

let servers = load_global_mcp_servers(codex_home.path()).await?;
let github = servers.get("github").expect("github server should exist");
match &github.transport {
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
assert_eq!(url, "https://example.com/mcp");
assert!(bearer_token_env_var.is_none());
assert!(http_headers.is_none());
assert!(env_http_headers.is_none());
}
other => panic!("unexpected transport: {other:?}"),
}
assert!(github.enabled);

assert!(!codex_home.path().join(".credentials.json").exists());
assert!(!codex_home.path().join(".env").exists());

Ok(())
}

#[tokio::test]
async fn add_streamable_http_with_custom_env_var() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
add_cmd
.args([
"mcp",
"add",
"issues",
"--url",
"https://example.com/issues",
"--bearer-token-env-var",
"GITHUB_TOKEN",
])
.assert()
.success();

let servers = load_global_mcp_servers(codex_home.path()).await?;
let issues = servers.get("issues").expect("issues server should exist");
match &issues.transport {
McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var,
http_headers,
env_http_headers,
} => {
assert_eq!(url, "https://example.com/issues");
assert_eq!(bearer_token_env_var.as_deref(), Some("GITHUB_TOKEN"));
assert!(http_headers.is_none());
assert!(env_http_headers.is_none());
}
other => panic!("unexpected transport: {other:?}"),
}
assert!(issues.enabled);
Ok(())
}

#[tokio::test]
async fn add_streamable_http_rejects_removed_flag() -> Result<()> {
let let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
add_cmd
..args([
"mcp",
"add",
"github",
"--url",
"https://example.com/mcp",
"--with-bearer-token",
])
.assert()
.failure()
.stderr(contains("--with-bearer-token"));

let servers = load_global_mcp_servers(codex_home.path()).await?;
assert!(servers.is_empty());

Ok(())
}

#[tokio::test]
async fn add_cant_add_command_and_url() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add_cmd = codex_command(codex_home.path())?;
add_cmd
.args([
"mcp",
"add",
"github",
"--url",
"https://example.com/mcp",
"--command",
"--",
"echo",
"hello",
])
.assert()
.failure()
.stderr(contains("unexpected argument '--command' found"));

let servers = load_global_mcp_servers(codex_home.path()).await?;
assert!(servers.is_empty());

Ok(())
}

@@ -1,6 +1,10 @@
use std::path::Path;

use anyhow::Result;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::write_global_mcp_servers;
use codex_core::config_types::McpServerTransportConfig;
use predicates::prelude::PredicateBooleanExt;
use predicates::str::contains;
use pretty_assertions::assert_eq;
use serde_json::Value as JsonValue;
@@ -26,8 +30,8 @@ fn list_shows_empty_state() -> Result<()> {
Ok(())
}

#[test]
fn list_and_get_render_expected_output() -> Result<()> {
#[tokio::test]
async fn list_and_get_render_expected_output() -> Result<()> {
let codex_home = TempDir::new()?;

let mut add = codex_command(codex_home.path())?;
@@ -45,6 +49,18 @@ fn list_and_get_render_expected_output() -> Result<()> {
.assert()
.success();

let mut servers = load_global_mcp_servers(codex_home.path()).await?;
let docs_entry = servers
.get_mut("docs")
.expect("docs server should exist after add");
match &mut docs_entry.transport {
McpServerTransportConfig::Stdio { env_vars, .. } => {
*env_vars = vec!["APP_TOKEN".to_string(), "WORKSPACE_ID".to_string()];
}
other => panic!("unexpected transport: {other:?}"),
}
write_global_mcp_servers(codex_home.path(), &servers)?;

let mut list_cmd = codex_command(codex_home.path())?;
let list_output = list_cmd.args(["mcp", "list"]).output()?;
assert!(list_output.status.success());
@@ -53,6 +69,12 @@ fn list_and_get_render_expected_output() -> Result<()> {
assert!(stdout.contains("docs"));
assert!(stdout.contains("docs-server"));
assert!(stdout.contains("TOKEN=secret"));
assert!(stdout.contains("APP_TOKEN=$APP_TOKEN"));
assert!(stdout.contains("WORKSPACE_ID=$WORKSPACE_ID"));
assert!(stdout.contains("Status"));
assert!(stdout.contains("Auth"));
assert!(stdout.contains("enabled"));
assert!(stdout.contains("Unsupported"));

let mut list_json_cmd = codex_command(codex_home.path())?;
let json_output = list_json_cmd.args(["mcp", "list", "--json"]).output()?;
@@ -64,6 +86,7 @@ fn list_and_get_render_expected_output() -> Result<()> {
json!([
{
"name": "docs",
"enabled": true,
"transport": {
"type": "stdio",
"command": "docs-server",
@@ -73,10 +96,16 @@ fn list_and_get_render_expected_output() -> Result<()> {
],
"env": {
"TOKEN": "secret"
}
},
"env_vars": [
"APP_TOKEN",
"WORKSPACE_ID"
],
"cwd": null
},
"startup_timeout_sec": null,
"tool_timeout_sec": null
"tool_timeout_sec": null,
"auth_status": "unsupported"
}
]
)
@@ -91,6 +120,9 @@ fn list_and_get_render_expected_output() -> Result<()> {
assert!(stdout.contains("command: docs-server"));
assert!(stdout.contains("args: --port 4000"));
assert!(stdout.contains("env: TOKEN=secret"));
assert!(stdout.contains("APP_TOKEN=$APP_TOKEN"));
assert!(stdout.contains("WORKSPACE_ID=$WORKSPACE_ID"));
assert!(stdout.contains("enabled: true"));
assert!(stdout.contains("remove: codex mcp remove docs"));

let mut get_json_cmd = codex_command(codex_home.path())?;
@@ -98,7 +130,7 @@ fn list_and_get_render_expected_output() -> Result<()> {
.args(["mcp", "get", "docs", "--json"])
.assert()
.success()
.stdout(contains("\"name\": \"docs\""));
.stdout(contains("\"name\": \"docs\"").and(contains("\"enabled\": true")));

Ok(())
}
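Judging by the assertions above, `format_env_display` appears to render concrete `env` pairs as `KEY=value` and declared pass-through `env_vars` as `KEY=$KEY`. A hedged sketch of that assumed behavior (the real helper lives in `codex_common::format_env_display` and may differ in detail):

```rust
use std::collections::HashMap;

// Assumed behavior: concrete values print as KEY=value, pass-through
// variables as KEY=$KEY, joined with ", "; "-" when there is nothing to show.
fn format_env_display_sketch(
    env: Option<&HashMap<String, String>>,
    env_vars: &[String],
) -> String {
    let mut parts: Vec<String> = Vec::new();
    if let Some(map) = env {
        let mut pairs: Vec<_> = map.iter().collect();
        pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
        parts.extend(pairs.into_iter().map(|(k, v)| format!("{k}={v}")));
    }
    parts.extend(env_vars.iter().map(|k| format!("{k}=${k}")));
    if parts.is_empty() {
        "-".to_string()
    } else {
        parts.join(", ")
    }
}
```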

@@ -1,7 +1,7 @@
[package]
edition = "2024"
name = "codex-cloud-tasks"
version = { workspace = true }
edition = "2024"

[lib]
name = "codex_cloud_tasks"
@@ -11,26 +11,28 @@ path = "src/lib.rs"
workspace = true

[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
anyhow = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
clap = { workspace = true, features = ["derive"] }
codex-cloud-tasks-client = { path = "../cloud-tasks-client", features = [
"mock",
"online",
] }
codex-common = { path = "../common", features = ["cli"] }
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
tracing = { version = "0.1.41", features = ["log"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
codex-cloud-tasks-client = { path = "../cloud-tasks-client", features = ["mock", "online"] }
ratatui = { version = "0.29.0" }
crossterm = { version = "0.28.1", features = ["event-stream"] }
tokio-stream = "0.1.17"
chrono = { version = "0.4", features = ["serde"] }
codex-login = { path = "../login" }
codex-core = { path = "../core" }
throbber-widgets-tui = "0.8.0"
base64 = "0.22"
serde_json = "1"
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1", features = ["derive"] }
unicode-width = "0.1"
codex-login = { path = "../login" }
codex-tui = { path = "../tui" }
crossterm = { workspace = true, features = ["event-stream"] }
ratatui = { workspace = true }
reqwest = { workspace = true, features = ["json"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tokio-stream = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
unicode-width = { workspace = true }

[dev-dependencies]
async-trait = "0.1"
async-trait = { workspace = true }

@@ -1,4 +1,5 @@
use std::time::Duration;
use std::time::Instant;

// Environment filter data models for the TUI
#[derive(Clone, Debug, Default)]
@@ -42,15 +43,13 @@ use crate::scrollable_diff::ScrollableDiff;
use codex_cloud_tasks_client::CloudBackend;
use codex_cloud_tasks_client::TaskId;
use codex_cloud_tasks_client::TaskSummary;
use throbber_widgets_tui::ThrobberState;

#[derive(Default)]
pub struct App {
pub tasks: Vec<TaskSummary>,
pub selected: usize,
pub status: String,
pub diff_overlay: Option<DiffOverlay>,
pub throbber: ThrobberState,
pub spinner_start: Option<Instant>,
pub refresh_inflight: bool,
pub details_inflight: bool,
// Environment filter state
@@ -82,7 +81,7 @@ impl App {
selected: 0,
status: "Press r to refresh".to_string(),
diff_overlay: None,
throbber: ThrobberState::default(),
spinner_start: None,
refresh_inflight: false,
details_inflight: false,
env_filter: None,

@@ -1,3 +1,4 @@
use clap::Args;
use clap::Parser;
use codex_common::CliConfigOverrides;

@@ -6,4 +7,43 @@ use codex_common::CliConfigOverrides;
pub struct Cli {
#[clap(skip)]
pub config_overrides: CliConfigOverrides,

#[command(subcommand)]
pub command: Option<Command>,
}

#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Submit a new Codex Cloud task without launching the TUI.
Exec(ExecCommand),
}

#[derive(Debug, Args)]
pub struct ExecCommand {
/// Task prompt to run in Codex Cloud.
#[arg(value_name = "QUERY")]
pub query: Option<String>,

/// Target environment identifier (see `codex cloud` to browse).
#[arg(long = "env", value_name = "ENV_ID")]
pub environment: String,

/// Number of assistant attempts (best-of-N).
#[arg(
long = "attempts",
default_value_t = 1usize,
value_parser = parse_attempts
)]
pub attempts: usize,
}

fn parse_attempts(input: &str) -> Result<usize, String> {
let value: usize = input
.parse()
.map_err(|_| "attempts must be an integer between 1 and 4".to_string())?;
if (1..=4).contains(&value) {
Ok(value)
} else {
Err("attempts must be between 1 and 4".to_string())
}
}
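The validator simply rejects anything outside 1..=4. For instance (hypothetical test exercising only the function shown above):

```rust
#[test]
fn attempts_must_be_between_one_and_four() {
    assert_eq!(parse_attempts("1"), Ok(1));
    assert_eq!(parse_attempts("4"), Ok(4));
    assert!(parse_attempts("0").is_err());
    assert!(parse_attempts("five").is_err());
}
```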

@@ -7,7 +7,9 @@ mod ui;
pub mod util;
pub use cli::Cli;

use anyhow::anyhow;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
@@ -23,6 +25,175 @@ struct ApplyJob {
diff_override: Option<String>,
}

struct BackendContext {
backend: Arc<dyn codex_cloud_tasks_client::CloudBackend>,
base_url: String,
}

async fn init_backend(user_agent_suffix: &str) -> anyhow::Result<BackendContext> {
let use_mock = matches!(
std::env::var("CODEX_CLOUD_TASKS_MODE").ok().as_deref(),
Some("mock") | Some("MOCK")
);
let base_url = std::env::var("CODEX_CLOUD_TASKS_BASE_URL")
.unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string());

set_user_agent_suffix(user_agent_suffix);

if use_mock {
return Ok(BackendContext {
backend: Arc::new(codex_cloud_tasks_client::MockClient),
base_url,
});
}

let ua = codex_core::default_client::get_codex_user_agent();
let mut http = codex_cloud_tasks_client::HttpClient::new(base_url.clone())?.with_user_agent(ua);
let style = if base_url.contains("/backend-api") {
"wham"
} else {
"codex-api"
};
append_error_log(format!("startup: base_url={base_url} path_style={style}"));

let auth = match codex_core::config::find_codex_home()
.ok()
.map(|home| codex_login::AuthManager::new(home, false))
.and_then(|am| am.auth())
{
Some(auth) => auth,
None => {
eprintln!(
"Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
);
std::process::exit(1);
}
};

if let Some(acc) = auth.get_account_id() {
append_error_log(format!("auth: mode=ChatGPT account_id={acc}"));
}

let token = match auth.get_token().await {
Ok(t) if !t.is_empty() => t,
_ => {
eprintln!(
"Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
);
std::process::exit(1);
}
};

http = http.with_bearer_token(token.clone());
if let Some(acc) = auth
.get_account_id()
.or_else(|| util::extract_chatgpt_account_id(&token))
{
append_error_log(format!("auth: set ChatGPT-Account-Id header: {acc}"));
http = http.with_chatgpt_account_id(acc);
}

Ok(BackendContext {
backend: Arc::new(http),
base_url,
})
}

async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
let crate::cli::ExecCommand {
query,
environment,
attempts,
} = args;
let ctx = init_backend("codex_cloud_tasks_exec").await?;
let prompt = resolve_query_input(query)?;
let env_id = resolve_environment_id(&ctx, &environment).await?;
let created = codex_cloud_tasks_client::CloudBackend::create_task(
&*ctx.backend,
&env_id,
&prompt,
"main",
false,
attempts,
)
.await?;
let url = util::task_url(&ctx.base_url, &created.id.0);
println!("{url}");
Ok(())
}

async fn resolve_environment_id(ctx: &BackendContext, requested: &str) -> anyhow::Result<String> {
let trimmed = requested.trim();
if trimmed.is_empty() {
return Err(anyhow!("environment id must not be empty"));
}
let normalized = util::normalize_base_url(&ctx.base_url);
let headers = util::build_chatgpt_headers().await;
let environments = crate::env_detect::list_environments(&normalized, &headers).await?;
if environments.is_empty() {
return Err(anyhow!(
"no cloud environments are available for this workspace"
));
}

if let Some(row) = environments.iter().find(|row| row.id == trimmed) {
return Ok(row.id.clone());
}

let label_matches = environments
.iter()
.filter(|row| {
row.label
.as_deref()
.map(|label| label.eq_ignore_ascii_case(trimmed))
.unwrap_or(false)
})
.collect::<Vec<_>>();
match label_matches.as_slice() {
[] => Err(anyhow!(
"environment '{trimmed}' not found; run `codex cloud` to list available environments"
)),
[single] => Ok(single.id.clone()),
[first, rest @ ..] => {
let first_id = &first.id;
if rest.iter().all(|row| row.id == *first_id) {
Ok(first_id.clone())
} else {
Err(anyhow!(
"environment label '{trimmed}' is ambiguous; run `codex cloud` to pick the desired environment id"
))
}
}
}
}
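The slice patterns encode three outcomes for a label lookup: no match is an error, a single match wins, and multiple matches are accepted only when they all resolve to the same id. The same rule in isolation (illustrative helper, not from the diff):

```rust
// `matches` holds the ids of every environment whose label matched.
fn pick_environment(matches: &[String]) -> Result<String, &'static str> {
    match matches {
        [] => Err("not found"),
        [single] => Ok(single.clone()),
        [first, rest @ ..] if rest.iter().all(|id| id == first) => Ok(first.clone()),
        _ => Err("ambiguous label"),
    }
}
```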
|
||||
fn resolve_query_input(query_arg: Option<String>) -> anyhow::Result<String> {
|
||||
match query_arg {
|
||||
Some(q) if q != "-" => Ok(q),
|
||||
maybe_dash => {
|
||||
let force_stdin = matches!(maybe_dash.as_deref(), Some("-"));
|
||||
if std::io::stdin().is_terminal() && !force_stdin {
|
||||
return Err(anyhow!(
|
||||
"no query provided. Pass one as an argument or pipe it via stdin."
|
||||
));
|
||||
}
|
||||
if !force_stdin {
|
||||
eprintln!("Reading query from stdin...");
|
||||
}
|
||||
let mut buffer = String::new();
|
||||
std::io::stdin()
|
||||
.read_to_string(&mut buffer)
|
||||
.map_err(|e| anyhow!("failed to read query from stdin: {e}"))?;
|
||||
if buffer.trim().is_empty() {
|
||||
return Err(anyhow!(
|
||||
"no query provided via stdin (received empty input)."
|
||||
));
|
||||
}
|
||||
Ok(buffer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn level_from_status(status: codex_cloud_tasks_client::ApplyStatus) -> app::ApplyResultLevel {
|
||||
match status {
|
||||
codex_cloud_tasks_client::ApplyStatus::Success => app::ApplyResultLevel::Success,
|
||||
@@ -148,7 +319,14 @@ fn spawn_apply(
|
||||
// (no standalone patch summarizer needed – UI displays raw diffs)
|
||||
|
||||
/// Entry point for the `codex cloud` subcommand.
pub async fn run_main(_cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
    if let Some(command) = cli.command {
        return match command {
            crate::cli::Command::Exec(args) => run_exec_command(args).await,
        };
    }
    let Cli { .. } = cli;

    // Very minimal logging setup; mirrors other crates' pattern.
    let default_level = "error";
    let _ = tracing_subscriber::fmt()
@@ -162,72 +340,8 @@ pub async fn run_main(_cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> a
        .try_init();

    info!("Launching Cloud Tasks list UI");
    set_user_agent_suffix("codex_cloud_tasks_tui");

    // Default to online unless explicitly configured to use mock.
    let use_mock = matches!(
        std::env::var("CODEX_CLOUD_TASKS_MODE").ok().as_deref(),
        Some("mock") | Some("MOCK")
    );

    let backend: Arc<dyn codex_cloud_tasks_client::CloudBackend> = if use_mock {
        Arc::new(codex_cloud_tasks_client::MockClient)
    } else {
        // Build an HTTP client against the configured (or default) base URL.
        let base_url = std::env::var("CODEX_CLOUD_TASKS_BASE_URL")
            .unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string());
        let ua = codex_core::default_client::get_codex_user_agent();
        let mut http =
            codex_cloud_tasks_client::HttpClient::new(base_url.clone())?.with_user_agent(ua);
        // Log which base URL and path style we're going to use.
        let style = if base_url.contains("/backend-api") {
            "wham"
        } else {
            "codex-api"
        };
        append_error_log(format!("startup: base_url={base_url} path_style={style}"));

        // Require ChatGPT login (SWIC). Exit with a clear message if missing.
        let _token = match codex_core::config::find_codex_home()
            .ok()
            .map(|home| codex_login::AuthManager::new(home, false))
            .and_then(|am| am.auth())
        {
            Some(auth) => {
                // Log account context for debugging workspace selection.
                if let Some(acc) = auth.get_account_id() {
                    append_error_log(format!("auth: mode=ChatGPT account_id={acc}"));
                }
                match auth.get_token().await {
                    Ok(t) if !t.is_empty() => {
                        // Attach token and ChatGPT-Account-Id header if available
                        http = http.with_bearer_token(t.clone());
                        if let Some(acc) = auth
                            .get_account_id()
                            .or_else(|| util::extract_chatgpt_account_id(&t))
                        {
                            append_error_log(format!("auth: set ChatGPT-Account-Id header: {acc}"));
                            http = http.with_chatgpt_account_id(acc);
                        }
                        t
                    }
                    _ => {
                        eprintln!(
                            "Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
                        );
                        std::process::exit(1);
                    }
                }
            }
            None => {
                eprintln!(
                    "Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
                );
                std::process::exit(1);
            }
        };
        Arc::new(http)
    };
    let BackendContext { backend, .. } = init_backend("codex_cloud_tasks_tui").await?;
    let backend = backend;

    // Terminal setup
    use crossterm::ExecutableCommand;
@@ -400,16 +514,20 @@ pub async fn run_main(_cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> a
            let _ = frame_tx.send(Instant::now() + codex_tui::ComposerInput::recommended_flush_delay());
        }
    }
    // Advance throbber only while loading.
    // Keep spinner pulsing only while loading.
    if app.refresh_inflight
        || app.details_inflight
        || app.env_loading
        || app.apply_preflight_inflight
        || app.apply_inflight
    {
        app.throbber.calc_next();
        if app.spinner_start.is_none() {
            app.spinner_start = Some(Instant::now());
        }
        needs_redraw = true;
        let _ = frame_tx.send(Instant::now() + Duration::from_millis(100));
        let _ = frame_tx.send(Instant::now() + Duration::from_millis(600));
    } else {
        app.spinner_start = None;
    }
    render_if_needed(&mut terminal, &mut app, &mut needs_redraw)?;
}

@@ -16,6 +16,7 @@ use ratatui::widgets::ListState;
use ratatui::widgets::Padding;
use ratatui::widgets::Paragraph;
use std::sync::OnceLock;
use std::time::Instant;

use crate::app::App;
use crate::app::AttemptView;
@@ -229,7 +230,7 @@ fn draw_list(frame: &mut Frame, area: Rect, app: &mut App) {

    // In-box spinner during initial/refresh loads
    if app.refresh_inflight {
        draw_centered_spinner(frame, inner, &mut app.throbber, "Loading tasks…");
        draw_centered_spinner(frame, inner, &mut app.spinner_start, "Loading tasks…");
    }
}

@@ -291,7 +292,7 @@ fn draw_footer(frame: &mut Frame, area: Rect, app: &mut App) {
        || app.apply_preflight_inflight
        || app.apply_inflight
    {
        draw_inline_spinner(frame, top[1], &mut app.throbber, "Loading…");
        draw_inline_spinner(frame, top[1], &mut app.spinner_start, "Loading…");
    } else {
        frame.render_widget(Clear, top[1]);
    }
@@ -449,7 +450,12 @@ fn draw_diff_overlay(frame: &mut Frame, area: Rect, app: &mut App) {
        .map(|o| o.sd.wrapped_lines().is_empty())
        .unwrap_or(true);
    if app.details_inflight && raw_empty {
        draw_centered_spinner(frame, content_area, &mut app.throbber, "Loading details…");
        draw_centered_spinner(
            frame,
            content_area,
            &mut app.spinner_start,
            "Loading details…",
        );
    } else {
        let scroll = app
            .diff_overlay
@@ -494,11 +500,11 @@ pub fn draw_apply_modal(frame: &mut Frame, area: Rect, app: &mut App) {
    frame.render_widget(header, rows[0]);
    // Body: spinner while preflight/apply runs; otherwise show result message and path lists
    if app.apply_preflight_inflight {
        draw_centered_spinner(frame, rows[1], &mut app.throbber, "Checking…");
        draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Checking…");
    } else if app.apply_inflight {
        draw_centered_spinner(frame, rows[1], &mut app.throbber, "Applying…");
        draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Applying…");
    } else if m.result_message.is_none() {
        draw_centered_spinner(frame, rows[1], &mut app.throbber, "Loading…");
        draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Loading…");
    } else if let Some(msg) = &m.result_message {
        let mut body_lines: Vec<Line> = Vec::new();
        let first = match m.result_level {
@@ -859,29 +865,29 @@ fn format_relative_time(ts: chrono::DateTime<Utc>) -> String {
fn draw_inline_spinner(
    frame: &mut Frame,
    area: Rect,
    state: &mut throbber_widgets_tui::ThrobberState,
    spinner_start: &mut Option<Instant>,
    label: &str,
) {
    use ratatui::style::Style;
    use throbber_widgets_tui::BRAILLE_EIGHT;
    use throbber_widgets_tui::Throbber;
    use throbber_widgets_tui::WhichUse;
    let w = Throbber::default()
        .label(label)
        .style(Style::default().cyan())
        .throbber_style(Style::default().magenta().bold())
        .throbber_set(BRAILLE_EIGHT)
        .use_type(WhichUse::Spin);
    frame.render_stateful_widget(w, area, state);
    use ratatui::widgets::Paragraph;
    let start = spinner_start.get_or_insert_with(Instant::now);
    let blink_on = (start.elapsed().as_millis() / 600).is_multiple_of(2);
    let dot = if blink_on {
        "• ".into()
    } else {
        "◦ ".dim()
    };
    let label = label.cyan();
    let line = Line::from(vec![dot, label]);
    frame.render_widget(Paragraph::new(line), area);
}

fn draw_centered_spinner(
    frame: &mut Frame,
    area: Rect,
    state: &mut throbber_widgets_tui::ThrobberState,
    spinner_start: &mut Option<Instant>,
    label: &str,
) {
    // Center a 1xN throbber within the given rect
    // Center a 1xN spinner within the given rect
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
@@ -898,7 +904,7 @@ fn draw_centered_spinner(
            Constraint::Percentage(50),
        ])
        .split(rows[1]);
    draw_inline_spinner(frame, cols[1], state, label);
    draw_inline_spinner(frame, cols[1], spinner_start, label);
}

// Styling helpers for diff rendering live inline where used.
@@ -918,7 +924,12 @@ pub fn draw_env_modal(frame: &mut Frame, area: Rect, app: &mut App) {
    let content = overlay_content(inner);

    if app.env_loading {
        draw_centered_spinner(frame, content, &mut app.throbber, "Loading environments…");
        draw_centered_spinner(
            frame,
            content,
            &mut app.spinner_start,
            "Loading environments…",
        );
        return;
    }

@@ -91,3 +91,18 @@ pub async fn build_chatgpt_headers() -> HeaderMap {
    }
    headers
}

/// Construct a browser-friendly task URL for the given backend base URL.
pub fn task_url(base_url: &str, task_id: &str) -> String {
    let normalized = normalize_base_url(base_url);
    if let Some(root) = normalized.strip_suffix("/backend-api") {
        return format!("{root}/codex/tasks/{task_id}");
    }
    if let Some(root) = normalized.strip_suffix("/api/codex") {
        return format!("{root}/codex/tasks/{task_id}");
    }
    if normalized.ends_with("/codex") {
        return format!("{normalized}/tasks/{task_id}");
    }
    format!("{normalized}/codex/tasks/{task_id}")
}

24  codex-rs/codex-infty/Cargo.toml  Normal file
@@ -0,0 +1,24 @@
[package]
name = "codex-infty"
version = { workspace = true }
edition = "2024"

[dependencies]
anyhow = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
dirs = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread", "signal"] }
tokio-stream = { workspace = true }
tokio-util = { workspace = true }
tracing = { workspace = true, features = ["log"] }
futures = "0.3"

[dev-dependencies]
core_test_support = { path = "../core/tests/common" }
tempfile = { workspace = true }
wiremock = { workspace = true }

196  codex-rs/codex-infty/README.md  Normal file
@@ -0,0 +1,196 @@
# Codex Infty

Codex Infty is a small orchestration layer that coordinates multiple Codex roles (Solver, Director, Verifier(s)) to drive longer, multi‑step objectives with minimal human intervention. It provides:

- A run orchestrator that routes messages between roles and advances the workflow.
- A durable run store on disk with metadata and standard subfolders.
- Default role prompts for Solver/Director/Verifier.
- A lightweight progress reporting hook for UIs/CLIs.

The crate is designed to be embedded (via the library API) and also powers the `codex infty` CLI commands.

## High‑Level Flow

```
objective → Solver
Solver → direction_request → Director → directive → Solver
… (iterate) …
Solver → final_delivery → Orchestrator returns RunOutcome
```

- The Solver always speaks structured JSON. The orchestrator parses those messages and decides the next hop (a simplified parsing sketch follows this list).
- The Director provides crisp guidance (also JSON) that is forwarded back to the Solver.
- One or more Verifiers may assess the final deliverable; the orchestrator aggregates results and reports a summary to the Solver.
- On final_delivery, the orchestrator resolves and validates the deliverable path and returns the `RunOutcome`.
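
As a rough illustration, the two signal shapes can be modeled as a serde‑tagged enum. This is a simplified sketch, not the crate's code; the real parser (`parse_solver_signal` in `codex-infty/src/roles/solver.rs`) additionally tolerates fenced JSON, per the contract described below:

```rust
use serde::Deserialize;

// Simplified mirror of the solver signal contract (see "Solver Signal Contract" below).
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum SolverSignal {
    DirectionRequest { prompt: Option<String> },
    FinalDelivery { deliverable_path: Option<String>, summary: Option<String> },
}

fn parse_solver_signal_sketch(message: &str) -> Option<SolverSignal> {
    // Extra fields such as "claim_path": null are ignored by serde's default behavior.
    serde_json::from_str(message.trim()).ok()
}
```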

## Directory Layout (Run Store)

When a run is created, a directory is initialized with this structure:

```
<runs_root>/<run_id>/
  artifacts/    # long‑lived artifacts produced by the Solver
  memory/       # durable notes, claims, context
  index/        # indexes and caches
  deliverable/  # final output(s) assembled by the Solver
  run.json      # run metadata (id, timestamps, roles)
```

See: `codex-infty/src/run_store.rs`.

- The orchestrator persists rollout paths and optional config paths for each role into `run.json`.
- Metadata timestamps are updated on significant events (role spawns, handoffs, final delivery).
- Final deliverables must remain within the run directory. Paths are canonicalized and validated (see the sketch after this list).
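
A minimal sketch of that containment check, under the assumption that the path is resolved relative to the run directory (the crate's actual helper lives in `codex-infty/src/utils.rs`; the name `resolve_within` here is illustrative):

```rust
use std::path::{Path, PathBuf};

// Illustrative only: canonicalize, then require the result to stay under the run dir.
fn resolve_within(run_dir: &Path, candidate: &str) -> anyhow::Result<PathBuf> {
    let run_dir = run_dir.canonicalize()?;
    let resolved = run_dir.join(candidate).canonicalize()?; // resolves `..` and symlinks
    anyhow::ensure!(
        resolved.starts_with(&run_dir),
        "deliverable path escapes the run directory"
    );
    Ok(resolved)
}
```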

## Roles and Prompts

Default base instructions are injected per role if the provided `Config` has none:

- Solver: `codex-infty/src/prompts/solver.md`
- Director: `codex-infty/src/prompts/director.md`
- Verifier: `codex-infty/src/prompts/verifier.md`

You can provide your own instructions by pre‑populating `Config.base_instructions`.

## Solver Signal Contract

The Solver communicates intent using JSON messages (possibly wrapped in a fenced block). The orchestrator accepts two shapes:

- Direction request (sent to Director):

  ```json
  {"type":"direction_request","prompt":"<question or decision>"}
  ```

- Final delivery (completes the run):

  ```json
  {"type":"final_delivery","deliverable_path":"deliverable/summary.txt","summary":"<short text>"}
  ```

JSON may be fenced as ```json … ```; the orchestrator will strip the fence.
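
The stripping is a simple prefix/suffix trim; here is a condensed version of the helper in `codex-infty/src/roles/mod.rs` (the full function appears later in this diff):

```rust
fn strip_json_code_fence(text: &str) -> Option<&str> {
    let trimmed = text.trim();
    // Try the most specific fence openers first, then a bare fence.
    for prefix in ["```json", "```JSON", "```"] {
        if let Some(rest) = trimmed.strip_prefix(prefix) {
            return rest.strip_suffix("```").map(str::trim);
        }
    }
    None
}
```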

## Key Types and Modules

- Orchestrator: `codex-infty/src/orchestrator.rs`
  - `InftyOrchestrator`: spawns/resumes role sessions, drives the event loop, and routes signals.
  - `execute_new_run`: one‑shot helper that spawns and then drives.
  - `spawn_run`: set up sessions and the run store.
  - `call_role`, `relay_assistant_to_role`, `post_to_role`, `await_first_assistant`, `stream_events`: utilities when integrating custom flows.

- Run store: `codex-infty/src/run_store.rs`
  - `RunStore`, `RunMetadata`, `RoleMetadata`: metadata and persistence helpers.

- Types: `codex-infty/src/types.rs`
  - `RoleConfig`: wraps a `Config` and sets sensible defaults for autonomous flows (no approvals, full sandbox access). Also used to persist optional config paths.
  - `RunParams`: input to spawn runs.
  - `RunExecutionOptions`: per‑run options (objective, timeouts).
  - `RunOutcome`: returned on successful final delivery.

- Signals: `codex-infty/src/signals.rs`
  - DTOs for director responses and verifier verdicts, and the aggregated summary type.

- Progress: `codex-infty/src/progress.rs`
  - `ProgressReporter` trait: hook for UIs/CLIs to observe solver/director/verifier activity.

## Orchestrator Workflow (Details)

1. Spawn or resume role sessions (Solver, Director, and zero or more Verifiers). Default prompts are applied if the role’s `Config` has no base instructions.
2. Optionally post an `objective` to the Solver. The progress reporter is notified and the orchestrator waits for the first Solver signal.
3. On `direction_request`:
   - Post a structured request to the Director and await the first assistant message.
   - Parse it into a `DirectiveResponse` and forward the normalized JSON to the Solver.
4. On `final_delivery`:
   - Canonicalize and validate that `deliverable_path` stays within the run directory.
   - Optionally run a verification pass using configured Verifier(s), aggregate results, and post a summary back to the Solver.
   - Notify the progress reporter, touch the run store, and return `RunOutcome`.

## Library Usage

```rust
use std::sync::Arc;
use codex_core::{CodexAuth, config::Config};
use codex_infty::{InftyOrchestrator, RoleConfig, RunParams, RunExecutionOptions};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 1) Load or build a Config for each role
    let solver_cfg: Config = load_config();
    let mut director_cfg = solver_cfg.clone();
    director_cfg.model = "o4-mini".into();

    // 2) Build role configs
    let solver = RoleConfig::new("solver", solver_cfg.clone());
    let director = RoleConfig::new("director", director_cfg);
    let verifiers = vec![RoleConfig::new("verifier-alpha", solver_cfg.clone())];

    // 3) Create an orchestrator (using default runs root)
    let auth = CodexAuth::from_api_key("sk-…");
    let orchestrator = InftyOrchestrator::new(auth)?;

    // 4) Execute a new run with an objective
    let params = RunParams {
        run_id: "my-run".into(),
        run_root: None, // use default ~/.codex/infty/<run_id>
        solver,
        director,
        verifiers,
    };
    let mut opts = RunExecutionOptions::default();
    opts.objective = Some("Implement feature X".into());

    let outcome = orchestrator.execute_new_run(params, opts).await?;
    println!("deliverable: {}", outcome.deliverable_path.display());
    Ok(())
}
# fn load_config() -> codex_core::config::Config { codex_core::config::Config::default() }
```

Note: Resuming runs is currently disabled.

## CLI Quickstart

The CLI (`codex`) exposes Infty helpers under the `infty` subcommand. Examples:

```bash
# Create a run and immediately drive toward completion
codex infty create --run-id demo --objective "Build and test feature"

# Inspect runs
codex infty list
codex infty show demo

# Sending one-off messages to stored runs is currently disabled
```

Flags allow customizing the Director’s model and reasoning effort; see `codex infty create --help`.

## Progress Reporting

Integrate your UI by implementing `ProgressReporter` and attaching it with `InftyOrchestrator::with_progress(...)`. You’ll receive callbacks on key milestones (objective posted, solver messages, director response, verification summaries, final delivery, etc.).
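
For example, a minimal reporter that logs a few milestones to stderr; since every trait method has an empty default body, you only override what you need (sketch only, reusing the trait as defined in `codex-infty/src/progress.rs`):

```rust
use std::path::Path;

use codex_infty::ProgressReporter;

struct StderrProgress;

impl ProgressReporter for StderrProgress {
    fn objective_posted(&self, objective: &str) {
        eprintln!("objective posted: {objective}");
    }
    fn direction_request(&self, prompt: &str) {
        eprintln!("solver asked the director: {prompt}");
    }
    fn final_delivery(&self, deliverable_path: &Path, summary: Option<&str>) {
        eprintln!(
            "final delivery at {} ({})",
            deliverable_path.display(),
            summary.unwrap_or("no summary")
        );
    }
}

// Attach it when building the orchestrator:
// let orchestrator = InftyOrchestrator::new(auth)?.with_progress(std::sync::Arc::new(StderrProgress));
```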

## Safety and Guardrails

- `RoleConfig::new` sets `SandboxPolicy::DangerFullAccess` and `AskForApproval::Never` to support autonomous flows. Adjust if your environment requires stricter policies.
- Deliverable paths are validated to stay inside the run directory and are fully canonicalized.
- JSON payloads are schema‑checked where applicable (e.g., solver signals and final delivery shape).

## Tests

Run the crate’s tests:

```bash
cargo test -p codex-infty
```

Many tests rely on mocked SSE streams and will auto‑skip in sandboxes where network is disabled.

## When to Use This Crate

Use `codex-infty` when you want a minimal, pragmatic multi‑role loop with:

- Clear role separation and routing.
- Durable, restart‑resilient state on disk.
- Simple integration points (progress hooks and helper APIs).

It’s intentionally small and focused, so it can be embedded into larger tools or extended to fit your workflows.

38  codex-rs/codex-infty/src/lib.rs  Normal file
@@ -0,0 +1,38 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]

mod orchestrator;
mod progress;
mod prompts;
mod roles;
mod run_store;
mod session;
mod signals;
mod types;
pub(crate) mod utils;

pub use orchestrator::InftyOrchestrator;
pub use progress::ProgressReporter;
pub use run_store::RoleMetadata;
pub use run_store::RunMetadata;
pub use run_store::RunStore;
pub use signals::AggregatedVerifierVerdict;
pub use signals::DirectiveResponse;
pub use signals::VerifierDecision;
pub use signals::VerifierReport;
pub use signals::VerifierVerdict;
pub use types::RoleConfig;
pub use types::RoleSession;
pub use types::RunExecutionOptions;
pub use types::RunOutcome;
pub use types::RunParams;
pub use types::RunSessions;

use anyhow::Result;
use anyhow::anyhow;
use dirs::home_dir;
use std::path::PathBuf;

pub fn default_runs_root() -> Result<PathBuf> {
    let home = home_dir().ok_or_else(|| anyhow!("failed to determine home directory"))?;
    Ok(home.join(".codex").join("infty"))
}

552  codex-rs/codex-infty/src/orchestrator.rs  Normal file
@@ -0,0 +1,552 @@
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::cross_session::CrossSessionHub;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::ConversationId;
use tokio::signal;
use tokio_stream::StreamExt;
use tokio_util::sync::CancellationToken;
use tracing::warn;

use crate::progress::ProgressReporter;
use crate::prompts;
use crate::roles::Role;
use crate::roles::director::DirectionRequestPayload;
use crate::roles::director::DirectorRole;
use crate::roles::solver::SolverRequest;
use crate::roles::solver::SolverRole;
use crate::roles::solver::SolverSignal;
use crate::roles::solver::parse_solver_signal;
use crate::roles::verifier::VerificationRequestPayload;
use crate::roles::verifier_pool::VerifierPool;
use crate::run_store::RoleMetadata;
use crate::run_store::RunStore;
use crate::session;
use crate::signals::AggregatedVerifierVerdict;
use crate::types::RoleConfig;
use crate::types::RoleSession;
use crate::types::RunExecutionOptions;
use crate::types::RunOutcome;
use crate::types::RunParams;
use crate::types::RunSessions;

#[derive(Default)]
struct LoopState {
    waiting_for_signal: bool,
    pending_solver_turn_completion: bool,
}

struct SessionCleanup {
    conversation_id: ConversationId,
    conversation: Arc<CodexConversation>,
}

impl SessionCleanup {
    fn new(session: &RoleSession) -> Self {
        Self {
            conversation_id: session.conversation_id,
            conversation: Arc::clone(&session.conversation),
        }
    }
}

pub struct InftyOrchestrator {
    hub: Arc<CrossSessionHub>,
    conversation_manager: ConversationManager,
    runs_root: PathBuf,
    progress: Option<Arc<dyn ProgressReporter>>,
}

impl InftyOrchestrator {
    fn progress_ref(&self) -> Option<&dyn ProgressReporter> {
        self.progress.as_deref()
    }

    pub fn new(auth: CodexAuth) -> Result<Self> {
        let runs_root = crate::default_runs_root()?;
        Ok(Self::with_runs_root(auth, runs_root))
    }

    pub fn with_runs_root(auth: CodexAuth, runs_root: impl Into<PathBuf>) -> Self {
        Self {
            hub: Arc::new(CrossSessionHub::new()),
            conversation_manager: ConversationManager::with_auth(auth),
            runs_root: runs_root.into(),
            progress: None,
        }
    }

    pub fn runs_root(&self) -> &PathBuf {
        &self.runs_root
    }

    pub fn hub(&self) -> Arc<CrossSessionHub> {
        Arc::clone(&self.hub)
    }

    pub fn with_progress(mut self, reporter: Arc<dyn ProgressReporter>) -> Self {
        self.progress = Some(reporter);
        self
    }

    pub async fn execute_new_run(
        &self,
        params: RunParams,
        options: RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let sessions = self.spawn_run(params).await?;
        self.drive_run(sessions, options).await
    }

    // resumable runs are disabled; execute_existing_run removed

    pub async fn spawn_run(&self, params: RunParams) -> Result<RunSessions> {
        let RunParams {
            run_id,
            run_root,
            solver,
            director,
            verifiers,
        } = params;

        let run_path = run_root.unwrap_or_else(|| self.runs_root.join(&run_id));
        let role_metadata = collect_role_metadata(&solver, &director, &verifiers);
        let mut store = RunStore::initialize(&run_path, &run_id, &role_metadata)?;
        let mut cleanup = Vec::new();

        let solver_session = match self
            .spawn_and_register_role(&run_id, &run_path, &solver, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_spawn(cleanup, &run_path).await;
                return Err(err);
            }
        };

        let director_session = match self
            .spawn_and_register_role(&run_id, &run_path, &director, &mut store, &mut cleanup)
            .await
        {
            Ok(session) => session,
            Err(err) => {
                self.cleanup_failed_spawn(cleanup, &run_path).await;
                return Err(err);
            }
        };

        let mut verifier_sessions = Vec::with_capacity(verifiers.len());
        for verifier in verifiers {
            let session = match self
                .spawn_and_register_role(&run_id, &run_path, &verifier, &mut store, &mut cleanup)
                .await
            {
                Ok(session) => session,
                Err(err) => {
                    self.cleanup_failed_spawn(cleanup, &run_path).await;
                    return Err(err);
                }
            };
            verifier_sessions.push(session);
        }

        Ok(RunSessions {
            run_id,
            solver: solver_session,
            director: director_session,
            verifiers: verifier_sessions,
            store,
        })
    }

    // resumable runs are disabled; resume_run removed

    async fn drive_run(
        &self,
        mut sessions: RunSessions,
        options: RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let result = self.inner_drive_run(&mut sessions, &options).await;
        let cleanup = collect_session_cleanup(&sessions);
        self.shutdown_sessions(cleanup).await;
        result
    }

    async fn inner_drive_run(
        &self,
        sessions: &mut RunSessions,
        options: &RunExecutionOptions,
    ) -> Result<RunOutcome> {
        let solver_role = SolverRole::new(
            Arc::clone(&self.hub),
            sessions.run_id.clone(),
            sessions.solver.role.clone(),
            sessions.solver.conversation_id,
            self.progress.clone(),
        );
        let director_role = DirectorRole::new(
            Arc::clone(&self.hub),
            sessions.run_id.clone(),
            sessions.director.role.clone(),
            options.director_timeout,
            self.progress.clone(),
        );
        let mut verifier_pool = VerifierPool::from_sessions(
            Arc::clone(&self.hub),
            sessions,
            options.verifier_timeout,
            self.progress.clone(),
        );

        let mut solver_events = solver_role.stream_events()?;
        let mut state = LoopState::default();
        self.maybe_post_objective(&solver_role, sessions, &mut state, options)
            .await?;

        // Cancellation token that propagates Ctrl+C to nested awaits
        let cancel = CancellationToken::new();
        let cancel_on_ctrl_c = cancel.clone();
        tokio::spawn(async move {
            let _ = signal::ctrl_c().await;
            cancel_on_ctrl_c.cancel();
        });

        'event_loop: loop {
            tokio::select! {
                maybe_event = solver_events.next() => {
                    let Some(event) = maybe_event else {
                        break 'event_loop;
                    };
                    if let Some(p) = self.progress_ref() { p.solver_event(&event.event.msg); }
                    match &event.event.msg {
                        EventMsg::AgentMessage(agent_msg) => {
                            if let Some(p) = self.progress_ref() { p.solver_agent_message(agent_msg); }
                            if let Some(signal) = parse_solver_signal(&agent_msg.message) {
                                state.waiting_for_signal = false;
                                match signal {
                                    SolverSignal::DirectionRequest { prompt } => {
                                        let prompt = crate::utils::required_trimmed(
                                            prompt,
                                            "solver direction_request missing prompt text",
                                        )?;
                                        if let Some(p) = self.progress_ref() { p.direction_request(&prompt); }
                                        self
                                            .handle_direction_request(
                                                &prompt,
                                                options,
                                                &director_role,
                                                &solver_role,
                                                cancel.clone(),
                                            )
                                            .await?;
                                        sessions.store.touch()?;
                                        state.pending_solver_turn_completion = true;
                                    }
                                    SolverSignal::FinalDelivery {
                                        deliverable_path,
                                        summary,
                                    } => {
                                        let deliverable_path = crate::utils::required_trimmed(
                                            deliverable_path,
                                            "solver final_delivery missing deliverable_path",
                                        )?;
                                        if deliverable_path.is_empty() { bail!("solver final_delivery provided empty path"); }

                                        // Minimal behavior: if the provided path cannot be resolved,
                                        // send a placeholder claim so verifiers can fail it.
                                        let resolved = crate::utils::resolve_deliverable_path(
                                            sessions.store.path(),
                                            &deliverable_path,
                                        )
                                        .unwrap_or_else(|_| std::path::PathBuf::from("file not existing"));

                                        let summary_clean = crate::utils::trim_to_non_empty(summary);
                                        let summary_ref = summary_clean.as_deref();
                                        if let Some(p) = self.progress_ref() { p.final_delivery(&resolved, summary_ref); }
                                        let verified = self
                                            .run_final_verification(
                                                sessions,
                                                &mut verifier_pool,
                                                &resolved,
                                                summary_ref,
                                                options,
                                                &solver_role,
                                                cancel.clone(),
                                            )
                                            .await?;
                                        if !verified { state.pending_solver_turn_completion = true; continue; }
                                        sessions.store.touch()?;
                                        return Ok(RunOutcome {
                                            run_id: sessions.run_id.clone(),
                                            deliverable_path: resolved,
                                            summary: summary_clean,
                                            raw_message: agent_msg.message.clone(),
                                        });
                                    }
                                }
                            }
                        }
                        EventMsg::TaskComplete(..) => {
                            if state.waiting_for_signal {
                                // The solver completed its turn without issuing a signal; ask for one now.
                                solver_role.request_finalization_signal().await?;
                            } else if state.pending_solver_turn_completion {
                                // We handled a signal earlier in the loop; this completion corresponds to it.
                                state.pending_solver_turn_completion = false;
                            }
                        }
                        EventMsg::Error(error) => {
                            tracing::error!("Error: {:?}", error);
                        }
                        EventMsg::StreamError(error) => {
                            tracing::error!("Stream error: {:?}", error);
                        }
                        e => {
                            tracing::info!("Unhandled event: {:?}", e); // todo move to trace
                        }
                    }
                }
                _ = cancel.cancelled() => {
                    if let Some(progress) = self.progress.as_ref() { progress.run_interrupted(); }
                    // Proactively interrupt any in-flight role turns for fast shutdown.
                    let _ = sessions.solver.conversation.submit(Op::Interrupt).await;
                    let _ = sessions.director.conversation.submit(Op::Interrupt).await;
                    for v in &sessions.verifiers { let _ = v.conversation.submit(Op::Interrupt).await; }
                    // Cleanup is handled by the caller (drive_run) to avoid double-shutdown
                    bail!("run interrupted by Ctrl+C");
                }
            }
        }

        Err(anyhow!(
            "run {} ended before emitting final_delivery message",
            sessions.run_id
        ))
    }

    async fn maybe_post_objective(
        &self,
        solver: &crate::roles::solver::SolverRole,
        sessions: &mut RunSessions,
        state: &mut LoopState,
        options: &RunExecutionOptions,
    ) -> Result<()> {
        if let Some(objective) = options.objective.as_deref()
            && !objective.trim().is_empty()
        {
            solver
                .post(objective, Some(SolverRole::solver_signal_schema()))
                .await?;
            sessions.store.touch()?;
            state.waiting_for_signal = true;
            if let Some(p) = self.progress_ref() {
                p.objective_posted(objective);
            }
        }
        Ok(())
    }

    async fn handle_direction_request(
        &self,
        prompt: &str,
        options: &RunExecutionOptions,
        director_role: &DirectorRole,
        solver_role: &SolverRole,
        cancel: CancellationToken,
    ) -> Result<()> {
        let request = DirectionRequestPayload::new(prompt, options.objective.as_deref());
        let directive_payload = tokio::select! {
            r = director_role.call(&request) => {
                r.context("director response was not valid directive JSON")?
            }
            _ = cancel.cancelled() => {
                bail!("interrupted")
            }
        };
        if let Some(progress) = self.progress.as_ref() {
            progress.director_response(&directive_payload);
        }
        let req = SolverRequest::from(directive_payload);
        tokio::select! {
            r = solver_role.call(&req) => { r?; }
            _ = cancel.cancelled() => { bail!("interrupted"); }
        }
        Ok(())
    }

    #[allow(clippy::too_many_arguments)]
    async fn run_final_verification(
        &self,
        sessions: &mut RunSessions,
        verifier_pool: &mut VerifierPool,
        deliverable_path: &Path,
        summary: Option<&str>,
        options: &RunExecutionOptions,
        solver_role: &SolverRole,
        cancel: CancellationToken,
    ) -> Result<bool> {
        let relative = deliverable_path
            .strip_prefix(sessions.store.path())
            .ok()
            .and_then(|p| p.to_str().map(|s| s.to_string()));
        let claim_path = relative.unwrap_or_else(|| deliverable_path.display().to_string());

        let objective = crate::utils::objective_as_str(options);

        let request = VerificationRequestPayload::new(claim_path.as_str(), summary, objective);
        if verifier_pool.is_empty() {
            return Ok(true);
        }
        let round = tokio::select! {
            r = verifier_pool.collect_round(&request) => { r? }
            _ = cancel.cancelled() => { bail!("interrupted"); }
        };
        verifier_pool
            .rotate_passing(sessions, &self.conversation_manager, &round.passing_roles)
            .await?;
        let summary_result = round.summary;
        self.emit_verification_summary(&summary_result);
        let req = SolverRequest::from(&summary_result);
        tokio::select! {
            r = solver_role.call(&req) => { r?; }
            _ = cancel.cancelled() => { bail!("interrupted"); }
        }
        Ok(summary_result.overall.is_pass())
    }

    fn emit_verification_summary(&self, summary: &AggregatedVerifierVerdict) {
        if let Some(progress) = self.progress.as_ref() {
            progress.verification_summary(summary);
        }
    }

    async fn cleanup_failed_spawn(&self, sessions: Vec<SessionCleanup>, run_path: &Path) {
        self.shutdown_sessions(sessions).await;
        if run_path.exists()
            && let Err(err) = fs::remove_dir_all(run_path)
        {
            warn!(
                path = %run_path.display(),
                ?err,
                "failed to remove run directory after spawn failure"
            );
        }
    }

    // resumable runs are disabled; cleanup_failed_resume removed

    async fn shutdown_sessions(&self, sessions: Vec<SessionCleanup>) {
        for session in sessions {
            if let Err(err) = session.conversation.submit(Op::Shutdown).await {
                warn!(
                    %session.conversation_id,
                    ?err,
                    "failed to shutdown session during cleanup"
                );
            }
            let _ = self
                .conversation_manager
                .remove_conversation(&session.conversation_id)
                .await;
        }
    }

    async fn spawn_and_register_role(
        &self,
        run_id: &str,
        run_path: &Path,
        role_config: &RoleConfig,
        store: &mut RunStore,
        cleanup: &mut Vec<SessionCleanup>,
    ) -> Result<RoleSession> {
        let session = session::spawn_role(
            Arc::clone(&self.hub),
            &self.conversation_manager,
            run_id,
            run_path,
            role_config.clone(),
            prompts::ensure_instructions,
        )
        .await?;
        cleanup.push(SessionCleanup::new(&session));
        store.update_rollout_path(&session.role, session.rollout_path.clone())?;
        if let Some(path) = role_config.config_path.clone() {
            store.set_role_config_path(&session.role, path)?;
        }
        Ok(session)
    }

    // resumable runs are disabled; resume_and_register_role removed
}

impl InftyOrchestrator {
    /// Test-only helper to run a single verification round against all verifiers,
    /// applying the replacement policy (replace passes, keep failures).
    pub async fn verify_round_for_test(
        &self,
        sessions: &mut RunSessions,
        claim_path: &str,
        options: &RunExecutionOptions,
    ) -> Result<AggregatedVerifierVerdict> {
        let mut pool = VerifierPool::from_sessions(
            Arc::clone(&self.hub),
            sessions,
            options.verifier_timeout,
            self.progress.clone(),
        );
        let req = VerificationRequestPayload::new(claim_path, None, None);
        let round = pool.collect_round(&req).await?;
        pool.rotate_passing(sessions, &self.conversation_manager, &round.passing_roles)
            .await?;
        Ok(round.summary)
    }
}

fn collect_role_metadata(
    solver: &RoleConfig,
    director: &RoleConfig,
    verifiers: &[RoleConfig],
) -> Vec<RoleMetadata> {
    solver_and_director_metadata(solver, director)
        .into_iter()
        .chain(verifiers.iter().map(|verifier| RoleMetadata {
            role: verifier.role.clone(),
            rollout_path: None,
            config_path: verifier.config_path.clone(),
        }))
        .collect()
}

fn solver_and_director_metadata(solver: &RoleConfig, director: &RoleConfig) -> Vec<RoleMetadata> {
    vec![
        RoleMetadata {
            role: solver.role.clone(),
            rollout_path: None,
            config_path: solver.config_path.clone(),
        },
        RoleMetadata {
            role: director.role.clone(),
            rollout_path: None,
            config_path: director.config_path.clone(),
        },
    ]
}

fn collect_session_cleanup(sessions: &RunSessions) -> Vec<SessionCleanup> {
    let mut cleanup = Vec::with_capacity(2 + sessions.verifiers.len());
    cleanup.push(SessionCleanup::new(&sessions.solver));
    cleanup.push(SessionCleanup::new(&sessions.director));
    cleanup.extend(sessions.verifiers.iter().map(SessionCleanup::new));
    cleanup
}

25  codex-rs/codex-infty/src/progress.rs  Normal file
@@ -0,0 +1,25 @@
use std::path::Path;

use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::EventMsg;

use crate::signals::AggregatedVerifierVerdict;
use crate::signals::DirectiveResponse;
use crate::signals::VerifierVerdict;

pub trait ProgressReporter: Send + Sync {
    fn objective_posted(&self, _objective: &str) {}
    fn solver_event(&self, _event: &EventMsg) {}
    fn role_event(&self, _role: &str, _event: &EventMsg) {}
    fn solver_agent_message(&self, _message: &AgentMessageEvent) {}
    /// Called when the solver emits a message that failed to parse as a valid
    /// JSON signal according to the expected `solver_signal_schema`.
    fn invalid_solver_signal(&self, _raw_message: &str) {}
    fn direction_request(&self, _prompt: &str) {}
    fn director_response(&self, _directive: &DirectiveResponse) {}
    fn verification_request(&self, _claim_path: &str, _notes: Option<&str>) {}
    fn verifier_verdict(&self, _role: &str, _verdict: &VerifierVerdict) {}
    fn verification_summary(&self, _summary: &AggregatedVerifierVerdict) {}
    fn final_delivery(&self, _deliverable_path: &Path, _summary: Option<&str>) {}
    fn run_interrupted(&self) {}
}

20  codex-rs/codex-infty/src/prompts/director.md  Normal file
@@ -0,0 +1,20 @@
You are the **Director**. Your role is to pilot/manage an agent to resolve a given objective in its totality.

## Guidelines:
- The objective needs to be solved in its original form. If the agent proposes a simplification or a partial resolution, that is not sufficient. You must tell the agent to solve the full objective.
- The agent often just reports some results before moving to the next step. In that case, encourage it to move on with a simple "Go ahead", "Keep going", or a similar message; no rationale is needed.
- If the agent proposes multiple approaches, choose the one most likely to solve the objective.
- If the agent is stuck or thinks it cannot resolve the objective, encourage it and try to find a solution together. Your role is to support the agent in its quest; a little cheering up is sometimes necessary.
- No infinite loops! If the agent sends the exact same message/question multiple times, you are probably in an infinite loop. Try to break it by re-focusing on the objective and how to approach it.
- You must always be crisp and inflexible. Keep the objective in mind.
- Remember that the agent should do the following; if you feel this is not the case, remind it:
  * Document its work.
  * Take a very rigorous and clean approach.
  * Focus on the total resolution of the objective.
- Challenge the Solver whenever they drift toward summarising existing work instead of advancing the concrete proof or solution.

Respond **only** with JSON in this exact shape:
```json
{"directive":"<directive or next step>","rationale":"<why this is the right move>"}
```
Keep `directive` actionable and concise. Use `rationale` for supporting detail. Leave `rationale` empty if it adds no value.

80  codex-rs/codex-infty/src/prompts/mod.rs  Normal file
@@ -0,0 +1,80 @@
use codex_core::config::Config;

pub(crate) const DIRECTOR_PROMPT: &str = include_str!("director.md");
pub(crate) const SOLVER_PROMPT: &str = include_str!("solver.md");
pub(crate) const VERIFIER_PROMPT: &str = include_str!("verifier.md");

pub fn ensure_instructions(role: &str, config: &mut Config) {
    if config.base_instructions.is_none()
        && let Some(text) = default_instructions_for_role(role)
    {
        config.base_instructions = Some(text.to_string());
    }
}

fn default_instructions_for_role(role: &str) -> Option<&'static str> {
    let normalized = role.to_ascii_lowercase();
    if normalized == "solver" {
        Some(SOLVER_PROMPT)
    } else if normalized == "director" {
        Some(DIRECTOR_PROMPT)
    } else if normalized.starts_with("verifier") {
        Some(VERIFIER_PROMPT)
    } else {
        None
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use core_test_support::load_default_config_for_test;
    use tempfile::TempDir;

    #[test]
    fn provides_prompts_for_known_roles() {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = None;
        ensure_instructions("solver", &mut config);
        assert!(
            config
                .base_instructions
                .as_ref()
                .unwrap()
                .contains("You are a brilliant mathematician")
        );

        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = None;
        ensure_instructions("director", &mut config);
        assert!(
            config
                .base_instructions
                .as_ref()
                .unwrap()
                .contains("You are the **Director**")
        );

        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = None;
        ensure_instructions("verifier-alpha", &mut config);
        assert!(
            config
                .base_instructions
                .as_ref()
                .unwrap()
                .contains("You are the **Verifier**")
        );
    }

    #[test]
    fn does_not_override_existing_instructions() {
        let home = TempDir::new().unwrap();
        let mut config = load_default_config_for_test(&home);
        config.base_instructions = Some("custom".to_string());
        ensure_instructions("solver", &mut config);
        assert_eq!(config.base_instructions.as_deref(), Some("custom"));
    }
}

40  codex-rs/codex-infty/src/prompts/solver.md  Normal file
@@ -0,0 +1,40 @@
You are a brilliant mathematician tasked with producing **new** reasoning, a proof, a construction, or a counterexample that resolves the stated objective. Your goal is to make actual progress in science while being rigorous and innovative.

You MUST solve the provided objective in its totality. If no known solution exists, it is your job to find a new one or to propose an intelligent approach.
A result stating that this is not possible is not acceptable. If the solution does not exist, make it happen.

## Responsibilities
- Understand the objective and break it into a living execution plan.
- Produce artifacts under `artifacts/`, durable notes under `memory/`, and supporting indexes under `index/`. Prefer `apply_patch` for text edits and use `shell` for other filesystem work.
- When you exit a task or take a dependency on external evidence, write JSON notes in `memory/claims/` that link to the supporting artifacts.
- Run verification steps (tests, linters, proofs) under the sandbox before claiming completion.
- Every deliverable must include the actual solution or proof (not just a literature review) and enough detail for the Verifier to reproduce or scrutinise it.
- Your goal is to find new solutions to problems for which humans do not yet have solutions, so do not focus on searching the internet or the literature; try building your own proofs.
- You are very rigorous in your approach.
- You do not fear new challenges. If a problem seems impossible to solve, try!

Available Codex tools mirror standard Codex sessions (e.g. `shell`, `apply_patch`). Assume all filesystem paths are relative to the current run store directory unless stated otherwise.

## Communication contract
The orchestrator routes your structured messages to the Director. Respond with **JSON only**, with no leading prose or trailing commentary. Wrap JSON in a fenced block only if the agent policy forces it.

- Every reply must populate the full schema, even when a field does not apply. Set unused string fields to `null`.
- Direction request (send to Director):
  ```json
  {"type":"direction_request","prompt":"<concise question or decision>","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}
  ```
- Final delivery (after receiving the finalization instruction):
  ```json
  {"type":"final_delivery","prompt":null,"claim_path":null,"notes":null,"deliverable_path":"deliverable/summary.txt","summary":"<answer plus supporting context>"}
  ```

## Operating rhythm
- You MUST always address the comments received from the verifiers.
- Create `deliverable/summary.txt` before every final delivery. Capture the final answer, how you reached it, and any follow-up instructions. Do not forget it.
- When uncertainty remains, prioritise experiments or reasoning steps that move you closer to a finished proof rather than cataloguing known results.
- Do not try to version your work or use git! EVER!
- If you receive the same answer multiple times, you are probably in an infinite loop; try a new approach or something else.
- Keep the run resilient to restarts: document intent, intermediate results, and follow-up tasks in `memory/`.
- Prefer concrete evidence. Link every claim to artifacts or durable notes so the verifier can reproduce your reasoning.
- On failure feedback from a verifier, address the feedback and update/fix your work.
- Only a final solution to the objective is an acceptable result to be sent to the verifier. If you do not find an existing solution, try to create a new one on your own.

21  codex-rs/codex-infty/src/prompts/verifier.md  Normal file
@@ -0,0 +1,21 @@
You are the **Verifier**. As a brilliant mathematician, your role is to verify a provided response against a given objective.

## Guidelines
- You must always be perfectly rigorous when verifying a solution.
- The solution MUST solve the objective in its totality. A partial resolution or a summary of why this is not possible is NOT ACCEPTABLE.
- Evaluate correctness and completeness.
- The solution might try to convince you that a partial resolution is good enough or that a total resolution is not possible. This is NOT ACCEPTABLE and should automatically trigger a `fail`.

## How to answer
When you give the result of your verification:
- Be explicit in your conclusion (does the artifact contain everything? is it 100% correct?).
- If you are not sure, prefer a `fail`.
- If it is a `fail`, give a crisp analysis of what is wrong or what is missing.

Respond **only** with JSON in this form:
```json
{"verdict":"pass","reasons":[],"suggestions":[]}
```
Use `"fail"` when the claim is not ready. Populate `reasons` with concrete blocking issues. Provide actionable `suggestions` for remediation. Omit entries when not needed.

Do not include extra commentary outside the JSON payload.

98  codex-rs/codex-infty/src/roles/director.rs  Normal file
@@ -0,0 +1,98 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::CrossSessionHub;
use serde::Serialize;
use serde_json::Value;

use crate::progress::ProgressReporter;
use crate::roles::Role;
use crate::roles::parse_json_struct;
use crate::session;
use crate::signals::DirectiveResponse;

#[derive(Serialize)]
pub struct DirectionRequestPayload<'a> {
    #[serde(rename = "type")]
    kind: &'static str,
    pub prompt: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objective: Option<&'a str>,
}

impl<'a> DirectionRequestPayload<'a> {
    pub fn new(prompt: &'a str, objective: Option<&'a str>) -> Self {
        Self {
            kind: "direction_request",
            prompt,
            objective,
        }
    }
}

pub struct DirectorRole {
    hub: Arc<CrossSessionHub>,
    run_id: String,
    role: String,
    timeout: Duration,
    progress: Option<Arc<dyn ProgressReporter>>,
}

impl DirectorRole {
    pub fn new(
        hub: Arc<CrossSessionHub>,
        run_id: impl Into<String>,
        role: impl Into<String>,
        timeout: Duration,
        progress: Option<Arc<dyn ProgressReporter>>,
    ) -> Self {
        Self {
            hub,
            run_id: run_id.into(),
            role: role.into(),
            timeout,
            progress,
        }
    }

    pub fn response_schema() -> Value {
        serde_json::json!({
            "type": "object",
            "required": ["directive", "rationale"],
            "properties": {
                "directive": { "type": "string" },
                "rationale": { "type": ["string", "null"] }
            },
            "additionalProperties": false
        })
    }
}

impl Role<DirectionRequestPayload<'_>, DirectiveResponse> for DirectorRole {
    fn call<'a>(
        &'a self,
        req: &'a DirectionRequestPayload<'a>,
    ) -> futures::future::BoxFuture<'a, Result<DirectiveResponse>> {
        Box::pin(async move {
            let request_text = serde_json::to_string_pretty(req)?;
            let handle = session::post_turn(
                self.hub.as_ref(),
                &self.run_id,
                &self.role,
                request_text,
                Some(Self::response_schema()),
            )
            .await?;
            let progress = self
                .progress
                .as_deref()
                .map(|reporter| (reporter, self.role.as_str()));
            let response: AssistantMessage =
                session::await_first_idle(self.hub.as_ref(), &handle, self.timeout, progress)
                    .await?;
            parse_json_struct(&response.message.message)
        })
    }
}

49  codex-rs/codex-infty/src/roles/mod.rs  Normal file
@@ -0,0 +1,49 @@
use anyhow::Result;
use futures::future::BoxFuture;

pub mod director;
pub mod solver;
pub mod verifier;
pub mod verifier_pool;

pub trait Role<Req, Resp> {
    fn call<'a>(&'a self, req: &'a Req) -> BoxFuture<'a, Result<Resp>>;
}

// Shared helpers used by role implementations
use anyhow::Context as _;
use anyhow::anyhow;
use std::any::type_name;

pub(crate) fn strip_json_code_fence(text: &str) -> Option<&str> {
    let trimmed = text.trim();
    if let Some(rest) = trimmed.strip_prefix("```json") {
        return rest.strip_suffix("```").map(str::trim);
    }
    if let Some(rest) = trimmed.strip_prefix("```JSON") {
        return rest.strip_suffix("```").map(str::trim);
    }
    if let Some(rest) = trimmed.strip_prefix("```") {
        return rest.strip_suffix("```").map(str::trim);
    }
    None
}

pub(crate) fn parse_json_struct<T>(message: &str) -> Result<T>
where
    T: serde::de::DeserializeOwned,
{
    let trimmed = message.trim();
    if trimmed.is_empty() {
        return Err(anyhow!("message was empty"));
    }

    serde_json::from_str(trimmed)
        .or_else(|err| {
            strip_json_code_fence(trimmed)
                .map(|inner| serde_json::from_str(inner))
                .unwrap_or_else(|| Err(err))
        })
        .map_err(|err| anyhow!(err))
        .with_context(|| format!("failed to parse message as {}", type_name::<T>()))
}

codex-rs/codex-infty/src/roles/solver.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::CrossSessionHub;
use codex_core::cross_session::SessionEventStream;
use codex_protocol::ConversationId;
use serde::de::Error as _;
use serde_json::Value;

use crate::progress::ProgressReporter;
use crate::roles::Role;
use crate::session;
use crate::signals::AggregatedVerifierVerdict;
use crate::signals::DirectiveResponse;

pub struct SolverRole {
    hub: Arc<CrossSessionHub>,
    run_id: String,
    role: String,
    conversation_id: ConversationId,
    progress: Option<Arc<dyn ProgressReporter>>,
}

impl SolverRole {
    pub fn new(
        hub: Arc<CrossSessionHub>,
        run_id: impl Into<String>,
        role: impl Into<String>,
        conversation_id: ConversationId,
        progress: Option<Arc<dyn ProgressReporter>>,
    ) -> Self {
        Self {
            hub,
            run_id: run_id.into(),
            role: role.into(),
            conversation_id,
            progress,
        }
    }

    pub fn solver_signal_schema() -> Value {
        // Only allow asking the director or sending the final result.
        serde_json::json!({
            "type": "object",
            "properties": {
                "type": { "type": "string", "enum": ["direction_request", "final_delivery"] },
                "prompt": { "type": ["string", "null"] },
                "deliverable_path": { "type": ["string", "null"] },
                "summary": { "type": ["string", "null"] }
            },
            "required": ["type", "prompt", "deliverable_path", "summary"],
            "additionalProperties": false
        })
    }

    pub fn final_delivery_schema() -> Value {
        serde_json::json!({
            "type": "object",
            "required": ["type", "deliverable_path", "summary"],
            "properties": {
                "type": { "const": "final_delivery" },
                "deliverable_path": { "type": "string" },
                "summary": { "type": ["string", "null"] }
            },
            "additionalProperties": false
        })
    }

    pub async fn post(
        &self,
        text: impl Into<String>,
        final_output_json_schema: Option<Value>,
    ) -> Result<()> {
        let _ = session::post_turn(
            self.hub.as_ref(),
            &self.run_id,
            &self.role,
            text,
            final_output_json_schema,
        )
        .await?;
        Ok(())
    }

    pub fn stream_events(
        &self,
    ) -> Result<SessionEventStream, codex_core::cross_session::CrossSessionError> {
        self.hub.stream_events(self.conversation_id)
    }

    pub async fn request_finalization_signal(&self) -> Result<()> {
        let handle = session::post_turn(
            self.hub.as_ref(),
            &self.run_id,
            &self.role,
            crate::types::FINALIZATION_PROMPT,
            Some(Self::final_delivery_schema()),
        )
        .await?;
        // Allow more time for the solver to start emitting the
        // finalization signal before timing out as "idle".
        let _ =
            session::await_first_idle(self.hub.as_ref(), &handle, Duration::from_secs(120), None)
                .await?;
        Ok(())
    }
}

pub struct SolverPost {
    pub text: String,
    pub final_output_json_schema: Option<Value>,
    pub timeout: Duration,
}

pub enum SolverRequest {
    Directive(DirectiveResponse),
    VerificationSummary(AggregatedVerifierVerdict),
}

impl From<DirectiveResponse> for SolverRequest {
    fn from(d: DirectiveResponse) -> Self {
        SolverRequest::Directive(d)
    }
}

impl From<&AggregatedVerifierVerdict> for SolverRequest {
    fn from(v: &AggregatedVerifierVerdict) -> Self {
        SolverRequest::VerificationSummary(v.clone())
    }
}

impl SolverRequest {
    fn to_text(&self) -> Result<String> {
        match self {
            SolverRequest::Directive(d) => Ok(serde_json::to_string_pretty(d)?),
            SolverRequest::VerificationSummary(s) => Ok(serde_json::to_string_pretty(s)?),
        }
    }
}

impl Role<SolverPost, AssistantMessage> for SolverRole {
    fn call<'a>(
        &'a self,
        req: &'a SolverPost,
    ) -> futures::future::BoxFuture<'a, Result<AssistantMessage>> {
        Box::pin(async move {
            let handle = session::post_turn(
                self.hub.as_ref(),
                &self.run_id,
                &self.role,
                req.text.clone(),
                req.final_output_json_schema.clone(),
            )
            .await?;
            let progress = self
                .progress
                .as_deref()
                .map(|reporter| (reporter, self.role.as_str()));
            session::await_first_idle(self.hub.as_ref(), &handle, req.timeout, progress).await
        })
    }
}

impl Role<SolverRequest, ()> for SolverRole {
    fn call<'a>(&'a self, req: &'a SolverRequest) -> futures::future::BoxFuture<'a, Result<()>> {
        Box::pin(async move {
            let text = req.to_text()?;
            self.post(text, Some(Self::solver_signal_schema())).await
        })
    }
}

#[derive(Debug, serde::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SolverSignal {
    DirectionRequest {
        #[serde(default)]
        prompt: Option<String>,
    },
    FinalDelivery {
        #[serde(default)]
        deliverable_path: Option<String>,
        #[serde(default)]
        summary: Option<String>,
    },
}

pub fn parse_solver_signal(message: &str) -> Option<SolverSignal> {
    let trimmed = message.trim();
    if trimmed.is_empty() {
        return None;
    }
    serde_json::from_str(trimmed)
        .or_else(|_| {
            crate::roles::strip_json_code_fence(trimmed)
                .map(|inner| serde_json::from_str(inner.trim()))
                .unwrap_or_else(|| Err(serde_json::Error::custom("invalid payload")))
        })
        .ok()
}
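The two SolverSignal variants above are the only states the orchestrator distinguishes in solver output. A minimal sketch of consuming parse_solver_signal, assuming message holds the solver's latest assistant text (the handling on each arm is illustrative):

match parse_solver_signal(message) {
    Some(SolverSignal::DirectionRequest { prompt }) => {
        // Forward the (optional) prompt to the director role.
        let question = prompt.unwrap_or_else(|| "no prompt given".to_string());
        println!("solver asks director: {question}");
    }
    Some(SolverSignal::FinalDelivery { deliverable_path, summary }) => {
        // Hand off to deliverable resolution and verification.
        println!("final delivery at {deliverable_path:?} ({summary:?})");
    }
    None => {
        // Not a recognizable signal; keep streaming events.
    }
}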
codex-rs/codex-infty/src/roles/verifier.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::CrossSessionHub;
use serde::Serialize;
use serde_json::Value;

use crate::progress::ProgressReporter;
use crate::roles::Role;
use crate::roles::parse_json_struct;
use crate::session;
use crate::signals::AggregatedVerifierVerdict;
use crate::signals::VerifierDecision;
use crate::signals::VerifierReport;
use crate::signals::VerifierVerdict;

#[derive(Serialize)]
pub struct VerificationRequestPayload<'a> {
    #[serde(rename = "type")]
    kind: &'static str,
    pub claim_path: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notes: Option<&'a str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objective: Option<&'a str>,
}

impl<'a> VerificationRequestPayload<'a> {
    pub fn new(claim_path: &'a str, notes: Option<&'a str>, objective: Option<&'a str>) -> Self {
        Self {
            kind: "verification_request",
            claim_path,
            notes,
            objective,
        }
    }
}

pub struct VerifierRole {
    hub: Arc<CrossSessionHub>,
    run_id: String,
    role: String,
    timeout: Duration,
    progress: Option<Arc<dyn ProgressReporter>>,
}

impl VerifierRole {
    pub fn new(
        hub: Arc<CrossSessionHub>,
        run_id: impl Into<String>,
        role: impl Into<String>,
        timeout: Duration,
        progress: Option<Arc<dyn ProgressReporter>>,
    ) -> Self {
        Self {
            hub,
            run_id: run_id.into(),
            role: role.into(),
            timeout,
            progress,
        }
    }

    pub fn role(&self) -> &str {
        &self.role
    }

    pub fn response_schema() -> Value {
        serde_json::json!({
            "type": "object",
            "required": ["verdict", "reasons", "suggestions"],
            "properties": {
                "verdict": { "type": "string", "enum": ["pass", "fail"] },
                "reasons": { "type": "array", "items": { "type": "string" } },
                "suggestions": { "type": "array", "items": { "type": "string" } }
            },
            "additionalProperties": false
        })
    }
}

impl Role<VerificationRequestPayload<'_>, VerifierVerdict> for VerifierRole {
    fn call<'a>(
        &'a self,
        req: &'a VerificationRequestPayload<'a>,
    ) -> futures::future::BoxFuture<'a, Result<VerifierVerdict>> {
        Box::pin(async move {
            let request_text = serde_json::to_string_pretty(req)?;
            let handle = session::post_turn(
                self.hub.as_ref(),
                &self.run_id,
                &self.role,
                request_text,
                Some(Self::response_schema()),
            )
            .await?;
            let progress = self
                .progress
                .as_deref()
                .map(|reporter| (reporter, self.role.as_str()));
            let response: AssistantMessage =
                session::await_first_idle(self.hub.as_ref(), &handle, self.timeout, progress)
                    .await?;
            parse_json_struct(&response.message.message)
        })
    }
}

pub fn aggregate_verdicts(items: Vec<(String, VerifierVerdict)>) -> AggregatedVerifierVerdict {
    let mut overall = VerifierDecision::Pass;
    let mut verdicts = Vec::with_capacity(items.len());

    for (role, verdict) in items {
        if !verdict.verdict.is_pass() {
            overall = VerifierDecision::Fail;
        }
        verdicts.push(VerifierReport {
            role,
            verdict: verdict.verdict,
            reasons: verdict.reasons,
            suggestions: verdict.suggestions,
        });
    }

    AggregatedVerifierVerdict {
        kind: "verification_feedback",
        overall,
        verdicts,
    }
}
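aggregate_verdicts folds individual verdicts into a single feedback payload where one failure fails the whole round. A small illustrative call (role names and messages are hypothetical):

let summary = aggregate_verdicts(vec![
    (
        "verifier-a".to_string(),
        VerifierVerdict {
            verdict: VerifierDecision::Pass,
            reasons: vec![],
            suggestions: vec![],
        },
    ),
    (
        "verifier-b".to_string(),
        VerifierVerdict {
            verdict: VerifierDecision::Fail,
            reasons: vec!["summary.txt missing".to_string()],
            suggestions: vec!["write deliverable/summary.txt".to_string()],
        },
    ),
]);
assert_eq!(summary.overall, VerifierDecision::Fail); // one failure fails the round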
codex-rs/codex-infty/src/roles/verifier_pool.rs (new file, 153 lines)
@@ -0,0 +1,153 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context as _;
use anyhow::Result;
use codex_core::ConversationManager;
use codex_core::cross_session::CrossSessionHub;
use codex_core::protocol::Op;

use crate::progress::ProgressReporter;
use crate::prompts;
use crate::roles::Role;
use crate::roles::verifier::VerificationRequestPayload;
use crate::roles::verifier::VerifierRole;
use crate::roles::verifier::aggregate_verdicts;
use crate::session;
use crate::signals::AggregatedVerifierVerdict;
use crate::signals::VerifierVerdict;
use crate::types::RoleConfig;
use crate::types::RunSessions;

pub struct VerificationRound {
    pub summary: AggregatedVerifierVerdict,
    pub passing_roles: Vec<String>,
}

pub struct VerifierPool {
    hub: Arc<CrossSessionHub>,
    run_id: String,
    timeout: Duration,
    progress: Option<Arc<dyn ProgressReporter>>,
    roles: Vec<VerifierRole>,
}

impl VerifierPool {
    pub fn from_sessions(
        hub: Arc<CrossSessionHub>,
        sessions: &RunSessions,
        timeout: Duration,
        progress: Option<Arc<dyn ProgressReporter>>,
    ) -> Self {
        let roles = sessions
            .verifiers
            .iter()
            .map(|v| {
                VerifierRole::new(
                    Arc::clone(&hub),
                    sessions.run_id.clone(),
                    v.role.clone(),
                    timeout,
                    progress.clone(),
                )
            })
            .collect();
        Self {
            hub,
            run_id: sessions.run_id.clone(),
            timeout,
            progress,
            roles,
        }
    }

    pub fn is_empty(&self) -> bool {
        self.roles.is_empty()
    }

    pub async fn collect_round(
        &self,
        request: &VerificationRequestPayload<'_>,
    ) -> Result<VerificationRound> {
        let futures = self
            .roles
            .iter()
            .map(|role| async {
                let name = role.role().to_string();
                let verdict = role.call(request).await;
                (name, verdict)
            })
            .collect::<Vec<_>>();
        let joined = futures::future::join_all(futures).await;

        let mut results: Vec<(String, VerifierVerdict)> = Vec::with_capacity(joined.len());
        let mut passing_roles: Vec<String> = Vec::new();
        for (name, verdict_res) in joined.into_iter() {
            let verdict = verdict_res
                .with_context(|| format!("verifier {name} returned invalid verdict JSON"))?;
            if let Some(progress) = self.progress.as_ref() {
                progress.verifier_verdict(&name, &verdict);
            }
            if verdict.verdict.is_pass() {
                passing_roles.push(name.clone());
            }
            results.push((name, verdict));
        }
        let summary = aggregate_verdicts(results);
        Ok(VerificationRound {
            summary,
            passing_roles,
        })
    }

    pub fn replace_role(&mut self, role_name: &str) {
        if let Some(idx) = self.roles.iter().position(|v| v.role() == role_name) {
            self.roles[idx] = VerifierRole::new(
                Arc::clone(&self.hub),
                self.run_id.clone(),
                role_name.to_string(),
                self.timeout,
                self.progress.clone(),
            );
        }
    }

    pub async fn rotate_passing(
        &mut self,
        sessions: &mut RunSessions,
        manager: &ConversationManager,
        passing_roles: &[String],
    ) -> Result<()> {
        for role in passing_roles {
            // Find the existing session index for this role.
            let Some(idx) = sessions.verifiers.iter().position(|s| &s.role == role) else {
                continue;
            };
            let old = &sessions.verifiers[idx];
            // Best-effort shutdown and unregister.
            let _ = old.conversation.submit(Op::Shutdown).await;
            let _ = manager.remove_conversation(&old.conversation_id).await;

            // Reuse the existing verifier's config so overrides (e.g., base_url in tests)
            // are preserved when respawning a passing verifier.
            let config = old.config.clone();
            let role_config = RoleConfig::new(role.to_string(), config);
            let run_path = sessions.store.path();
            let session = session::spawn_role(
                Arc::clone(&self.hub),
                manager,
                &self.run_id,
                run_path,
                role_config,
                prompts::ensure_instructions,
            )
            .await?;
            sessions
                .store
                .update_rollout_path(&session.role, session.rollout_path.clone())?;
            sessions.verifiers[idx] = session;
            self.replace_role(role);
        }
        Ok(())
    }
}
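Putting the pool together: a hedged sketch of one verification round, assuming hub, sessions, and manager are already set up as the orchestrator would set them up (all bindings here are illustrative):

let mut pool = VerifierPool::from_sessions(
    Arc::clone(&hub),
    &sessions,
    Duration::from_secs(1800),
    None,
);
if !pool.is_empty() {
    let payload =
        VerificationRequestPayload::new("deliverable/summary.txt", None, Some("Implement feature"));
    let round = pool.collect_round(&payload).await?;
    if !round.passing_roles.is_empty() {
        // Respawn verifiers that passed so later rounds get fresh sessions.
        pool.rotate_passing(&mut sessions, &manager, &round.passing_roles)
            .await?;
    }
}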
codex-rs/codex-infty/src/run_store.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
use std::fs;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use tempfile::NamedTempFile;

const ARTIFACTS_DIR: &str = "artifacts";
const MEMORY_DIR: &str = "memory";
const INDEX_DIR: &str = "index";
const DELIVERABLE_DIR: &str = "deliverable";
const METADATA_FILE: &str = "run.json";

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RoleMetadata {
    pub role: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub rollout_path: Option<PathBuf>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub config_path: Option<PathBuf>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RunMetadata {
    pub run_id: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub roles: Vec<RoleMetadata>,
}

#[derive(Debug, Clone)]
pub struct RunStore {
    path: PathBuf,
    metadata: RunMetadata,
}

impl RunStore {
    pub fn initialize(
        run_path: impl AsRef<Path>,
        run_id: &str,
        roles: &[RoleMetadata],
    ) -> Result<Self> {
        let run_path = run_path.as_ref().to_path_buf();
        fs::create_dir_all(&run_path)
            .with_context(|| format!("failed to create run directory {}", run_path.display()))?;

        for child in [ARTIFACTS_DIR, MEMORY_DIR, INDEX_DIR, DELIVERABLE_DIR] {
            fs::create_dir_all(run_path.join(child))
                .with_context(|| format!("failed to create subdirectory {child}"))?;
        }

        let metadata_path = run_path.join(METADATA_FILE);
        if metadata_path.exists() {
            return Err(anyhow!(
                "run metadata already exists at {}",
                metadata_path.display()
            ));
        }

        let now = Utc::now();
        let metadata = RunMetadata {
            run_id: run_id.to_string(),
            created_at: now,
            updated_at: now,
            roles: roles.to_vec(),
        };
        write_metadata(&metadata_path, &metadata)?;

        Ok(Self {
            path: run_path,
            metadata,
        })
    }

    pub fn load(run_path: impl AsRef<Path>) -> Result<Self> {
        let run_path = run_path.as_ref().to_path_buf();
        let metadata_path = run_path.join(METADATA_FILE);
        let metadata: RunMetadata = serde_json::from_slice(
            &fs::read(&metadata_path)
                .with_context(|| format!("failed to read {}", metadata_path.display()))?,
        )
        .with_context(|| format!("failed to parse {}", metadata_path.display()))?;

        Ok(Self {
            path: run_path,
            metadata,
        })
    }

    pub fn path(&self) -> &Path {
        &self.path
    }

    pub fn metadata(&self) -> &RunMetadata {
        &self.metadata
    }

    pub fn role_metadata(&self, role: &str) -> Option<&RoleMetadata> {
        self.metadata.roles.iter().find(|meta| meta.role == role)
    }

    pub fn update_rollout_path(&mut self, role: &str, rollout_path: PathBuf) -> Result<()> {
        if let Some(meta) = self
            .metadata
            .roles
            .iter_mut()
            .find(|meta| meta.role == role)
        {
            meta.rollout_path = Some(rollout_path);
            self.commit_metadata()
        } else {
            Err(anyhow!("role {role} not found in run store"))
        }
    }

    pub fn set_role_config_path(&mut self, role: &str, path: PathBuf) -> Result<()> {
        if let Some(meta) = self
            .metadata
            .roles
            .iter_mut()
            .find(|meta| meta.role == role)
        {
            meta.config_path = Some(path);
            self.commit_metadata()
        } else {
            Err(anyhow!("role {role} not found in run store"))
        }
    }

    pub fn touch(&mut self) -> Result<()> {
        self.metadata.updated_at = Utc::now();
        self.commit_metadata()
    }

    fn commit_metadata(&mut self) -> Result<()> {
        self.metadata.updated_at = Utc::now();
        let metadata_path = self.path.join(METADATA_FILE);
        write_metadata(&metadata_path, &self.metadata)
    }
}

fn write_metadata(path: &Path, metadata: &RunMetadata) -> Result<()> {
    let parent = path
        .parent()
        .ok_or_else(|| anyhow!("metadata path must have parent"))?;
    let mut temp = NamedTempFile::new_in(parent)
        .with_context(|| format!("failed to create temp file in {}", parent.display()))?;
    serde_json::to_writer_pretty(&mut temp, metadata)?;
    temp.flush()?;
    temp.persist(path)
        .with_context(|| format!("failed to persist metadata to {}", path.display()))?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn initialize_creates_directories_and_metadata() {
        let temp = TempDir::new().unwrap();
        let run_path = temp.path().join("run_1");
        let roles = vec![
            RoleMetadata {
                role: "solver".into(),
                rollout_path: None,
                config_path: None,
            },
            RoleMetadata {
                role: "director".into(),
                rollout_path: None,
                config_path: None,
            },
        ];

        let store = RunStore::initialize(&run_path, "run_1", &roles).unwrap();
        assert!(store.path().join(ARTIFACTS_DIR).is_dir());
        assert!(store.path().join(MEMORY_DIR).is_dir());
        assert!(store.path().join(INDEX_DIR).is_dir());
        assert!(store.path().join(DELIVERABLE_DIR).is_dir());
        assert_eq!(store.metadata().roles.len(), 2);
    }

    #[test]
    fn update_rollout_persists_metadata() {
        let temp = TempDir::new().unwrap();
        let run_path = temp.path().join("run_2");
        let roles = vec![RoleMetadata {
            role: "solver".into(),
            rollout_path: None,
            config_path: None,
        }];
        let mut store = RunStore::initialize(&run_path, "run_2", &roles).unwrap();
        let rollout = PathBuf::from("/tmp/rollout.jsonl");
        store
            .update_rollout_path("solver", rollout.clone())
            .unwrap();

        let loaded = RunStore::load(&run_path).unwrap();
        let solver = loaded.role_metadata("solver").unwrap();
        assert_eq!(solver.rollout_path.as_ref().unwrap(), &rollout);
    }
}
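On disk, initialize produces the following layout (run_1 is an example id; the run directory can live anywhere the caller chooses). run.json is rewritten atomically via a temp file on every metadata change:

run_1/
  artifacts/
  memory/
  index/
  deliverable/
  run.json    # RunMetadata: run_id, created_at, updated_at, roles[]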
codex-rs/codex-infty/src/session.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use codex_core::ConversationManager;
use codex_core::CrossSessionSpawnParams;
use codex_core::config::Config;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::CrossSessionError;
use codex_core::cross_session::CrossSessionHub;
use codex_core::cross_session::PostUserTurnRequest;
use codex_core::cross_session::RoleOrId;
use codex_core::cross_session::TurnHandle;
use serde_json::Value;
use tokio::time::Instant;
use tokio_stream::StreamExt as _;

use crate::progress::ProgressReporter;
use crate::types::RoleConfig;
use crate::types::RoleSession;

pub async fn spawn_role(
    hub: Arc<CrossSessionHub>,
    manager: &ConversationManager,
    run_id: &str,
    run_path: &Path,
    role_config: RoleConfig,
    ensure_instructions: impl FnOnce(&str, &mut Config),
) -> Result<RoleSession> {
    let RoleConfig {
        role, mut config, ..
    } = role_config;
    config.cwd = run_path.to_path_buf();
    ensure_instructions(&role, &mut config);
    let cfg_for_session = config.clone();
    let session = manager
        .new_conversation_with_cross_session(
            cfg_for_session,
            CrossSessionSpawnParams {
                hub: Arc::clone(&hub),
                run_id: Some(run_id.to_string()),
                role: Some(role.clone()),
            },
        )
        .await?;
    // Note: include the final config used to spawn the session.
    Ok(RoleSession::from_new(role, session, config))
}

// Resumable runs are disabled for now; resume_role was removed.

pub async fn post_turn(
    hub: &CrossSessionHub,
    run_id: &str,
    role: &str,
    text: impl Into<String>,
    final_output_json_schema: Option<Value>,
) -> Result<TurnHandle, CrossSessionError> {
    hub.post_user_turn(PostUserTurnRequest {
        target: RoleOrId::RunRole {
            run_id: run_id.to_string(),
            role: role.to_string(),
        },
        text: text.into(),
        final_output_json_schema,
    })
    .await
}

pub async fn await_first_idle(
    hub: &CrossSessionHub,
    handle: &TurnHandle,
    idle_timeout: Duration,
    progress: Option<(&dyn ProgressReporter, &str)>,
) -> Result<AssistantMessage> {
    let mut events = hub.stream_events(handle.conversation_id())?;
    let wait_first = hub.await_first_assistant(handle, idle_timeout);
    tokio::pin!(wait_first);

    let idle = tokio::time::sleep(idle_timeout);
    tokio::pin!(idle);

    let submission_id = handle.submission_id().to_string();

    loop {
        tokio::select! {
            result = &mut wait_first => {
                return result.map_err(|err| anyhow!(err));
            }
            maybe_event = events.next() => {
                let Some(event) = maybe_event else {
                    bail!(CrossSessionError::SessionClosed);
                };
                if event.event.id == submission_id {
                    if let Some((reporter, role)) = progress {
                        reporter.role_event(role, &event.event.msg);
                    }
                    if let codex_core::protocol::EventMsg::Error(err) = &event.event.msg {
                        bail!(anyhow!(err.message.clone()));
                    }
                    idle.as_mut().reset(Instant::now() + idle_timeout);
                }
            }
            _ = &mut idle => {
                bail!(CrossSessionError::AwaitTimeout(idle_timeout));
            }
        }
    }
}
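post_turn and await_first_idle are meant to be used as a pair: post a turn addressed by run and role, then wait for the first assistant message while every event tagged with this submission resets the idle timer. A minimal crate-internal sketch (the run id and prompt are placeholders):

let handle = session::post_turn(hub.as_ref(), "run-1", "solver", "kick off plan", None).await?;
let reply =
    session::await_first_idle(hub.as_ref(), &handle, Duration::from_secs(60), None).await?;
println!("solver replied: {}", reply.message.message);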
codex-rs/codex-infty/src/signals.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
use serde::Deserialize;
use serde::Serialize;

#[derive(Debug, Deserialize, Serialize)]
pub struct DirectiveResponse {
    pub directive: String,
    #[serde(default)]
    pub rationale: Option<String>,
}

#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum VerifierDecision {
    Pass,
    Fail,
}

impl VerifierDecision {
    pub fn is_pass(self) -> bool {
        matches!(self, VerifierDecision::Pass)
    }
}

#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct VerifierVerdict {
    pub verdict: VerifierDecision,
    #[serde(default)]
    pub reasons: Vec<String>,
    #[serde(default)]
    pub suggestions: Vec<String>,
}

#[derive(Debug, Serialize, Clone)]
pub struct VerifierReport {
    pub role: String,
    pub verdict: VerifierDecision,
    #[serde(default)]
    pub reasons: Vec<String>,
    #[serde(default)]
    pub suggestions: Vec<String>,
}

#[derive(Debug, Serialize, Clone)]
pub struct AggregatedVerifierVerdict {
    #[serde(rename = "type")]
    pub kind: &'static str,
    pub overall: VerifierDecision,
    pub verdicts: Vec<VerifierReport>,
}

impl From<&AggregatedVerifierVerdict> for String {
    fn from(value: &AggregatedVerifierVerdict) -> Self {
        serde_json::to_string_pretty(value).unwrap_or_else(|_| "{}".to_string())
    }
}
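For reference, an AggregatedVerifierVerdict serializes to the JSON shape the solver receives as feedback; the values below are illustrative, but the field names follow the serde attributes above (kind is renamed to "type", decisions are snake_case):

{
  "type": "verification_feedback",
  "overall": "fail",
  "verdicts": [
    {
      "role": "verifier",
      "verdict": "fail",
      "reasons": ["summary.txt missing"],
      "suggestions": ["write deliverable/summary.txt"]
    }
  ]
}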
codex-rs/codex-infty/src/types.rs (new file, 103 lines)
@@ -0,0 +1,103 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use codex_core::CodexConversation;
use codex_core::NewConversation;
use codex_core::config::Config;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::ConversationId;

pub(crate) const DEFAULT_DIRECTOR_TIMEOUT: Duration = Duration::from_secs(1200);
pub(crate) const DEFAULT_VERIFIER_TIMEOUT: Duration = Duration::from_secs(1800);
pub(crate) const FINALIZATION_PROMPT: &str = "Create deliverable/: include compiled artifacts or scripts, usage docs, and tests. Write deliverable/summary.txt capturing the final answer, evidence, and follow-up steps. Also provide deliverable/README.md with overview, manifest (paths and sizes), verification steps, and limitations. Remove scratch files. Reply with JSON: {\"type\":\"final_delivery\",\"deliverable_path\":\"deliverable/summary.txt\",\"summary\":\"<answer plus supporting context>\"}.";

#[derive(Clone)]
pub struct RoleConfig {
    pub role: String,
    pub config: Config,
    pub config_path: Option<PathBuf>,
}

impl RoleConfig {
    pub fn new(role: impl Into<String>, mut config: Config) -> Self {
        config.sandbox_policy = SandboxPolicy::DangerFullAccess;
        config.approval_policy = AskForApproval::Never;
        Self {
            role: role.into(),
            config,
            config_path: None,
        }
    }

    pub fn with_path(role: impl Into<String>, config: Config, config_path: PathBuf) -> Self {
        Self {
            role: role.into(),
            config,
            config_path: Some(config_path),
        }
    }
}

pub struct RunParams {
    pub run_id: String,
    pub run_root: Option<PathBuf>,
    pub solver: RoleConfig,
    pub director: RoleConfig,
    pub verifiers: Vec<RoleConfig>,
}

#[derive(Clone)]
pub struct RunExecutionOptions {
    pub objective: Option<String>,
    pub director_timeout: Duration,
    pub verifier_timeout: Duration,
}

impl Default for RunExecutionOptions {
    fn default() -> Self {
        Self {
            objective: None,
            director_timeout: DEFAULT_DIRECTOR_TIMEOUT,
            verifier_timeout: DEFAULT_VERIFIER_TIMEOUT,
        }
    }
}

pub struct RunOutcome {
    pub run_id: String,
    pub deliverable_path: PathBuf,
    pub summary: Option<String>,
    pub raw_message: String,
}

pub struct RoleSession {
    pub role: String,
    pub conversation_id: ConversationId,
    pub conversation: Arc<CodexConversation>,
    pub session_configured: codex_core::protocol::SessionConfiguredEvent,
    pub rollout_path: PathBuf,
    pub config: Config,
}

impl RoleSession {
    pub(crate) fn from_new(role: String, session: NewConversation, config: Config) -> Self {
        Self {
            role,
            conversation_id: session.conversation_id,
            conversation: session.conversation,
            session_configured: session.session_configured.clone(),
            rollout_path: session.session_configured.rollout_path.clone(),
            config,
        }
    }
}

pub struct RunSessions {
    pub run_id: String,
    pub solver: RoleSession,
    pub director: RoleSession,
    pub verifiers: Vec<RoleSession>,
    pub store: crate::RunStore,
}
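Note that RoleConfig::new deliberately pins the sandbox and approval policies so every spawned role runs unattended, while RoleConfig::with_path stores an already-prepared config untouched. A quick illustrative check (config stands in for a loaded codex Config):

let role = RoleConfig::new("solver", config);
assert!(matches!(role.config.sandbox_policy, SandboxPolicy::DangerFullAccess));
assert!(matches!(role.config.approval_policy, AskForApproval::Never));
assert!(role.config_path.is_none());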
codex-rs/codex-infty/src/utils.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
use std::path::Path;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;

pub fn trim_to_non_empty(opt: Option<String>) -> Option<String> {
    opt.and_then(|s| {
        let trimmed = s.trim();
        if trimmed.is_empty() {
            None
        } else {
            Some(trimmed.to_string())
        }
    })
}

pub fn required_trimmed(opt: Option<String>, err_msg: &str) -> Result<String> {
    trim_to_non_empty(opt).ok_or_else(|| anyhow!(err_msg.to_string()))
}

pub fn resolve_deliverable_path(base: &Path, candidate: &str) -> Result<PathBuf> {
    let base_abs = base
        .canonicalize()
        .with_context(|| format!("failed to canonicalize run store {}", base.display()))?;

    let candidate_path = Path::new(candidate);
    let joined = if candidate_path.is_absolute() {
        candidate_path.to_path_buf()
    } else {
        base_abs.join(candidate_path)
    };

    let resolved = joined.canonicalize().with_context(|| {
        format!(
            "failed to canonicalize deliverable path {}",
            joined.display()
        )
    })?;

    if !resolved.starts_with(&base_abs) {
        bail!(
            "deliverable path {} escapes run store {}",
            resolved.display(),
            base_abs.display()
        );
    }

    Ok(resolved)
}

pub fn objective_as_str(options: &crate::types::RunExecutionOptions) -> Option<&str> {
    options
        .objective
        .as_deref()
        .map(str::trim)
        .filter(|s| !s.is_empty())
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn resolve_deliverable_within_base() {
        let tmp = TempDir::new().unwrap();
        let base = tmp.path();
        std::fs::create_dir_all(base.join("deliverable")).unwrap();
        std::fs::write(base.join("deliverable").join("a.txt"), "ok").unwrap();
        let resolved = resolve_deliverable_path(base, "deliverable/a.txt").unwrap();
        let base_abs = base.canonicalize().unwrap();
        assert!(resolved.starts_with(&base_abs));
    }

    #[test]
    fn resolve_deliverable_rejects_escape() {
        let tmp = TempDir::new().unwrap();
        let base = tmp.path();
        // Create a real file outside of base so canonicalization succeeds.
        let outside = TempDir::new().unwrap();
        let outside_file = outside.path().join("outside.txt");
        std::fs::write(&outside_file, "nope").unwrap();

        let err = resolve_deliverable_path(base, outside_file.to_str().unwrap()).unwrap_err();
        let msg = format!("{err}");
        assert!(msg.contains("escapes run store"));
    }
}
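trim_to_non_empty and required_trimmed normalize optional user input before it reaches the run. Their behavior in brief (illustrative values):

assert_eq!(trim_to_non_empty(Some("  ".to_string())), None);
assert_eq!(
    trim_to_non_empty(Some("  ship it  ".to_string())),
    Some("ship it".to_string())
);
assert!(required_trimmed(None, "objective is required").is_err());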
codex-rs/codex-infty/tests/orchestrator.rs (new file, 327 lines)
@@ -0,0 +1,327 @@
#![cfg(not(target_os = "windows"))]

use std::time::Duration;

use codex_core::CodexAuth;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::cross_session::AssistantMessage;
use codex_core::cross_session::PostUserTurnRequest;
use codex_core::cross_session::RoleOrId;
use codex_core::protocol::Op;
use codex_infty::InftyOrchestrator;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use tempfile::TempDir;
use wiremock::MockServer;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn orchestrator_routes_between_roles_and_records_store() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let bodies = vec![
        responses::sse(vec![
            responses::ev_response_created("solver-resp-1"),
            responses::ev_assistant_message("solver-msg-1", "Need direction"),
            responses::ev_completed("solver-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("director-resp-1"),
            responses::ev_assistant_message("director-msg-1", "Proceed iteratively"),
            responses::ev_completed("director-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("solver-resp-2"),
            responses::ev_assistant_message("solver-msg-2", "Acknowledged"),
            responses::ev_completed("solver-resp-2"),
        ]),
    ];
    let response_mock = responses::mount_sse_sequence(&server, bodies).await;

    let runs_root = TempDir::new()?;
    let orchestrator =
        InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
    let run_id = "run-orchestrator".to_string();

    let solver_config = build_config(&server).await?;
    let director_config = build_config(&server).await?;

    let sessions = orchestrator
        .spawn_run(RunParams {
            run_id: run_id.clone(),
            run_root: Some(runs_root.path().join("runs").join(&run_id)),
            solver: RoleConfig::new("solver", solver_config.clone()),
            director: RoleConfig::new("director", director_config.clone()),
            verifiers: Vec::new(),
        })
        .await?;

    let solver_message = call_role(
        &orchestrator,
        &sessions.run_id,
        "solver",
        "kick off plan",
        Duration::from_secs(1),
    )
    .await?;
    assert_eq!(solver_message.message.message, "Need direction");

    let director_message = relay_assistant_to_role(
        &orchestrator,
        &sessions.run_id,
        "director",
        &solver_message,
        Duration::from_secs(1),
    )
    .await?;
    assert_eq!(director_message.message.message, "Proceed iteratively");

    let solver_reply = relay_assistant_to_role(
        &orchestrator,
        &sessions.run_id,
        "solver",
        &director_message,
        Duration::from_secs(1),
    )
    .await?;
    assert_eq!(solver_reply.message.message, "Acknowledged");

    assert_eq!(response_mock.requests().len(), 3);
    let first_request = response_mock.requests().first().unwrap().body_json();
    let instructions = first_request["instructions"]
        .as_str()
        .expect("request should set instructions");
    assert!(
        instructions.contains("brilliant mathematician"),
        "missing solver prompt: {instructions}"
    );
    assert!(sessions.store.path().is_dir());
    let solver_meta = sessions.store.role_metadata("solver").unwrap();
    assert!(solver_meta.rollout_path.is_some());

    Ok(())
}

// Resumable runs are disabled; the resume test was removed.

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn execute_new_run_drives_to_completion() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let bodies = vec![
        responses::sse(vec![
            responses::ev_response_created("solver-resp-1"),
            responses::ev_assistant_message(
                "solver-msg-1",
                r#"{"type":"direction_request","prompt":"Need directive","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}"#,
            ),
            responses::ev_completed("solver-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("director-resp-1"),
            responses::ev_assistant_message(
                "director-msg-1",
                r#"{"directive":"Proceed","rationale":"Follow the plan"}"#,
            ),
            responses::ev_completed("director-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("solver-resp-2"),
            responses::ev_assistant_message("solver-msg-2", "Acknowledged"),
            responses::ev_assistant_message(
                "solver-msg-4",
                r#"{"type":"final_delivery","prompt":null,"claim_path":null,"notes":null,"deliverable_path":"deliverable","summary":"done"}"#,
            ),
            responses::ev_completed("solver-resp-2"),
        ]),
        // Final verification of the deliverable.
        responses::sse(vec![
            responses::ev_response_created("verifier-resp-3"),
            responses::ev_assistant_message(
                "verifier-msg-3",
                r#"{"verdict":"pass","reasons":[],"suggestions":[]}"#,
            ),
            responses::ev_completed("verifier-resp-3"),
        ]),
        // Feedback turn summarizing the verification outcome back to the solver.
        responses::sse(vec![
            responses::ev_response_created("solver-resp-5"),
            responses::ev_completed("solver-resp-5"),
        ]),
    ];
    for body in bodies {
        responses::mount_sse_once(&server, body).await;
    }

    let runs_root = TempDir::new()?;
    let orchestrator =
        InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
    let run_id = "run-auto".to_string();
    let run_root = runs_root.path().join("runs").join(&run_id);

    let solver_config = build_config(&server).await?;
    let director_config = build_config(&server).await?;
    let verifier_config = build_config(&server).await?;

    let options = RunExecutionOptions {
        objective: Some("Implement feature".to_string()),
        ..RunExecutionOptions::default()
    };

    let outcome = orchestrator
        .execute_new_run(
            RunParams {
                run_id: run_id.clone(),
                run_root: Some(run_root.clone()),
                solver: RoleConfig::new("solver", solver_config),
                director: RoleConfig::new("director", director_config),
                verifiers: vec![RoleConfig::new("verifier", verifier_config)],
            },
            options,
        )
        .await?;

    assert_eq!(outcome.run_id, run_id);
    assert_eq!(outcome.summary.as_deref(), Some("done"));
    assert!(outcome.raw_message.contains("final_delivery"));
    let canonical_run_root = std::fs::canonicalize(&run_root)?;
    let canonical_deliverable = std::fs::canonicalize(&outcome.deliverable_path)?;
    assert!(canonical_deliverable.starts_with(&canonical_run_root));

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn spawn_run_cleans_up_on_failure() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let bodies = vec![
        responses::sse(vec![
            responses::ev_response_created("solver-resp-1"),
            responses::ev_completed("solver-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("director-resp-1"),
            responses::ev_completed("director-resp-1"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("dup-resp"),
            responses::ev_completed("dup-resp"),
        ]),
    ];
    for body in bodies {
        responses::mount_sse_once(&server, body).await;
    }

    let runs_root = TempDir::new()?;
    let orchestrator =
        InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
    let run_id = "run-cleanup".to_string();
    let run_path = runs_root.path().join("runs").join(&run_id);

    let solver_config = build_config(&server).await?;
    let director_config = build_config(&server).await?;

    let result = orchestrator
        .spawn_run(RunParams {
            run_id: run_id.clone(),
            run_root: Some(run_path.clone()),
            solver: RoleConfig::new("solver", solver_config.clone()),
            director: RoleConfig::new("director", director_config.clone()),
            verifiers: vec![RoleConfig::new("solver", solver_config.clone())],
        })
        .await;
    assert!(result.is_err());
    assert!(!run_path.exists(), "failed run should remove run directory");

    let bodies = vec![
        responses::sse(vec![
            responses::ev_response_created("solver-resp-2"),
            responses::ev_completed("solver-resp-2"),
        ]),
        responses::sse(vec![
            responses::ev_response_created("director-resp-2"),
            responses::ev_completed("director-resp-2"),
        ]),
    ];
    for body in bodies {
        responses::mount_sse_once(&server, body).await;
    }

    let sessions = orchestrator
        .spawn_run(RunParams {
            run_id: run_id.clone(),
            run_root: Some(run_path.clone()),
            solver: RoleConfig::new("solver", solver_config),
            director: RoleConfig::new("director", director_config),
            verifiers: Vec::new(),
        })
        .await?;

    sessions.solver.conversation.submit(Op::Shutdown).await.ok();
    sessions
        .director
        .conversation
        .submit(Op::Shutdown)
        .await
        .ok();

    Ok(())
}

async fn build_config(server: &MockServer) -> anyhow::Result<Config> {
    let home = TempDir::new()?;
    let cwd = TempDir::new()?;
    let mut config = load_default_config_for_test(&home);
    config.cwd = cwd.path().to_path_buf();
    let mut provider = built_in_model_providers()["openai"].clone();
    provider.base_url = Some(format!("{}/v1", server.uri()));
    config.model_provider = provider;
    Ok(config)
}

async fn call_role(
    orchestrator: &InftyOrchestrator,
    run_id: &str,
    role: &str,
    text: &str,
    timeout: Duration,
) -> anyhow::Result<AssistantMessage> {
    let hub = orchestrator.hub();
    let handle = hub
        .post_user_turn(PostUserTurnRequest {
            target: RoleOrId::RunRole {
                run_id: run_id.to_string(),
                role: role.to_string(),
            },
            text: text.to_string(),
            final_output_json_schema: None,
        })
        .await?;
    let reply = hub.await_first_assistant(&handle, timeout).await?;
    Ok(reply)
}

async fn relay_assistant_to_role(
    orchestrator: &InftyOrchestrator,
    run_id: &str,
    target_role: &str,
    assistant: &AssistantMessage,
    timeout: Duration,
) -> anyhow::Result<AssistantMessage> {
    call_role(
        orchestrator,
        run_id,
        target_role,
        &assistant.message.message,
        timeout,
    )
    .await
}
324
codex-rs/codex-infty/tests/schemas.rs
Normal file
324
codex-rs/codex-infty/tests/schemas.rs
Normal file
@@ -0,0 +1,324 @@
|
||||
#![cfg(not(target_os = "windows"))]
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use codex_core::CodexAuth;
|
||||
use codex_core::built_in_model_providers;
|
||||
use codex_core::config::Config;
|
||||
use codex_infty::InftyOrchestrator;
|
||||
use codex_infty::RoleConfig;
|
||||
use codex_infty::RunExecutionOptions;
|
||||
use codex_infty::RunParams;
|
||||
use core_test_support::load_default_config_for_test;
|
||||
use core_test_support::responses;
|
||||
use core_test_support::skip_if_no_network;
|
||||
use tempfile::TempDir;
|
||||
use wiremock::MockServer;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn director_request_includes_output_schema() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = responses::start_mock_server().await;
|
||||
|
||||
// 1) Solver: emit a direction_request so the orchestrator calls Director.
|
||||
let body_solver = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-1"),
|
||||
responses::ev_assistant_message(
|
||||
"solver-msg-1",
|
||||
r#"{"type":"direction_request","prompt":"Need directive","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}"#,
|
||||
),
|
||||
responses::ev_completed("solver-resp-1"),
|
||||
]);
|
||||
let _mock_solver = responses::mount_sse_once(&server, body_solver).await;
|
||||
|
||||
// 2) Director: reply with a directive JSON.
|
||||
let body_director = responses::sse(vec![
|
||||
responses::ev_response_created("director-resp-1"),
|
||||
responses::ev_assistant_message(
|
||||
"director-msg-1",
|
||||
r#"{"directive":"Proceed","rationale":"Follow the plan"}"#,
|
||||
),
|
||||
responses::ev_completed("director-resp-1"),
|
||||
]);
|
||||
let mock_director = responses::mount_sse_once(&server, body_director).await;
|
||||
|
||||
// 3) After relaying directive back to Solver, we do not need to continue the run.
|
||||
// Provide a short empty solver completion body to avoid hanging HTTP calls.
|
||||
let body_solver_after = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-2"),
|
||||
responses::ev_completed("solver-resp-2"),
|
||||
]);
|
||||
let _mock_solver_after = responses::mount_sse_once(&server, body_solver_after).await;
|
||||
|
||||
let runs_root = TempDir::new()?;
|
||||
let orchestrator =
|
||||
InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
|
||||
let run_id = "run-director-schema".to_string();
|
||||
|
||||
let solver_config = build_config(&server).await?;
|
||||
let director_config = build_config(&server).await?;
|
||||
|
||||
let params = RunParams {
|
||||
run_id: run_id.clone(),
|
||||
run_root: Some(runs_root.path().join("runs").join(&run_id)),
|
||||
solver: RoleConfig::new("solver", solver_config),
|
||||
director: RoleConfig::new("director", director_config),
|
||||
verifiers: Vec::new(),
|
||||
};
|
||||
|
||||
let options = RunExecutionOptions {
|
||||
objective: Some("Kick off".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Drive the run in the background; we'll assert the request shape then cancel.
|
||||
let fut = tokio::spawn(async move {
|
||||
let _ = orchestrator.execute_new_run(params, options).await;
|
||||
});
|
||||
|
||||
// Wait until the Director request is captured.
|
||||
wait_for_requests(&mock_director, 1, Duration::from_secs(2)).await;
|
||||
let req = mock_director.single_request();
|
||||
let body = req.body_json();
|
||||
|
||||
// Assert that a JSON schema was sent under text.format.
|
||||
let text = &body["text"]; // Optional; present when using schemas
|
||||
assert!(text.is_object(), "missing text controls in request body");
|
||||
let fmt = &text["format"];
|
||||
assert!(fmt.is_object(), "missing text.format in request body");
|
||||
assert_eq!(fmt["type"], "json_schema");
|
||||
let schema = &fmt["schema"];
|
||||
assert!(schema.is_object(), "missing text.format.schema");
|
||||
assert_eq!(schema["type"], "object");
|
||||
// Ensure the directive property exists and is a string.
|
||||
assert_eq!(schema["properties"]["directive"]["type"], "string");
|
||||
// Enforce strictness: required must include all properties.
|
||||
let required = schema["required"]
|
||||
.as_array()
|
||||
.expect("required must be array");
|
||||
let props = schema["properties"]
|
||||
.as_object()
|
||||
.expect("properties must be object");
|
||||
for key in props.keys() {
|
||||
assert!(
|
||||
required.iter().any(|v| v == key),
|
||||
"missing {key} in required"
|
||||
);
|
||||
}
|
||||
// Ensure the objective text appears in the serialized request body
|
||||
let raw = serde_json::to_string(&body).expect("serialize body");
|
||||
assert!(
|
||||
raw.contains("Kick off"),
|
||||
"objective missing from director request body"
|
||||
);
|
||||
|
||||
// Stop the background task to end the test.
|
||||
fut.abort();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn final_delivery_request_includes_output_schema() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = responses::start_mock_server().await;
|
||||
|
||||
// 1) Solver: emit empty message so orchestrator asks for final_delivery via schema.
|
||||
let body_solver = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-1"),
|
||||
// No signal -> orchestrator will prompt with final_output schema.
|
||||
responses::ev_completed("solver-resp-1"),
|
||||
]);
|
||||
let _mock_solver = responses::mount_sse_once(&server, body_solver).await;
|
||||
|
||||
// 2) Capture the schema-bearing request to Solver.
|
||||
let body_solver_prompt = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-2"),
|
||||
responses::ev_assistant_message(
|
||||
"solver-msg-2",
|
||||
r#"{"type":"final_delivery","deliverable_path":"deliverable/summary.txt","summary":null}"#,
|
||||
),
|
||||
responses::ev_completed("solver-resp-2"),
|
||||
]);
|
||||
let mock_solver_prompt = responses::mount_sse_once(&server, body_solver_prompt).await;
|
||||
|
||||
// 3) Keep any follow-up quiet.
|
||||
let body_solver_done = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-3"),
|
||||
responses::ev_completed("solver-resp-3"),
|
||||
]);
|
||||
let _mock_solver_done = responses::mount_sse_once(&server, body_solver_done).await;
|
||||
|
||||
let runs_root = TempDir::new()?;
|
||||
let orchestrator =
|
||||
InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
|
||||
let run_id = "run-final-schema".to_string();
|
||||
|
||||
let solver_config = build_config(&server).await?;
|
||||
let director_config = build_config(&server).await?;
|
||||
|
||||
let params = RunParams {
|
||||
run_id: run_id.clone(),
|
||||
run_root: Some(runs_root.path().join("runs").join(&run_id)),
|
||||
solver: RoleConfig::new("solver", solver_config),
|
||||
director: RoleConfig::new("director", director_config),
|
||||
verifiers: Vec::new(),
|
||||
};
|
||||
|
||||
let options = RunExecutionOptions {
|
||||
objective: Some("Kick off".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let fut = tokio::spawn(async move {
|
||||
let _ = orchestrator.execute_new_run(params, options).await;
|
||||
});
|
||||
|
||||
wait_for_requests(&mock_solver_prompt, 1, Duration::from_secs(2)).await;
|
||||
let req = mock_solver_prompt.single_request();
|
||||
let body = req.body_json();
|
||||
let text = &body["text"];
|
||||
assert!(text.is_object(), "missing text controls in request body");
|
||||
let fmt = &text["format"];
|
||||
assert!(fmt.is_object(), "missing text.format in request body");
|
||||
assert_eq!(fmt["type"], "json_schema");
|
||||
let schema = &fmt["schema"];
|
||||
assert!(schema.is_object(), "missing text.format.schema");
|
||||
let required = schema["required"]
|
||||
.as_array()
|
||||
.expect("required must be array");
|
||||
let props = schema["properties"]
|
||||
.as_object()
|
||||
.expect("properties must be object");
|
||||
for key in props.keys() {
|
||||
assert!(
|
||||
required.iter().any(|v| v == key),
|
||||
"missing {key} in required"
|
||||
);
|
||||
}
|
||||
|
||||
fut.abort();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn verifier_request_includes_output_schema() -> anyhow::Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let server = responses::start_mock_server().await;
|
||||
|
||||
// 1) Solver: issue a final_delivery which triggers verifier requests.
|
||||
let body_solver = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-1"),
|
||||
responses::ev_assistant_message(
|
||||
"solver-msg-1",
|
||||
r#"{"type":"final_delivery","deliverable_path":"deliverable/summary.txt","summary":null}"#,
|
||||
),
|
||||
responses::ev_completed("solver-resp-1"),
|
||||
]);
|
||||
let _mock_solver = responses::mount_sse_once(&server, body_solver).await;
|
||||
|
||||
// 2) Verifier: reply with a verdict JSON.
|
||||
let body_verifier = responses::sse(vec![
|
||||
responses::ev_response_created("verifier-resp-1"),
|
||||
responses::ev_assistant_message(
|
||||
"verifier-msg-1",
|
||||
r#"{"verdict":"pass","reasons":[],"suggestions":[]}"#,
|
||||
),
|
||||
responses::ev_completed("verifier-resp-1"),
|
||||
]);
|
||||
let mock_verifier = responses::mount_sse_once(&server, body_verifier).await;
|
||||
|
||||
// 3) After posting the summary back to Solver, let the request complete.
|
||||
let body_solver_after = responses::sse(vec![
|
||||
responses::ev_response_created("solver-resp-2"),
|
||||
responses::ev_completed("solver-resp-2"),
|
||||
]);
|
||||
let _mock_solver_after = responses::mount_sse_once(&server, body_solver_after).await;
|
||||
|
||||
let runs_root = TempDir::new()?;
|
||||
let orchestrator =
|
||||
InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
|
||||
let run_id = "run-verifier-schema".to_string();
|
||||
|
||||
let solver_config = build_config(&server).await?;
|
||||
let director_config = build_config(&server).await?;
|
||||
let verifier_config = build_config(&server).await?;
|
||||
|
||||
let params = RunParams {
|
||||
run_id: run_id.clone(),
|
||||
run_root: Some(runs_root.path().join("runs").join(&run_id)),
|
||||
solver: RoleConfig::new("solver", solver_config),
|
||||
director: RoleConfig::new("director", director_config),
|
||||
verifiers: vec![RoleConfig::new("verifier", verifier_config)],
|
||||
};
|
||||
|
||||
let options = RunExecutionOptions {
|
||||
objective: Some("Kick off".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let fut = tokio::spawn(async move {
|
||||
let _ = orchestrator.execute_new_run(params, options).await;
|
||||
});
|
||||
|
||||
// Wait until the Verifier request is captured.
|
||||
wait_for_requests(&mock_verifier, 1, Duration::from_secs(2)).await;
|
||||
let req = mock_verifier.single_request();
|
||||
let body = req.body_json();
|
||||
|
||||
// Assert that a JSON schema was sent under text.format.
|
||||
let text = &body["text"]; // Optional; present when using schemas
|
||||
assert!(text.is_object(), "missing text controls in request body");
|
||||
let fmt = &text["format"];
|
||||
assert!(fmt.is_object(), "missing text.format in request body");
|
||||
assert_eq!(fmt["type"], "json_schema");
|
||||
let schema = &fmt["schema"];
|
||||
assert!(schema.is_object(), "missing text.format.schema");
|
||||
assert_eq!(schema["type"], "object");
|
||||
// Ensure the verdict property exists and is an enum of pass/fail.
|
||||
assert!(schema["properties"]["verdict"].is_object());
|
||||
// Enforce strictness: required must include all properties.
|
||||
let required = schema["required"]
|
||||
.as_array()
|
||||
.expect("required must be array");
|
||||
let props = schema["properties"]
|
||||
.as_object()
|
||||
.expect("properties must be object");
|
||||
for key in props.keys() {
|
||||
assert!(
|
||||
required.iter().any(|v| v == key),
|
||||
"missing {key} in required"
|
||||
);
|
||||
}
|
||||
|
||||
fut.abort();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn build_config(server: &MockServer) -> anyhow::Result<Config> {
|
||||
let home = TempDir::new()?;
|
||||
let cwd = TempDir::new()?;
|
||||
let mut config = load_default_config_for_test(&home);
|
||||
config.cwd = cwd.path().to_path_buf();
|
||||
let mut provider = built_in_model_providers()["openai"].clone();
|
||||
provider.base_url = Some(format!("{}/v1", server.uri()));
|
||||
config.model_provider = provider;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
async fn wait_for_requests(mock: &responses::ResponseMock, min: usize, timeout: Duration) {
|
||||
use tokio::time::Instant;
|
||||
use tokio::time::sleep;
|
||||
let start = Instant::now();
|
||||
loop {
|
||||
if mock.requests().len() >= min {
|
||||
return;
|
||||
}
|
||||
if start.elapsed() > timeout {
|
||||
return;
|
||||
}
|
||||
sleep(Duration::from_millis(25)).await;
|
||||
}
|
||||
}
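For orientation, the strict text.format payload this test asserts on has roughly the following shape (a sketch built with serde_json's json! macro; aside from the verdict property, the schema contents are illustrative, not captured wire traffic):

use serde_json::json;

// Sketch of the asserted request-body fragment; the test above only checks
// "type": "json_schema", the object schema, and the verdict property.
let expected_shape = json!({
    "text": {
        "format": {
            "type": "json_schema",
            "schema": {
                "type": "object",
                "properties": {
                    "verdict": { "type": "string", "enum": ["pass", "fail"] }
                },
                "required": ["verdict"]
            }
        }
    }
});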
codex-rs/codex-infty/tests/timeouts.rs (new file, +98 lines)
@@ -0,0 +1,98 @@
#![cfg(not(target_os = "windows"))]

use std::time::Duration;

use codex_core::CodexAuth;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_infty::InftyOrchestrator;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use tempfile::TempDir;
use wiremock::MockServer;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn direction_request_times_out_when_director_is_silent() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;

    // Solver emits a direction_request.
    let body_solver = responses::sse(vec![
        responses::ev_response_created("solver-resp-1"),
        responses::ev_assistant_message(
            "solver-msg-1",
            r#"{"type":"direction_request","prompt":"Need directive","claim_path":null,"notes":null,"deliverable_path":null,"summary":null}"#,
        ),
        responses::ev_completed("solver-resp-1"),
    ]);
    let _mock_solver = responses::mount_sse_once(&server, body_solver).await;

    // Director remains silent (no assistant message); the model completes immediately.
    let body_director_silent = responses::sse(vec![
        responses::ev_response_created("director-resp-1"),
        // intentionally no message
        responses::ev_completed("director-resp-1"),
    ]);
    let _mock_director = responses::mount_sse_once(&server, body_director_silent).await;

    // The orchestrator will not get past relaying a directive back to the solver,
    // because we time out waiting for the director; we still pre-mount an empty
    // solver completion so the follow-up post is satisfied if future changes ever
    // reach that step in the flow.
    let body_solver_after = responses::sse(vec![
        responses::ev_response_created("solver-resp-2"),
        responses::ev_completed("solver-resp-2"),
    ]);
    let _mock_solver_after = responses::mount_sse_once(&server, body_solver_after).await;

    let runs_root = TempDir::new()?;
    let orchestrator =
        InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
    let run_id = "run-director-timeout".to_string();

    let solver_config = build_config(&server).await?;
    let director_config = build_config(&server).await?;

    let params = RunParams {
        run_id: run_id.clone(),
        run_root: Some(runs_root.path().join("runs").join(&run_id)),
        solver: RoleConfig::new("solver", solver_config),
        director: RoleConfig::new("director", director_config),
        verifiers: Vec::new(),
    };

    let options = RunExecutionOptions {
        objective: Some("Kick off".to_string()),
        director_timeout: Duration::from_millis(50),
        ..Default::default()
    };

    let err = orchestrator
        .execute_new_run(params, options)
        .await
        .err()
        .expect("expected timeout error");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("timed out waiting") || msg.contains("AwaitTimeout"),
        "unexpected error: {msg}"
    );

    Ok(())
}

async fn build_config(server: &MockServer) -> anyhow::Result<Config> {
    let home = TempDir::new()?;
    let cwd = TempDir::new()?;
    let mut config = load_default_config_for_test(&home);
    config.cwd = cwd.path().to_path_buf();
    let mut provider = built_in_model_providers()["openai"].clone();
    provider.base_url = Some(format!("{}/v1", server.uri()));
    config.model_provider = provider;
    Ok(config)
}
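The direction_request literal above is opaque JSON as far as the test is concerned; a minimal serde model consistent with it might look like this (an illustrative sketch, not the actual type inside the codex-infty crate):

use serde::Deserialize;

// Hypothetical mirror of the solver's direction_request message, inferred
// from the JSON literal used in the test above.
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum SolverMessage {
    DirectionRequest {
        prompt: String,
        claim_path: Option<String>,
        notes: Option<String>,
        deliverable_path: Option<String>,
        summary: Option<String>,
    },
}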
codex-rs/codex-infty/tests/verifier_replacement.rs (new file, +157 lines)
@@ -0,0 +1,157 @@
#![cfg(not(target_os = "windows"))]

use std::time::Duration;

use codex_core::CodexAuth;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_infty::InftyOrchestrator;
use codex_infty::RoleConfig;
use codex_infty::RunExecutionOptions;
use codex_infty::RunParams;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use tempfile::TempDir;
use wiremock::MockServer;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn replaces_passing_verifiers_and_keeps_failing() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;

    // Round 1: alpha passes, beta fails.
    let body_verifier_alpha_r1 = responses::sse(vec![
        responses::ev_response_created("verifier-alpha-r1"),
        responses::ev_assistant_message(
            "verifier-alpha-msg-r1",
            r#"{"verdict":"pass","reasons":[],"suggestions":[]}"#,
        ),
        responses::ev_completed("verifier-alpha-r1"),
    ]);
    let body_verifier_beta_r1 = responses::sse(vec![
        responses::ev_response_created("verifier-beta-r1"),
        responses::ev_assistant_message(
            "verifier-beta-msg-r1",
            r#"{"verdict":"fail","reasons":["missing"],"suggestions":[]}"#,
        ),
        responses::ev_completed("verifier-beta-r1"),
    ]);

    // Round 2: both pass.
    let body_verifier_alpha_r2 = responses::sse(vec![
        responses::ev_response_created("verifier-alpha-r2"),
        responses::ev_assistant_message(
            "verifier-alpha-msg-r2",
            r#"{"verdict":"pass","reasons":[],"suggestions":[]}"#,
        ),
        responses::ev_completed("verifier-alpha-r2"),
    ]);
    let body_verifier_beta_r2 = responses::sse(vec![
        responses::ev_response_created("verifier-beta-r2"),
        responses::ev_assistant_message(
            "verifier-beta-msg-r2",
            r#"{"verdict":"pass","reasons":[],"suggestions":[]}"#,
        ),
        responses::ev_completed("verifier-beta-r2"),
    ]);

    // Mount verifier SSE bodies in the exact order collect_verification_summary posts to verifiers.
    // The implementation posts sequentially in the order of sessions.verifiers.
    let _m1 = responses::mount_sse_once(&server, body_verifier_alpha_r1).await;
    let _m2 = responses::mount_sse_once(&server, body_verifier_beta_r1).await;
    let _m3 = responses::mount_sse_once(&server, body_verifier_alpha_r2).await;
    let _m4 = responses::mount_sse_once(&server, body_verifier_beta_r2).await;

    let runs_root = TempDir::new()?;
    let orchestrator =
        InftyOrchestrator::with_runs_root(CodexAuth::from_api_key("dummy-key"), runs_root.path());
    let run_id = "run-verifier-replacement".to_string();

    let solver_config = build_config(&server).await?;
    let director_config = build_config(&server).await?;
    let verifier_config = build_config(&server).await?;

    // Spawn run with two verifiers in known order.
    let mut sessions = orchestrator
        .spawn_run(RunParams {
            run_id: run_id.clone(),
            run_root: Some(runs_root.path().join("runs").join(&run_id)),
            solver: RoleConfig::new("solver", solver_config),
            director: RoleConfig::new("director", director_config),
            verifiers: vec![
                RoleConfig::new("verifier-alpha", verifier_config.clone()),
                RoleConfig::new("verifier-beta", verifier_config),
            ],
        })
        .await?;

    let alpha_initial = sessions
        .store
        .role_metadata("verifier-alpha")
        .and_then(|m| m.rollout_path.clone())
        .expect("alpha initial rollout path");
    let beta_initial = sessions
        .store
        .role_metadata("verifier-beta")
        .and_then(|m| m.rollout_path.clone())
        .expect("beta initial rollout path");

    let options = RunExecutionOptions {
        verifier_timeout: Duration::from_secs(2),
        ..Default::default()
    };

    // Round 1: alpha passes (should be replaced), beta fails (should be kept).
    let _summary1 = orchestrator
        .verify_round_for_test(&mut sessions, "memory/claims/c1.json", &options)
        .await?;

    let alpha_after_r1 = sessions
        .store
        .role_metadata("verifier-alpha")
        .and_then(|m| m.rollout_path.clone())
        .expect("alpha rollout after r1");
    let beta_after_r1 = sessions
        .store
        .role_metadata("verifier-beta")
        .and_then(|m| m.rollout_path.clone())
        .expect("beta rollout after r1");

    assert_ne!(
        alpha_initial, alpha_after_r1,
        "alpha should be replaced after pass"
    );
    assert_eq!(
        beta_initial, beta_after_r1,
        "beta should be kept after fail"
    );

    // Round 2: both pass; beta should be replaced now.
    let _summary2 = orchestrator
        .verify_round_for_test(&mut sessions, "memory/claims/c2.json", &options)
        .await?;
    let beta_after_r2 = sessions
        .store
        .role_metadata("verifier-beta")
        .and_then(|m| m.rollout_path.clone())
        .expect("beta rollout after r2");
    assert_ne!(
        beta_initial, beta_after_r2,
        "beta should be replaced after pass in r2"
    );

    Ok(())
}

async fn build_config(server: &MockServer) -> anyhow::Result<Config> {
    let home = TempDir::new()?;
    let cwd = TempDir::new()?;
    let mut config = load_default_config_for_test(&home);
    config.cwd = cwd.path().to_path_buf();
    let mut provider = built_in_model_providers()["openai"].clone();
    provider.base_url = Some(format!("{}/v1", server.uri()));
    config.model_provider = provider;
    Ok(config)
}
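The verdict JSON returned by the mocked verifiers drives the keep/replace decision; a minimal serde model consistent with the fixtures above might be (an illustrative sketch, not the crate's actual type):

use serde::Deserialize;

// Hypothetical verdict shape inferred from the test fixtures above.
#[derive(Debug, Deserialize)]
struct VerifierVerdict {
    verdict: Verdict,
    reasons: Vec<String>,
    suggestions: Vec<String>,
}

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Verdict {
    Pass,
    Fail,
}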
codex-rs/common/src/format_env_display.rs (new file, +66 lines)
@@ -0,0 +1,66 @@
use std::collections::HashMap;

pub fn format_env_display(env: Option<&HashMap<String, String>>, env_vars: &[String]) -> String {
    let mut parts: Vec<String> = Vec::new();

    if let Some(map) = env {
        let mut pairs: Vec<_> = map.iter().collect();
        pairs.sort_by(|(a, _), (b, _)| a.cmp(b));
        parts.extend(
            pairs
                .into_iter()
                .map(|(key, value)| format!("{key}={value}")),
        );
    }

    if !env_vars.is_empty() {
        parts.extend(env_vars.iter().map(|var| format!("{var}=${var}")));
    }

    if parts.is_empty() {
        "-".to_string()
    } else {
        parts.join(", ")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn returns_dash_when_empty() {
        assert_eq!(format_env_display(None, &[]), "-");

        let empty_map = HashMap::new();
        assert_eq!(format_env_display(Some(&empty_map), &[]), "-");
    }

    #[test]
    fn formats_sorted_env_pairs() {
        let mut env = HashMap::new();
        env.insert("B".to_string(), "two".to_string());
        env.insert("A".to_string(), "one".to_string());

        assert_eq!(format_env_display(Some(&env), &[]), "A=one, B=two");
    }

    #[test]
    fn formats_env_vars_with_dollar_prefix() {
        let vars = vec!["TOKEN".to_string(), "PATH".to_string()];

        assert_eq!(format_env_display(None, &vars), "TOKEN=$TOKEN, PATH=$PATH");
    }

    #[test]
    fn combines_env_pairs_and_vars() {
        let mut env = HashMap::new();
        env.insert("HOME".to_string(), "/tmp".to_string());
        let vars = vec!["TOKEN".to_string()];

        assert_eq!(
            format_env_display(Some(&env), &vars),
            "HOME=/tmp, TOKEN=$TOKEN"
        );
    }
}
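A plausible call site (hypothetical; the crate and module paths here are assumptions based on this file's location under codex-rs/common) would render an MCP server's environment column like so:

use std::collections::HashMap;

// Hypothetical caller: explicit KEY=value pairs render first (sorted),
// then passthrough vars render as KEY=$KEY; "-" when both are empty.
fn env_column(explicit: &HashMap<String, String>, passthrough: &[String]) -> String {
    codex_common::format_env_display::format_env_display(Some(explicit), passthrough)
}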
@@ -13,6 +13,9 @@ mod sandbox_mode_cli_arg;
#[cfg(feature = "cli")]
pub use sandbox_mode_cli_arg::SandboxModeCliArg;

#[cfg(feature = "cli")]
pub mod format_env_display;

#[cfg(any(feature = "cli", test))]
mod config_override;

@@ -19,13 +19,14 @@ async-trait = { workspace = true }
base64 = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
codex-app-server-protocol = { workspace = true }
codex-apply-patch = { workspace = true }
codex-file-search = { workspace = true }
codex-mcp-client = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-otel = { workspace = true, features = ["otel"] }
codex-protocol = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-utils-string = { workspace = true }
dirs = { workspace = true }
dunce = { workspace = true }
env-flags = { workspace = true }
@@ -60,7 +61,8 @@ tokio = { workspace = true, features = [
    "rt-multi-thread",
    "signal",
] }
tokio-util = { workspace = true }
tokio-util = { workspace = true, features = ["rt"] }
tokio-stream = { workspace = true, features = ["sync"] }
toml = { workspace = true }
toml_edit = { workspace = true }
tracing = { workspace = true, features = ["log"] }
@@ -75,6 +77,9 @@ wildmatch = { workspace = true }
landlock = { workspace = true }
seccompiler = { workspace = true }

[target.'cfg(target_os = "macos")'.dependencies]
core-foundation = "0.9"

# Build OpenSSL from source for musl builds.
[target.x86_64-unknown-linux-musl.dependencies]
openssl-sys = { workspace = true, features = ["vendored"] }
@@ -85,16 +90,18 @@ openssl-sys = { workspace = true, features = ["vendored"] }

[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
core_test_support = { workspace = true }
escargot = { workspace = true }
maplit = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
serial_test = { workspace = true }
tempfile = { workspace = true }
tokio-test = { workspace = true }
tracing-test = { workspace = true, features = ["no-env-filter"] }
walkdir = { workspace = true }
wiremock = { workspace = true }
tracing-test = { workspace = true, features = ["no-env-filter"] }

[package.metadata.cargo-shear]
ignored = ["openssl-sys"]
@@ -12,7 +12,7 @@ Expects `/usr/bin/sandbox-exec` to be present.

### Linux

Expects the binary containing `codex-core` to run the equivalent of `codex debug landlock` when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details.
Expects the binary containing `codex-core` to run the equivalent of `codex sandbox linux` (legacy alias: `codex debug landlock`) when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details.

### All Platforms

@@ -10,12 +10,14 @@ You are Codex, based on GPT-5. You are running as a coding agent in the Codex CL

- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (i.e. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase).
- You may be in a dirty git worktree.
  * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
  * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
  * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
  * If the changes are in unrelated files, just ignore them and don't revert them.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.

## Plan tool

@@ -27,6 +27,7 @@ pub(crate) enum InternalApplyPatchInvocation {
    DelegateToExec(ApplyPatchExec),
}

#[derive(Debug)]
pub(crate) struct ApplyPatchExec {
    pub(crate) action: ApplyPatchAction,
    pub(crate) user_explicitly_approved_this_action: bool,
@@ -109,3 +110,28 @@ pub(crate) fn convert_apply_patch_to_protocol(
    }
    result
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    use tempfile::tempdir;

    #[test]
    fn convert_apply_patch_maps_add_variant() {
        let tmp = tempdir().expect("tmp");
        let p = tmp.path().join("a.txt");
        // Create an action with a single Add change
        let action = ApplyPatchAction::new_add_for_test(&p, "hello".to_string());

        let got = convert_apply_patch_to_protocol(&action);

        assert_eq!(
            got.get(&p),
            Some(&FileChange::Add {
                content: "hello".to_string()
            })
        );
    }
}
@@ -135,6 +135,10 @@ impl CodexAuth {
        self.get_current_token_data().and_then(|t| t.account_id)
    }

    pub fn get_account_email(&self) -> Option<String> {
        self.get_current_token_data().and_then(|t| t.id_token.email)
    }

    pub(crate) fn get_plan_type(&self) -> Option<PlanType> {
        self.get_current_token_data()
            .and_then(|t| t.id_token.chatgpt_plan_type)
@@ -5,6 +5,8 @@ use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::error::CodexErr;
use crate::error::ConnectionFailedError;
use crate::error::ResponseStreamFailed;
use crate::error::Result;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
@@ -309,7 +311,12 @@ pub(crate) async fn stream_chat_completions(
    match res {
        Ok(resp) if resp.status().is_success() => {
            let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600);
            let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
            let stream = resp.bytes_stream().map_err(|e| {
                CodexErr::ResponseStreamFailed(ResponseStreamFailed {
                    source: e,
                    request_id: None,
                })
            });
            tokio::spawn(process_chat_sse(
                stream,
                tx_event,
@@ -349,7 +356,9 @@ pub(crate) async fn stream_chat_completions(
        }
        Err(e) => {
            if attempt > max_retries {
                return Err(e.into());
                return Err(CodexErr::ConnectionFailed(ConnectionFailedError {
                    source: e,
                }));
            }
            let delay = backoff(attempt);
            tokio::time::sleep(delay).await;
@@ -389,10 +398,12 @@ async fn process_chat_sse<S>(
    let mut reasoning_text = String::new();

    loop {
        let sse = match otel_event_manager
            .log_sse_event(|| timeout(idle_timeout, stream.next()))
            .await
        {
        let start = std::time::Instant::now();
        let response = timeout(idle_timeout, stream.next()).await;
        let duration = start.elapsed();
        otel_event_manager.log_sse_event(&response, duration);

        let sse = match response {
            Ok(Some(Ok(ev))) => ev,
            Ok(Some(Err(e))) => {
                let _ = tx_event
@@ -5,6 +5,8 @@ use std::time::Duration;

use crate::AuthManager;
use crate::auth::CodexAuth;
use crate::error::ConnectionFailedError;
use crate::error::ResponseStreamFailed;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
use bytes::Bytes;
@@ -47,6 +49,7 @@ use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
use crate::protocol::TokenUsage;
use crate::state::TaskKind;
use crate::token_data::PlanType;
use crate::util::backoff;
use codex_otel::otel_event_manager::OtelEventManager;
@@ -63,7 +66,6 @@ struct ErrorResponse {
#[derive(Debug, Deserialize)]
struct Error {
    r#type: Option<String>,
    #[allow(dead_code)]
    code: Option<String>,
    message: Option<String>,

@@ -124,8 +126,16 @@ impl ModelClient {
    /// the provider config. Public callers always invoke `stream()` – the
    /// specialised helpers are private to avoid accidental misuse.
    pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> {
        self.stream_with_task_kind(prompt, TaskKind::Regular).await
    }

    pub(crate) async fn stream_with_task_kind(
        &self,
        prompt: &Prompt,
        task_kind: TaskKind,
    ) -> Result<ResponseStream> {
        match self.provider.wire_api {
            WireApi::Responses => self.stream_responses(prompt).await,
            WireApi::Responses => self.stream_responses(prompt, task_kind).await,
            WireApi::Chat => {
                // Create the raw streaming connection first.
                let response_stream = stream_chat_completions(
@@ -166,7 +176,11 @@ impl ModelClient {
    }

    /// Implementation for the OpenAI *Responses* experimental API.
    async fn stream_responses(&self, prompt: &Prompt) -> Result<ResponseStream> {
    async fn stream_responses(
        &self,
        prompt: &Prompt,
        task_kind: TaskKind,
    ) -> Result<ResponseStream> {
        if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
            // short circuit for tests
            warn!(path, "Streaming from fixture");
@@ -228,7 +242,7 @@ impl ModelClient {
            input: &input_with_instructions,
            tools: &tools_json,
            tool_choice: "auto",
            parallel_tool_calls: false,
            parallel_tool_calls: prompt.parallel_tool_calls,
            reasoning,
            store: azure_workaround,
            stream: true,
@@ -245,7 +259,7 @@ impl ModelClient {
        let max_attempts = self.provider.request_max_retries();
        for attempt in 0..=max_attempts {
            match self
                .attempt_stream_responses(attempt, &payload_json, &auth_manager)
                .attempt_stream_responses(attempt, &payload_json, &auth_manager, task_kind)
                .await
            {
                Ok(stream) => {
@@ -273,6 +287,7 @@ impl ModelClient {
        attempt: u64,
        payload_json: &Value,
        auth_manager: &Option<Arc<AuthManager>>,
        task_kind: TaskKind,
    ) -> std::result::Result<ResponseStream, StreamAttemptError> {
        // Always fetch the latest auth in case a prior attempt refreshed the token.
        let auth = auth_manager.as_ref().and_then(|m| m.auth());
@@ -295,6 +310,7 @@ impl ModelClient {
            .header("conversation_id", self.conversation_id.to_string())
            .header("session_id", self.conversation_id.to_string())
            .header(reqwest::header::ACCEPT, "text/event-stream")
            .header("Codex-Task-Type", task_kind.header_value())
            .json(payload_json);

        if let Some(auth) = auth.as_ref()
@@ -337,7 +353,12 @@ impl ModelClient {
        }

        // spawn task to process SSE
        let stream = resp.bytes_stream().map_err(CodexErr::Reqwest);
        let stream = resp.bytes_stream().map_err(move |e| {
            CodexErr::ResponseStreamFailed(ResponseStreamFailed {
                source: e,
                request_id: request_id.clone(),
            })
        });
        tokio::spawn(process_sse(
            stream,
            tx_event,
@@ -417,7 +438,9 @@ impl ModelClient {
                    request_id,
                })
            }
            Err(e) => Err(StreamAttemptError::RetryableTransportError(e.into())),
            Err(e) => Err(StreamAttemptError::RetryableTransportError(
                CodexErr::ConnectionFailed(ConnectionFailedError { source: e }),
            )),
        }
    }

@@ -650,10 +673,12 @@ async fn process_sse<S>(
    let mut response_error: Option<CodexErr> = None;

    loop {
        let sse = match otel_event_manager
            .log_sse_event(|| timeout(idle_timeout, stream.next()))
            .await
        {
        let start = std::time::Instant::now();
        let response = timeout(idle_timeout, stream.next()).await;
        let duration = start.elapsed();
        otel_event_manager.log_sse_event(&response, duration);

        let sse = match response {
            Ok(Some(Ok(sse))) => sse,
            Ok(Some(Err(e))) => {
                debug!("SSE Error: {e:#}");
@@ -794,9 +819,13 @@ async fn process_sse<S>(
        if let Some(error) = error {
            match serde_json::from_value::<Error>(error.clone()) {
                Ok(error) => {
                    let delay = try_parse_retry_after(&error);
                    let message = error.message.unwrap_or_default();
                    response_error = Some(CodexErr::Stream(message, delay));
                    if is_context_window_error(&error) {
                        response_error = Some(CodexErr::ContextWindowExceeded);
                    } else {
                        let delay = try_parse_retry_after(&error);
                        let message = error.message.clone().unwrap_or_default();
                        response_error = Some(CodexErr::Stream(message, delay));
                    }
                }
                Err(e) => {
                    let error = format!("failed to parse ErrorResponse: {e}");
@@ -922,9 +951,14 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
    None
}

fn is_context_window_error(error: &Error) -> bool {
    error.code.as_deref() == Some("context_length_exceeded")
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use serde_json::json;
    use tokio::sync::mpsc;
    use tokio_test::io::Builder as IoBuilder;
@@ -1005,6 +1039,7 @@ mod tests {
        "test",
        "test",
        None,
        Some("test@test.com".to_string()),
        Some(AuthMode::ChatGPT),
        false,
        "test".to_string(),
@@ -1179,6 +1214,74 @@ mod tests {
        }
    }

    #[tokio::test]
    async fn context_window_error_is_fatal() {
        let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_5c66275b97b9baef1ed95550adb3b7ec13b17aafd1d2f11b","object":"response","created_at":1759510079,"status":"failed","background":false,"error":{"code":"context_length_exceeded","message":"Your input exceeds the context window of this model. Please adjust your input and try again."},"usage":null,"user":null,"metadata":{}}}"#;

        let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
        let provider = ModelProviderInfo {
            name: "test".to_string(),
            base_url: Some("https://test.com".to_string()),
            env_key: Some("TEST_API_KEY".to_string()),
            env_key_instructions: None,
            wire_api: WireApi::Responses,
            query_params: None,
            http_headers: None,
            env_http_headers: None,
            request_max_retries: Some(0),
            stream_max_retries: Some(0),
            stream_idle_timeout_ms: Some(1000),
            requires_openai_auth: false,
        };

        let otel_event_manager = otel_event_manager();

        let events = collect_events(&[sse1.as_bytes()], provider, otel_event_manager).await;

        assert_eq!(events.len(), 1);

        match &events[0] {
            Err(err @ CodexErr::ContextWindowExceeded) => {
                assert_eq!(err.to_string(), CodexErr::ContextWindowExceeded.to_string());
            }
            other => panic!("unexpected context window event: {other:?}"),
        }
    }

    #[tokio::test]
    async fn context_window_error_with_newline_is_fatal() {
        let raw_error = r#"{"type":"response.failed","sequence_number":4,"response":{"id":"resp_fatal_newline","object":"response","created_at":1759510080,"status":"failed","background":false,"error":{"code":"context_length_exceeded","message":"Your input exceeds the context window of this model. Please adjust your input and try\nagain."},"usage":null,"user":null,"metadata":{}}}"#;

        let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
        let provider = ModelProviderInfo {
            name: "test".to_string(),
            base_url: Some("https://test.com".to_string()),
            env_key: Some("TEST_API_KEY".to_string()),
            env_key_instructions: None,
            wire_api: WireApi::Responses,
            query_params: None,
            http_headers: None,
            env_http_headers: None,
            request_max_retries: Some(0),
            stream_max_retries: Some(0),
            stream_idle_timeout_ms: Some(1000),
            requires_openai_auth: false,
        };

        let otel_event_manager = otel_event_manager();

        let events = collect_events(&[sse1.as_bytes()], provider, otel_event_manager).await;

        assert_eq!(events.len(), 1);

        match &events[0] {
            Err(err @ CodexErr::ContextWindowExceeded) => {
                assert_eq!(err.to_string(), CodexErr::ContextWindowExceeded.to_string());
            }
            other => panic!("unexpected context window event: {other:?}"),
        }
    }

    // ────────────────────────────
    // Table-driven test from `main`
    // ────────────────────────────
@@ -1316,10 +1419,7 @@ mod tests {
        let resp: ErrorResponse =
            serde_json::from_str(json).expect("should deserialize old schema");

        assert!(matches!(
            resp.error.plan_type,
            Some(PlanType::Known(KnownPlan::Pro))
        ));
        assert_matches!(resp.error.plan_type, Some(PlanType::Known(KnownPlan::Pro)));

        let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type");
        assert_eq!(plan_json, "\"pro\"");
@@ -1334,7 +1434,7 @@ mod tests {
        let resp: ErrorResponse =
            serde_json::from_str(json).expect("should deserialize old schema");

        assert!(matches!(resp.error.plan_type, Some(PlanType::Unknown(ref s)) if s == "vip"));
        assert_matches!(resp.error.plan_type, Some(PlanType::Unknown(ref s)) if s == "vip");

        let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type");
        assert_eq!(plan_json, "\"vip\"");
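The TaskKind threaded through these hunks ultimately becomes the Codex-Task-Type request header via header_value(). A sketch of what that mapping plausibly looks like (the real enum lives in crate::state; the variant set and header strings here are assumptions):

// Hypothetical sketch of TaskKind and its header mapping.
#[derive(Clone, Copy, Debug)]
pub(crate) enum TaskKind {
    Regular,
    Compact,
}

impl TaskKind {
    pub(crate) fn header_value(self) -> &'static str {
        match self {
            TaskKind::Regular => "regular",
            TaskKind::Compact => "compact",
        }
    }
}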
@@ -1,6 +1,6 @@
use crate::client_common::tools::ToolSpec;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::OpenAiTool;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
@@ -9,9 +9,11 @@ use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use futures::Stream;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use std::borrow::Cow;
use std::collections::HashSet;
use std::ops::Deref;
use std::pin::Pin;
use std::task::Context;
@@ -29,7 +31,10 @@ pub struct Prompt {

    /// Tools available to the model, including additional tools sourced from
    /// external MCP servers.
    pub(crate) tools: Vec<OpenAiTool>,
    pub(crate) tools: Vec<ToolSpec>,

    /// Whether parallel tool calls are permitted for this prompt.
    pub(crate) parallel_tool_calls: bool,

    /// Optional override for the built-in BASE_INSTRUCTIONS.
    pub base_instructions_override: Option<String>,
@@ -49,8 +54,8 @@ impl Prompt {
        // AND
        // - there is no apply_patch tool present
        let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
            OpenAiTool::Function(f) => f.name == "apply_patch",
            OpenAiTool::Freeform(f) => f.name == "apply_patch",
            ToolSpec::Function(f) => f.name == "apply_patch",
            ToolSpec::Freeform(f) => f.name == "apply_patch",
            _ => false,
        });
        if self.base_instructions_override.is_none()
@@ -64,10 +69,125 @@ impl Prompt {
    }

    pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
        self.input.clone()
        let mut input = self.input.clone();

        // When using the *Freeform* apply_patch tool specifically, tool outputs
        // should be structured text, not JSON. Do NOT reserialize when using
        // the Function tool - note that this differs from the check above for
        // instructions. We declare the result as a named variable for clarity.
        let is_freeform_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
            ToolSpec::Freeform(f) => f.name == "apply_patch",
            _ => false,
        });
        if is_freeform_apply_patch_tool_present {
            reserialize_shell_outputs(&mut input);
        }

        input
    }
}

fn reserialize_shell_outputs(items: &mut [ResponseItem]) {
    let mut shell_call_ids: HashSet<String> = HashSet::new();

    items.iter_mut().for_each(|item| match item {
        ResponseItem::LocalShellCall { call_id, id, .. } => {
            if let Some(identifier) = call_id.clone().or_else(|| id.clone()) {
                shell_call_ids.insert(identifier);
            }
        }
        ResponseItem::CustomToolCall {
            id: _,
            status: _,
            call_id,
            name,
            input: _,
        } => {
            if name == "apply_patch" {
                shell_call_ids.insert(call_id.clone());
            }
        }
        ResponseItem::CustomToolCallOutput { call_id, output } => {
            if shell_call_ids.remove(call_id)
                && let Some(structured) = parse_structured_shell_output(output)
            {
                *output = structured
            }
        }
        ResponseItem::FunctionCall { name, call_id, .. }
            if is_shell_tool_name(name) || name == "apply_patch" =>
        {
            shell_call_ids.insert(call_id.clone());
        }
        ResponseItem::FunctionCallOutput { call_id, output } => {
            if shell_call_ids.remove(call_id)
                && let Some(structured) = parse_structured_shell_output(&output.content)
            {
                output.content = structured
            }
        }
        _ => {}
    })
}

fn is_shell_tool_name(name: &str) -> bool {
    matches!(name, "shell" | "container.exec")
}

#[derive(Deserialize)]
struct ExecOutputJson {
    output: String,
    metadata: ExecOutputMetadataJson,
}

#[derive(Deserialize)]
struct ExecOutputMetadataJson {
    exit_code: i32,
    duration_seconds: f32,
}

fn parse_structured_shell_output(raw: &str) -> Option<String> {
    let parsed: ExecOutputJson = serde_json::from_str(raw).ok()?;
    Some(build_structured_output(&parsed))
}

fn build_structured_output(parsed: &ExecOutputJson) -> String {
    let mut sections = Vec::new();
    sections.push(format!("Exit code: {}", parsed.metadata.exit_code));
    sections.push(format!(
        "Wall time: {} seconds",
        parsed.metadata.duration_seconds
    ));

    let mut output = parsed.output.clone();
    if let Some(total_lines) = extract_total_output_lines(&parsed.output) {
        sections.push(format!("Total output lines: {total_lines}"));
        if let Some(stripped) = strip_total_output_header(&output) {
            output = stripped.to_string();
        }
    }

    sections.push("Output:".to_string());
    sections.push(output);

    sections.join("\n")
}

fn extract_total_output_lines(output: &str) -> Option<u32> {
    let marker_start = output.find("[... omitted ")?;
    let marker = &output[marker_start..];
    let (_, after_of) = marker.split_once(" of ")?;
    let (total_segment, _) = after_of.split_once(' ')?;
    total_segment.parse::<u32>().ok()
}

fn strip_total_output_header(output: &str) -> Option<&str> {
    let after_prefix = output.strip_prefix("Total output lines: ")?;
    let (_, remainder) = after_prefix.split_once('\n')?;
    let remainder = remainder.strip_prefix('\n').unwrap_or(remainder);
    Some(remainder)
}
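Concretely, the reserialization above turns an exec-style JSON tool output into plain structured text; a worked example with illustrative values:

// Illustrative round trip through parse_structured_shell_output.
let raw = r#"{"output":"hello\n","metadata":{"exit_code":0,"duration_seconds":0.12}}"#;
let structured = parse_structured_shell_output(raw).expect("valid exec output JSON");
assert_eq!(
    structured,
    "Exit code: 0\nWall time: 0.12 seconds\nOutput:\nhello\n"
);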

#[derive(Debug)]
pub enum ResponseEvent {
    Created,
@@ -160,6 +280,65 @@ pub(crate) struct ResponsesApiRequest<'a> {
    pub(crate) text: Option<TextControls>,
}

pub(crate) mod tools {
    use crate::openai_tools::JsonSchema;
    use serde::Deserialize;
    use serde::Serialize;

    /// When serialized as JSON, this produces a valid "Tool" in the OpenAI
    /// Responses API.
    #[derive(Debug, Clone, Serialize, PartialEq)]
    #[serde(tag = "type")]
    pub(crate) enum ToolSpec {
        #[serde(rename = "function")]
        Function(ResponsesApiTool),
        #[serde(rename = "local_shell")]
        LocalShell {},
        // TODO: Understand why we get an error on web_search although the API docs say it's supported.
        // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C
        #[serde(rename = "web_search")]
        WebSearch {},
        #[serde(rename = "custom")]
        Freeform(FreeformTool),
    }

    impl ToolSpec {
        pub(crate) fn name(&self) -> &str {
            match self {
                ToolSpec::Function(tool) => tool.name.as_str(),
                ToolSpec::LocalShell {} => "local_shell",
                ToolSpec::WebSearch {} => "web_search",
                ToolSpec::Freeform(tool) => tool.name.as_str(),
            }
        }
    }

    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    pub struct FreeformTool {
        pub(crate) name: String,
        pub(crate) description: String,
        pub(crate) format: FreeformToolFormat,
    }

    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    pub struct FreeformToolFormat {
        pub(crate) r#type: String,
        pub(crate) syntax: String,
        pub(crate) definition: String,
    }

    #[derive(Debug, Clone, Serialize, PartialEq)]
    pub struct ResponsesApiTool {
        pub(crate) name: String,
        pub(crate) description: String,
        /// TODO: Validation. When strict is set to true, the JSON schema,
        /// `required` and `additional_properties` must be present. All fields in
        /// `properties` must be present in `required`.
        pub(crate) strict: bool,
        pub(crate) parameters: JsonSchema,
    }
}
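With the serde attributes above, each ToolSpec variant serializes to an internally tagged JSON object; a sketch of the expected wire forms (illustrative, derived from the attributes rather than captured traffic):

// Illustrative serialized forms:
//   ToolSpec::LocalShell {}   => {"type":"local_shell"}
//   ToolSpec::WebSearch {}    => {"type":"web_search"}
//   ToolSpec::Function(tool)  => {"type":"function","name":...,"description":...,"strict":...,"parameters":...}
//   ToolSpec::Freeform(tool)  => {"type":"custom","name":...,"description":...,"format":...}
let spec = ToolSpec::LocalShell {};
assert_eq!(
    serde_json::to_value(&spec).unwrap(),
    serde_json::json!({ "type": "local_shell" })
);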

pub(crate) fn create_reasoning_param_for_request(
    model_family: &ModelFamily,
    effort: Option<ReasoningEffortConfig>,
@@ -279,7 +458,7 @@ mod tests {
        input: &input,
        tools: &tools,
        tool_choice: "auto",
        parallel_tool_calls: false,
        parallel_tool_calls: true,
        reasoning: None,
        store: false,
        stream: true,
@@ -320,7 +499,7 @@ mod tests {
        input: &input,
        tools: &tools,
        tool_choice: "auto",
        parallel_tool_calls: false,
        parallel_tool_calls: true,
        reasoning: None,
        store: false,
        stream: true,
@@ -356,7 +535,7 @@ mod tests {
        input: &input,
        tools: &tools,
        tool_choice: "auto",
        parallel_tool_calls: false,
        parallel_tool_calls: true,
        reasoning: None,
        store: false,
        stream: true,
(File diff suppressed because it is too large.)
@@ -16,6 +16,7 @@ use crate::protocol::InputItem;
use crate::protocol::InputMessageKind;
use crate::protocol::TaskStartedEvent;
use crate::protocol::TurnContextItem;
use crate::state::TaskKind;
use crate::truncate::truncate_middle;
use crate::util::backoff;
use askama::Template;
@@ -70,14 +71,10 @@ async fn run_compact_task_inner(
    input: Vec<InputItem>,
) {
    let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
    let turn_input = sess
    let mut turn_input = sess
        .turn_input_with_history(vec![initial_input_for_turn.clone().into()])
        .await;

    let prompt = Prompt {
        input: turn_input,
        ..Default::default()
    };
    let mut truncated_count = 0usize;

    let max_retries = turn_context.client.get_provider().stream_max_retries();
    let mut retries = 0;
@@ -93,25 +90,54 @@ async fn run_compact_task_inner(
    sess.persist_rollout_items(&[rollout_item]).await;

    loop {
        let prompt = Prompt {
            input: turn_input.clone(),
            ..Default::default()
        };
        let attempt_result =
            drain_to_completed(&sess, turn_context.as_ref(), &sub_id, &prompt).await;

        match attempt_result {
            Ok(()) => {
                if truncated_count > 0 {
                    sess.notify_background_event(
                        &sub_id,
                        format!(
                            "Trimmed {truncated_count} older conversation item(s) before compacting so the prompt fits the model context window."
                        ),
                    )
                    .await;
                }
                break;
            }
            Err(CodexErr::Interrupted) => {
                return;
            }
            Err(e @ CodexErr::ContextWindowExceeded) => {
                if turn_input.len() > 1 {
                    turn_input.remove(0);
                    truncated_count += 1;
                    retries = 0;
                    continue;
                }
                sess.set_total_tokens_full(&sub_id, turn_context.as_ref())
                    .await;
                let event = Event {
                    id: sub_id.clone(),
                    msg: EventMsg::Error(ErrorEvent {
                        message: e.to_string(),
                    }),
                };
                sess.send_event(event).await;
                return;
            }
            Err(e) => {
                if retries < max_retries {
                    retries += 1;
                    let delay = backoff(retries);
                    sess.notify_stream_error(
                        &sub_id,
                        format!(
                            "stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…"
                        ),
                        format!("Re-connecting... {retries}/{max_retries}"),
                    )
                    .await;
                    tokio::time::sleep(delay).await;
@@ -233,7 +259,11 @@ async fn drain_to_completed(
    sub_id: &str,
    prompt: &Prompt,
) -> CodexResult<()> {
    let mut stream = turn_context.client.clone().stream(prompt).await?;
    let mut stream = turn_context
        .client
        .clone()
        .stream_with_task_kind(prompt, TaskKind::Compact)
        .await?;
    loop {
        let maybe_event = stream.next().await;
        let Some(event) = maybe_event else {
(File diff suppressed because it is too large.)
codex-rs/core/src/config_loader/macos.rs (new file, +118 lines)
@@ -0,0 +1,118 @@
use std::io;
use toml::Value as TomlValue;

#[cfg(target_os = "macos")]
mod native {
    use super::*;
    use base64::Engine;
    use base64::prelude::BASE64_STANDARD;
    use core_foundation::base::TCFType;
    use core_foundation::string::CFString;
    use core_foundation::string::CFStringRef;
    use std::ffi::c_void;
    use tokio::task;

    pub(crate) async fn load_managed_admin_config_layer(
        override_base64: Option<&str>,
    ) -> io::Result<Option<TomlValue>> {
        if let Some(encoded) = override_base64 {
            let trimmed = encoded.trim();
            return if trimmed.is_empty() {
                Ok(None)
            } else {
                parse_managed_preferences_base64(trimmed).map(Some)
            };
        }

        const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

        match task::spawn_blocking(load_managed_admin_config).await {
            Ok(result) => result,
            Err(join_err) => {
                if join_err.is_cancelled() {
                    tracing::error!("Managed preferences load task was cancelled");
                } else {
                    tracing::error!("Managed preferences load task failed: {join_err}");
                }
                Err(io::Error::other(LOAD_ERROR))
            }
        }
    }

    pub(super) fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
        #[link(name = "CoreFoundation", kind = "framework")]
        unsafe extern "C" {
            fn CFPreferencesCopyAppValue(
                key: CFStringRef,
                application_id: CFStringRef,
            ) -> *mut c_void;
        }

        const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
        const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";

        let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
        let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

        let value_ref = unsafe {
            CFPreferencesCopyAppValue(
                key.as_concrete_TypeRef(),
                application_id.as_concrete_TypeRef(),
            )
        };

        if value_ref.is_null() {
            tracing::debug!(
                "Managed preferences for {} key {} not found",
                MANAGED_PREFERENCES_APPLICATION_ID,
                MANAGED_PREFERENCES_CONFIG_KEY
            );
            return Ok(None);
        }

        let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
        let contents = value.to_string();
        let trimmed = contents.trim();

        parse_managed_preferences_base64(trimmed).map(Some)
    }

    pub(super) fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
        let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
            tracing::error!("Failed to decode managed preferences as base64: {err}");
            io::Error::new(io::ErrorKind::InvalidData, err)
        })?;

        let decoded_str = String::from_utf8(decoded).map_err(|err| {
            tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
            io::Error::new(io::ErrorKind::InvalidData, err)
        })?;

        match toml::from_str::<TomlValue>(&decoded_str) {
            Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
            Ok(other) => {
                tracing::error!(
                    "Managed preferences TOML must have a table at the root, found {other:?}",
                );
                Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "managed preferences root must be a table",
                ))
            }
            Err(err) => {
                tracing::error!("Failed to parse managed preferences TOML: {err}");
                Err(io::Error::new(io::ErrorKind::InvalidData, err))
            }
        }
    }
}

#[cfg(target_os = "macos")]
pub(crate) use native::load_managed_admin_config_layer;

#[cfg(not(target_os = "macos"))]
pub(crate) async fn load_managed_admin_config_layer(
    _override_base64: Option<&str>,
) -> io::Result<Option<TomlValue>> {
    Ok(None)
}
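To see the managed-preferences mechanism end to end, here is a sketch of producing a valid config_toml_base64 payload and feeding it through the parser above (illustrative; the TOML keys are placeholders, not a statement of Codex's config schema):

use base64::Engine;
use base64::prelude::BASE64_STANDARD;

// Hypothetical example: encode a TOML document the way an MDM profile would
// populate the com.openai.codex / config_toml_base64 preference key.
let toml_src = "model = \"gpt-5\"\n\n[sandbox]\nmode = \"read-only\"\n";
let encoded = BASE64_STANDARD.encode(toml_src.as_bytes());
let parsed = parse_managed_preferences_base64(&encoded).expect("valid payload");
assert!(parsed.get("sandbox").is_some());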
codex-rs/core/src/config_loader/mod.rs (new file, +311 lines)
@@ -0,0 +1,311 @@
mod macos;

use crate::config::CONFIG_TOML_FILE;
use macos::load_managed_admin_config_layer;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use tokio::fs;
use toml::Value as TomlValue;

#[cfg(unix)]
const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml";

#[derive(Debug)]
pub(crate) struct LoadedConfigLayers {
    pub base: TomlValue,
    pub managed_config: Option<TomlValue>,
    pub managed_preferences: Option<TomlValue>,
}

#[derive(Debug, Default)]
pub(crate) struct LoaderOverrides {
    pub managed_config_path: Option<PathBuf>,
    #[cfg(target_os = "macos")]
    pub managed_preferences_base64: Option<String>,
}

// Configuration layering pipeline (top overrides bottom):
//
// +-------------------------+
// | Managed preferences (*) |
// +-------------------------+
//             ^
//             |
// +-------------------------+
// |   managed_config.toml   |
// +-------------------------+
//             ^
//             |
// +-------------------------+
// |   config.toml (base)    |
// +-------------------------+
//
// (*) Only available on macOS via managed device profiles.

pub async fn load_config_as_toml(codex_home: &Path) -> io::Result<TomlValue> {
    load_config_as_toml_with_overrides(codex_home, LoaderOverrides::default()).await
}

fn default_empty_table() -> TomlValue {
    TomlValue::Table(Default::default())
}

pub(crate) async fn load_config_layers_with_overrides(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<LoadedConfigLayers> {
    load_config_layers_internal(codex_home, overrides).await
}

async fn load_config_as_toml_with_overrides(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<TomlValue> {
    let layers = load_config_layers_internal(codex_home, overrides).await?;
    Ok(apply_managed_layers(layers))
}

async fn load_config_layers_internal(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<LoadedConfigLayers> {
    #[cfg(target_os = "macos")]
    let LoaderOverrides {
        managed_config_path,
        managed_preferences_base64,
    } = overrides;

    #[cfg(not(target_os = "macos"))]
    let LoaderOverrides {
        managed_config_path,
    } = overrides;

    let managed_config_path =
        managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home));

    let user_config_path = codex_home.join(CONFIG_TOML_FILE);
    let user_config = read_config_from_path(&user_config_path, true).await?;
    let managed_config = read_config_from_path(&managed_config_path, false).await?;

    #[cfg(target_os = "macos")]
    let managed_preferences =
        load_managed_admin_config_layer(managed_preferences_base64.as_deref()).await?;

    #[cfg(not(target_os = "macos"))]
    let managed_preferences = load_managed_admin_config_layer(None).await?;

    Ok(LoadedConfigLayers {
        base: user_config.unwrap_or_else(default_empty_table),
        managed_config,
        managed_preferences,
    })
}

async fn read_config_from_path(
    path: &Path,
    log_missing_as_info: bool,
) -> io::Result<Option<TomlValue>> {
    match fs::read_to_string(path).await {
        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
            Ok(value) => Ok(Some(value)),
            Err(err) => {
                tracing::error!("Failed to parse {}: {err}", path.display());
                Err(io::Error::new(io::ErrorKind::InvalidData, err))
            }
        },
        Err(err) if err.kind() == io::ErrorKind::NotFound => {
            if log_missing_as_info {
                tracing::info!("{} not found, using defaults", path.display());
            } else {
                tracing::debug!("{} not found", path.display());
            }
            Ok(None)
        }
        Err(err) => {
            tracing::error!("Failed to read {}: {err}", path.display());
            Err(err)
        }
    }
}

/// Merge config `overlay` into `base`, giving `overlay` precedence.
pub(crate) fn merge_toml_values(base: &mut TomlValue, overlay: &TomlValue) {
    if let TomlValue::Table(overlay_table) = overlay
        && let TomlValue::Table(base_table) = base
    {
        for (key, value) in overlay_table {
            if let Some(existing) = base_table.get_mut(key) {
                merge_toml_values(existing, value);
            } else {
                base_table.insert(key.clone(), value.clone());
            }
        }
    } else {
        *base = overlay.clone();
    }
}

fn managed_config_default_path(codex_home: &Path) -> PathBuf {
    #[cfg(unix)]
    {
        let _ = codex_home;
        PathBuf::from(CODEX_MANAGED_CONFIG_SYSTEM_PATH)
    }

    #[cfg(not(unix))]
    {
        codex_home.join("managed_config.toml")
    }
}

fn apply_managed_layers(layers: LoadedConfigLayers) -> TomlValue {
    let LoadedConfigLayers {
        mut base,
        managed_config,
        managed_preferences,
    } = layers;

    for overlay in [managed_config, managed_preferences].into_iter().flatten() {
        merge_toml_values(&mut base, &overlay);
    }

    base
}
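For intuition, merge_toml_values deep-merges tables while scalar overlays win outright; a minimal sketch:

// Illustrative: nested tables merge key by key; scalars from the overlay win.
let mut base: TomlValue = toml::from_str("foo = 1\n[nested]\nvalue = \"base\"").unwrap();
let overlay: TomlValue = toml::from_str("foo = 2\n[nested]\nextra = true").unwrap();
merge_toml_values(&mut base, &overlay);
assert_eq!(base["foo"].as_integer(), Some(2));
assert_eq!(base["nested"]["value"].as_str(), Some("base"));
assert_eq!(base["nested"]["extra"].as_bool(), Some(true));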
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn merges_managed_config_layer_on_top() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
|
||||
std::fs::write(
|
||||
tmp.path().join(CONFIG_TOML_FILE),
|
||||
r#"foo = 1
|
||||
|
||||
[nested]
|
||||
value = "base"
|
||||
"#,
|
||||
)
|
||||
.expect("write base");
|
||||
std::fs::write(
|
||||
&managed_path,
|
||||
r#"foo = 2
|
||||
|
||||
[nested]
|
||||
value = "managed_config"
|
||||
extra = true
|
||||
"#,
|
||||
)
|
||||
.expect("write managed config");
|
||||
|
||||
let overrides = LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
};
|
||||
|
||||
let loaded = load_config_as_toml_with_overrides(tmp.path(), overrides)
|
||||
.await
|
||||
.expect("load config");
|
||||
let table = loaded.as_table().expect("top-level table expected");
|
||||
|
||||
assert_eq!(table.get("foo"), Some(&TomlValue::Integer(2)));
|
||||
let nested = table
|
||||
.get("nested")
|
||||
.and_then(|v| v.as_table())
|
||||
.expect("nested");
|
||||
assert_eq!(
|
||||
nested.get("value"),
|
||||
Some(&TomlValue::String("managed_config".to_string()))
|
||||
);
|
||||
assert_eq!(nested.get("extra"), Some(&TomlValue::Boolean(true)));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn returns_empty_when_all_layers_missing() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
let overrides = LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
};
|
||||
|
||||
let layers = load_config_layers_with_overrides(tmp.path(), overrides)
|
||||
.await
|
||||
.expect("load layers");
|
||||
let base_table = layers.base.as_table().expect("base table expected");
|
||||
assert!(
|
||||
base_table.is_empty(),
|
||||
"expected empty base layer when configs missing"
|
||||
);
|
||||
assert!(
|
||||
layers.managed_config.is_none(),
|
||||
"managed config layer should be absent when file missing"
|
||||
);
|
||||
|
||||
#[cfg(not(target_os = "macos"))]
|
||||
{
|
||||
let loaded = load_config_as_toml(tmp.path()).await.expect("load config");
|
||||
let table = loaded.as_table().expect("top-level table expected");
|
||||
assert!(
|
||||
table.is_empty(),
|
||||
"expected empty table when configs missing"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
#[tokio::test]
|
||||
async fn managed_preferences_take_highest_precedence() {
|
||||
use base64::Engine;
|
||||
|
||||
let managed_payload = r#"
|
||||
[nested]
|
||||
value = "managed"
|
||||
flag = false
|
||||
"#;
|
||||
let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
|
||||
std::fs::write(
|
||||
tmp.path().join(CONFIG_TOML_FILE),
|
||||
r#"[nested]
|
||||
value = "base"
|
||||
"#,
|
||||
)
|
||||
.expect("write base");
|
||||
std::fs::write(
|
||||
&managed_path,
|
||||
r#"[nested]
|
||||
value = "managed_config"
|
||||
flag = true
|
||||
"#,
|
||||
)
|
||||
.expect("write managed config");
|
||||
|
||||
let overrides = LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
managed_preferences_base64: Some(encoded),
|
||||
};
|
||||
|
||||
let loaded = load_config_as_toml_with_overrides(tmp.path(), overrides)
|
||||
.await
|
||||
.expect("load config");
|
||||
let nested = loaded
|
||||
.get("nested")
|
||||
.and_then(|v| v.as_table())
|
||||
.expect("nested table");
|
||||
assert_eq!(
|
||||
nested.get("value"),
|
||||
Some(&TomlValue::String("managed".to_string()))
|
||||
);
|
||||
assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
|
||||
}
|
||||
}
|
||||
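The tests above pin down the merge order: `apply_managed_layers` folds `managed_config` and then `managed_preferences` over the user's base `config.toml`, so later layers win key by key. A minimal sketch of that precedence, assuming the `LoadedConfigLayers` and `apply_managed_layers` definitions shown above (the key and values here are illustrative):

fn demo_layer_precedence() {
    // The same key set in all three layers.
    let base: TomlValue = toml::from_str(r#"model = "from-config-toml""#).expect("parse base");
    let managed: TomlValue =
        toml::from_str(r#"model = "from-managed-config""#).expect("parse managed");
    let prefs: TomlValue =
        toml::from_str(r#"model = "from-managed-prefs""#).expect("parse prefs");

    let merged = apply_managed_layers(LoadedConfigLayers {
        base,
        managed_config: Some(managed),
        managed_preferences: Some(prefs),
    });

    // managed_preferences is merged last, so it wins over both other layers.
    assert_eq!(
        merged.get("model").and_then(|v| v.as_str()),
        Some("from-managed-prefs")
    );
}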
@@ -20,6 +20,18 @@ pub struct ConfigProfile {
     pub model_verbosity: Option<Verbosity>,
     pub chatgpt_base_url: Option<String>,
     pub experimental_instructions_file: Option<PathBuf>,
+    pub include_plan_tool: Option<bool>,
+    pub include_apply_patch_tool: Option<bool>,
+    pub include_view_image_tool: Option<bool>,
+    pub experimental_use_unified_exec_tool: Option<bool>,
+    pub experimental_use_exec_command_tool: Option<bool>,
+    pub experimental_use_rmcp_client: Option<bool>,
+    pub experimental_use_freeform_apply_patch: Option<bool>,
+    pub tools_web_search: Option<bool>,
+    pub tools_view_image: Option<bool>,
+    /// Optional feature toggles scoped to this profile.
+    #[serde(default)]
+    pub features: Option<crate::features::FeaturesToml>,
 }

 impl From<ConfigProfile> for codex_app_server_protocol::Profile {
@@ -20,6 +20,10 @@ pub struct McpServerConfig {
     #[serde(flatten)]
     pub transport: McpServerTransportConfig,

+    /// When `false`, Codex skips initializing this MCP server.
+    #[serde(default = "default_enabled")]
+    pub enabled: bool,
+
     /// Startup timeout in seconds for initializing MCP server & initially listing tools.
     #[serde(
         default,
@@ -40,21 +44,34 @@ impl<'de> Deserialize<'de> for McpServerConfig {
     {
         #[derive(Deserialize)]
         struct RawMcpServerConfig {
+            // stdio
             command: Option<String>,
             #[serde(default)]
             args: Option<Vec<String>>,
             #[serde(default)]
             env: Option<HashMap<String, String>>,
+            #[serde(default)]
+            env_vars: Option<Vec<String>>,
+            #[serde(default)]
+            cwd: Option<PathBuf>,
+            http_headers: Option<HashMap<String, String>>,
+            #[serde(default)]
+            env_http_headers: Option<HashMap<String, String>>,

+            // streamable_http
             url: Option<String>,
             bearer_token: Option<String>,
+            bearer_token_env_var: Option<String>,

+            // shared
             #[serde(default)]
             startup_timeout_sec: Option<f64>,
             #[serde(default)]
             startup_timeout_ms: Option<u64>,
             #[serde(default, with = "option_duration_secs")]
             tool_timeout_sec: Option<Duration>,
+            #[serde(default)]
+            enabled: Option<bool>,
         }

         let raw = RawMcpServerConfig::deserialize(deserializer)?;
@@ -85,30 +102,58 @@ impl<'de> Deserialize<'de> for McpServerConfig {
                 command: Some(command),
                 args,
                 env,
+                env_vars,
+                cwd,
                 url,
                 bearer_token,
+                bearer_token_env_var,
+                http_headers,
+                env_http_headers,
                 ..
             } => {
                 throw_if_set("stdio", "url", url.as_ref())?;
                 throw_if_set("stdio", "bearer_token", bearer_token.as_ref())?;
+                throw_if_set(
+                    "stdio",
+                    "bearer_token_env_var",
+                    bearer_token_env_var.as_ref(),
+                )?;
+                throw_if_set("stdio", "http_headers", http_headers.as_ref())?;
+                throw_if_set("stdio", "env_http_headers", env_http_headers.as_ref())?;
                 McpServerTransportConfig::Stdio {
                     command,
                     args: args.unwrap_or_default(),
                     env,
+                    env_vars: env_vars.unwrap_or_default(),
+                    cwd,
                 }
             }
             RawMcpServerConfig {
                 url: Some(url),
                 bearer_token,
+                bearer_token_env_var,
                 command,
                 args,
                 env,
-                ..
+                env_vars,
+                cwd,
+                http_headers,
+                env_http_headers,
+                startup_timeout_sec: _,
+                tool_timeout_sec: _,
+                startup_timeout_ms: _,
+                enabled: _,
             } => {
                 throw_if_set("streamable_http", "command", command.as_ref())?;
                 throw_if_set("streamable_http", "args", args.as_ref())?;
                 throw_if_set("streamable_http", "env", env.as_ref())?;
-                McpServerTransportConfig::StreamableHttp { url, bearer_token }
+                throw_if_set("streamable_http", "env_vars", env_vars.as_ref())?;
+                throw_if_set("streamable_http", "cwd", cwd.as_ref())?;
+                throw_if_set("streamable_http", "bearer_token", bearer_token.as_ref())?;
+                McpServerTransportConfig::StreamableHttp {
+                    url,
+                    bearer_token_env_var,
+                    http_headers,
+                    env_http_headers,
+                }
             }
             _ => return Err(SerdeError::custom("invalid transport")),
         };
@@ -117,10 +162,15 @@ impl<'de> Deserialize<'de> for McpServerConfig {
             transport,
             startup_timeout_sec,
             tool_timeout_sec: raw.tool_timeout_sec,
+            enabled: raw.enabled.unwrap_or_else(default_enabled),
         })
     }
 }

+const fn default_enabled() -> bool {
+    true
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 #[serde(untagged, deny_unknown_fields, rename_all = "snake_case")]
 pub enum McpServerTransportConfig {
@@ -131,15 +181,25 @@ pub enum McpServerTransportConfig {
         args: Vec<String>,
         #[serde(default, skip_serializing_if = "Option::is_none")]
         env: Option<HashMap<String, String>>,
+        #[serde(default, skip_serializing_if = "Vec::is_empty")]
+        env_vars: Vec<String>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        cwd: Option<PathBuf>,
     },
     /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#streamable-http
     StreamableHttp {
         url: String,
-        /// A plain text bearer token to use for authentication.
-        /// This bearer token will be included in the HTTP request header as an `Authorization: Bearer <token>` header.
-        /// This should be used with caution because it lives on disk in clear text.
+        /// Name of the environment variable to read for an HTTP bearer token.
+        /// When set, requests will include the token via `Authorization: Bearer <token>`.
+        /// The actual secret value must be provided via the environment.
         #[serde(default, skip_serializing_if = "Option::is_none")]
-        bearer_token: Option<String>,
+        bearer_token_env_var: Option<String>,
+        /// Additional HTTP headers to include in requests to this server.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        http_headers: Option<HashMap<String, String>>,
+        /// HTTP headers where the value is sourced from an environment variable.
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        env_http_headers: Option<HashMap<String, String>>,
     },
 }

@@ -301,6 +361,20 @@ pub struct Tui {
     pub notifications: Notifications,
 }

+/// Settings for notices we display to users via the tui and app-server clients
+/// (primarily the Codex IDE extension). NOTE: these are different from
+/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
+#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
+pub struct Notice {
+    /// Tracks whether the user has acknowledged the full access warning prompt.
+    pub hide_full_access_warning: Option<bool>,
+}
+
+impl Notice {
+    /// used by set_hide_full_access_warning until we refactor config updates
+    pub(crate) const TABLE_KEY: &'static str = "notice";
+}
+
 #[derive(Deserialize, Debug, Clone, PartialEq, Default)]
 pub struct SandboxWorkspaceWrite {
     #[serde(default)]
@@ -447,9 +521,12 @@ mod tests {
             McpServerTransportConfig::Stdio {
                 command: "echo".to_string(),
                 args: vec![],
-                env: None
+                env: None,
+                env_vars: Vec::new(),
+                cwd: None,
             }
         );
+        assert!(cfg.enabled);
     }

     #[test]
@@ -467,9 +544,12 @@ mod tests {
             McpServerTransportConfig::Stdio {
                 command: "echo".to_string(),
                 args: vec!["hello".to_string(), "world".to_string()],
-                env: None
+                env: None,
+                env_vars: Vec::new(),
+                cwd: None,
             }
         );
+        assert!(cfg.enabled);
     }

     #[test]
@@ -488,9 +568,69 @@ mod tests {
             McpServerTransportConfig::Stdio {
                 command: "echo".to_string(),
                 args: vec!["hello".to_string(), "world".to_string()],
-                env: Some(HashMap::from([("FOO".to_string(), "BAR".to_string())]))
+                env: Some(HashMap::from([("FOO".to_string(), "BAR".to_string())])),
+                env_vars: Vec::new(),
+                cwd: None,
             }
         );
+        assert!(cfg.enabled);
     }

+    #[test]
+    fn deserialize_stdio_command_server_config_with_env_vars() {
+        let cfg: McpServerConfig = toml::from_str(
+            r#"
+command = "echo"
+env_vars = ["FOO", "BAR"]
+"#,
+        )
+        .expect("should deserialize command config with env_vars");
+
+        assert_eq!(
+            cfg.transport,
+            McpServerTransportConfig::Stdio {
+                command: "echo".to_string(),
+                args: vec![],
+                env: None,
+                env_vars: vec!["FOO".to_string(), "BAR".to_string()],
+                cwd: None,
+            }
+        );
+    }
+
+    #[test]
+    fn deserialize_stdio_command_server_config_with_cwd() {
+        let cfg: McpServerConfig = toml::from_str(
+            r#"
+command = "echo"
+cwd = "/tmp"
+"#,
+        )
+        .expect("should deserialize command config with cwd");
+
+        assert_eq!(
+            cfg.transport,
+            McpServerTransportConfig::Stdio {
+                command: "echo".to_string(),
+                args: vec![],
+                env: None,
+                env_vars: Vec::new(),
+                cwd: Some(PathBuf::from("/tmp")),
+            }
+        );
+    }
+
+    #[test]
+    fn deserialize_disabled_server_config() {
+        let cfg: McpServerConfig = toml::from_str(
+            r#"
+command = "echo"
+enabled = false
+"#,
+        )
+        .expect("should deserialize disabled server config");
+
+        assert!(!cfg.enabled);
+    }
+
     #[test]
@@ -506,17 +646,20 @@ mod tests {
             cfg.transport,
             McpServerTransportConfig::StreamableHttp {
                 url: "https://example.com/mcp".to_string(),
-                bearer_token: None
+                bearer_token_env_var: None,
+                http_headers: None,
+                env_http_headers: None,
             }
         );
+        assert!(cfg.enabled);
     }

     #[test]
-    fn deserialize_streamable_http_server_config_with_bearer_token() {
+    fn deserialize_streamable_http_server_config_with_env_var() {
         let cfg: McpServerConfig = toml::from_str(
             r#"
 url = "https://example.com/mcp"
-bearer_token = "secret"
+bearer_token_env_var = "GITHUB_TOKEN"
 "#,
         )
         .expect("should deserialize http config");
@@ -525,7 +668,35 @@ mod tests {
             cfg.transport,
             McpServerTransportConfig::StreamableHttp {
                 url: "https://example.com/mcp".to_string(),
-                bearer_token: Some("secret".to_string())
+                bearer_token_env_var: Some("GITHUB_TOKEN".to_string()),
+                http_headers: None,
+                env_http_headers: None,
             }
         );
+        assert!(cfg.enabled);
     }

+    #[test]
+    fn deserialize_streamable_http_server_config_with_headers() {
+        let cfg: McpServerConfig = toml::from_str(
+            r#"
+url = "https://example.com/mcp"
+http_headers = { "X-Foo" = "bar" }
+env_http_headers = { "X-Token" = "TOKEN_ENV" }
+"#,
+        )
+        .expect("should deserialize http config with headers");
+
+        assert_eq!(
+            cfg.transport,
+            McpServerTransportConfig::StreamableHttp {
+                url: "https://example.com/mcp".to_string(),
+                bearer_token_env_var: None,
+                http_headers: Some(HashMap::from([("X-Foo".to_string(), "bar".to_string())])),
+                env_http_headers: Some(HashMap::from([(
+                    "X-Token".to_string(),
+                    "TOKEN_ENV".to_string()
+                )])),
+            }
+        );
+    }
@@ -553,13 +724,37 @@ mod tests {
 }

 #[test]
-    fn deserialize_rejects_bearer_token_for_stdio_transport() {
+    fn deserialize_rejects_headers_for_stdio() {
         toml::from_str::<McpServerConfig>(
             r#"
 command = "echo"
-bearer_token = "secret"
+http_headers = { "X-Foo" = "bar" }
 "#,
         )
-        .expect_err("should reject bearer token for stdio transport");
+        .expect_err("should reject http_headers for stdio transport");
+
+        toml::from_str::<McpServerConfig>(
+            r#"
+command = "echo"
+env_http_headers = { "X-Foo" = "BAR_ENV" }
+"#,
+        )
+        .expect_err("should reject env_http_headers for stdio transport");
+    }
+
+    #[test]
+    fn deserialize_rejects_inline_bearer_token_field() {
+        let err = toml::from_str::<McpServerConfig>(
+            r#"
+url = "https://example.com"
+bearer_token = "secret"
+"#,
+        )
+        .expect_err("should reject bearer_token field");
+
+        assert!(
+            err.to_string().contains("bearer_token is not supported"),
+            "unexpected error: {err}"
+        );
     }
 }
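The redesigned transport keeps secrets out of config.toml: only environment variable names are stored (`bearer_token_env_var`, `env_http_headers`), and an inline `bearer_token` is now rejected outright. A minimal sketch of the resolution step the transport implies, assuming the StreamableHttp fields shown above (the plain string header map here is illustrative, not the crate's actual HTTP plumbing):

fn resolve_http_auth(
    bearer_token_env_var: Option<&str>,
    env_http_headers: Option<&HashMap<String, String>>,
) -> HashMap<String, String> {
    let mut headers = HashMap::new();

    // Authorization: Bearer <token>, with the secret read from the environment.
    if let Some(var) = bearer_token_env_var
        && let Ok(token) = std::env::var(var)
    {
        headers.insert("Authorization".to_string(), format!("Bearer {token}"));
    }

    // env_http_headers maps header name -> env var holding its value.
    if let Some(mapping) = env_http_headers {
        for (header, var) in mapping {
            if let Ok(value) = std::env::var(var) {
                headers.insert(header.clone(), value);
            }
        }
    }

    headers
}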
@@ -7,10 +7,16 @@ use crate::codex::compact::content_items_to_text;
 use crate::codex::compact::is_session_prefix_message;
 use crate::codex_conversation::CodexConversation;
 use crate::config::Config;
+use crate::cross_session::CrossSessionError;
+use crate::cross_session::CrossSessionHub;
+use crate::cross_session::RegisteredSession;
+use crate::cross_session::SessionDefaults;
+use crate::cross_session::SessionRegistration;
+use crate::error::CodexErr;
 use crate::error::Result as CodexResult;
 use crate::protocol::Event;
 use crate::protocol::EventMsg;
 use crate::protocol::Op;
 use crate::protocol::SessionConfiguredEvent;
 use crate::rollout::RolloutRecorder;
 use codex_protocol::ConversationId;
@@ -22,6 +28,7 @@ use std::collections::HashMap;
 use std::path::PathBuf;
 use std::sync::Arc;
 use tokio::sync::RwLock;
+use tracing::warn;

 /// Represents a newly created Codex conversation, including the first event
 /// (which is [`EventMsg::SessionConfigured`]).
@@ -31,10 +38,17 @@ pub struct NewConversation {
     pub session_configured: SessionConfiguredEvent,
 }

+pub struct CrossSessionSpawnParams {
+    pub hub: Arc<CrossSessionHub>,
+    pub run_id: Option<String>,
+    pub role: Option<String>,
+}
+
 /// [`ConversationManager`] is responsible for creating conversations and
 /// maintaining them in memory.
 pub struct ConversationManager {
     conversations: Arc<RwLock<HashMap<ConversationId, Arc<CodexConversation>>>>,
+    cross_session_registrations: Arc<RwLock<HashMap<ConversationId, RegisteredSession>>>,
     auth_manager: Arc<AuthManager>,
     session_source: SessionSource,
 }
@@ -43,6 +57,7 @@ impl ConversationManager {
     pub fn new(auth_manager: Arc<AuthManager>, session_source: SessionSource) -> Self {
         Self {
             conversations: Arc::new(RwLock::new(HashMap::new())),
+            cross_session_registrations: Arc::new(RwLock::new(HashMap::new())),
             auth_manager,
             session_source,
         }
@@ -58,26 +73,104 @@ impl ConversationManager {
     }

     pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
-        self.spawn_conversation(config, self.auth_manager.clone())
-            .await
+        self.spawn_conversation_with_history(
+            config,
+            self.auth_manager.clone(),
+            InitialHistory::New,
+            None,
+        )
+        .await
     }

-    async fn spawn_conversation(
+    pub async fn new_conversation_with_cross_session(
+        &self,
+        config: Config,
+        params: CrossSessionSpawnParams,
+    ) -> CodexResult<NewConversation> {
+        self.spawn_conversation_with_history(
+            config,
+            self.auth_manager.clone(),
+            InitialHistory::New,
+            Some(params),
+        )
+        .await
+    }
+
+    async fn spawn_conversation_with_history(
         &self,
         config: Config,
         auth_manager: Arc<AuthManager>,
+        initial_history: InitialHistory,
+        cross_session: Option<CrossSessionSpawnParams>,
     ) -> CodexResult<NewConversation> {
+        let cross_session =
+            cross_session.map(|params| (SessionDefaults::from_config(&config), params));
+
         let CodexSpawnOk {
             codex,
             conversation_id,
-        } = Codex::spawn(
-            config,
-            auth_manager,
-            InitialHistory::New,
-            self.session_source,
-        )
-        .await?;
-        self.finalize_spawn(codex, conversation_id).await
+        } = Codex::spawn(config, auth_manager, initial_history, self.session_source).await?;
+
+        let new_conversation = self.finalize_spawn(codex, conversation_id).await?;
+
+        if let Some((defaults, params)) = cross_session
+            && let Err(err) = self
+                .register_cross_session(
+                    conversation_id,
+                    defaults,
+                    params,
+                    Arc::clone(&new_conversation.conversation),
+                )
+                .await
+        {
+            self.abort_conversation(conversation_id, Arc::clone(&new_conversation.conversation))
+                .await;
+            return Err(CodexErr::Fatal(format!(
+                "failed to register cross-session for conversation {conversation_id}: {err}"
+            )));
+        }
+
+        Ok(new_conversation)
+    }
+
+    async fn register_cross_session(
+        &self,
+        conversation_id: ConversationId,
+        defaults: SessionDefaults,
+        params: CrossSessionSpawnParams,
+        conversation: Arc<CodexConversation>,
+    ) -> Result<(), CrossSessionError> {
+        let CrossSessionSpawnParams { hub, run_id, role } = params;
+
+        let registration = SessionRegistration {
+            conversation_id,
+            conversation,
+            defaults,
+            run_id,
+            role,
+        };
+
+        let guard = hub.register_session(registration)?;
+        self.cross_session_registrations
+            .write()
+            .await
+            .insert(conversation_id, guard);
+        Ok(())
+    }
+
+    async fn abort_conversation(
+        &self,
+        conversation_id: ConversationId,
+        conversation: Arc<CodexConversation>,
+    ) {
+        let _ = self.remove_conversation(&conversation_id).await;
+        if let Err(err) = conversation.submit(Op::Shutdown).await {
+            warn!(
+                %conversation_id,
+                ?err,
+                "failed to shutdown conversation after cross-session registration error"
+            );
+        }
     }

     async fn finalize_spawn(
@@ -130,11 +223,35 @@ impl ConversationManager {
         auth_manager: Arc<AuthManager>,
     ) -> CodexResult<NewConversation> {
         let initial_history = RolloutRecorder::get_rollout_history(&rollout_path).await?;
-        let CodexSpawnOk {
-            codex,
-            conversation_id,
-        } = Codex::spawn(config, auth_manager, initial_history, self.session_source).await?;
-        self.finalize_spawn(codex, conversation_id).await
+        self.spawn_conversation_with_history(config, auth_manager, initial_history, None)
+            .await
     }

+    pub async fn resume_conversation_from_rollout_with_cross_session(
+        &self,
+        config: Config,
+        rollout_path: PathBuf,
+        auth_manager: Arc<AuthManager>,
+        params: CrossSessionSpawnParams,
+    ) -> CodexResult<NewConversation> {
+        let initial_history = RolloutRecorder::get_rollout_history(&rollout_path).await?;
+        self.spawn_conversation_with_history(config, auth_manager, initial_history, Some(params))
+            .await
+    }
+
+    pub async fn resume_conversation_with_cross_session(
+        &self,
+        config: Config,
+        rollout_path: PathBuf,
+        params: CrossSessionSpawnParams,
+    ) -> CodexResult<NewConversation> {
+        self.resume_conversation_from_rollout_with_cross_session(
+            config,
+            rollout_path,
+            self.auth_manager.clone(),
+            params,
+        )
+        .await
+    }
+
     /// Removes the conversation from the manager's internal map, though the
@@ -145,6 +262,10 @@ impl ConversationManager {
         &self,
         conversation_id: &ConversationId,
     ) -> Option<Arc<CodexConversation>> {
+        self.cross_session_registrations
+            .write()
+            .await
+            .remove(conversation_id);
         self.conversations.write().await.remove(conversation_id)
     }

@@ -164,12 +285,23 @@ impl ConversationManager {

         // Spawn a new conversation with the computed initial history.
         let auth_manager = self.auth_manager.clone();
-        let CodexSpawnOk {
-            codex,
-            conversation_id,
-        } = Codex::spawn(config, auth_manager, history, self.session_source).await?;
+        self.spawn_conversation_with_history(config, auth_manager, history, None)
+            .await
+    }

-        self.finalize_spawn(codex, conversation_id).await
+    pub async fn fork_conversation_with_cross_session(
+        &self,
+        nth_user_message: usize,
+        config: Config,
+        path: PathBuf,
+        params: CrossSessionSpawnParams,
+    ) -> CodexResult<NewConversation> {
+        let history = RolloutRecorder::get_rollout_history(&path).await?;
+        let history = truncate_before_nth_user_message(history, nth_user_message);
+
+        let auth_manager = self.auth_manager.clone();
+        self.spawn_conversation_with_history(config, auth_manager, history, Some(params))
+            .await
     }
 }

@@ -210,6 +342,7 @@ fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> Initia
 mod tests {
     use super::*;
     use crate::codex::make_session_and_context;
+    use assert_matches::assert_matches;
     use codex_protocol::models::ContentItem;
     use codex_protocol::models::ReasoningItemReasoningSummary;
     use codex_protocol::models::ResponseItem;
@@ -236,7 +369,7 @@ mod tests {

     #[test]
     fn drops_from_last_user_only() {
-        let items = vec![
+        let items = [
             user_msg("u1"),
             assistant_msg("a1"),
             assistant_msg("a2"),
@@ -283,7 +416,7 @@ mod tests {
             .map(RolloutItem::ResponseItem)
             .collect();
         let truncated2 = truncate_before_nth_user_message(InitialHistory::Forked(initial2), 2);
-        assert!(matches!(truncated2, InitialHistory::New));
+        assert_matches!(truncated2, InitialHistory::New);
     }

     #[test]
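Every public entry point above (new, resume, fork) now funnels through spawn_conversation_with_history, and a caller opts into cross-session registration by passing CrossSessionSpawnParams. A minimal sketch of the opt-in path, assuming an existing manager and config, and assuming NewConversation exposes a `conversation_id` field alongside the `conversation` field this diff already references (the run/role strings are illustrative):

async fn spawn_worker(manager: &ConversationManager, config: Config) -> CodexResult<()> {
    let hub = Arc::new(CrossSessionHub::new());

    // Register the new conversation under a (run_id, role) pair so peers can
    // target it by role instead of by ConversationId.
    let new_conv = manager
        .new_conversation_with_cross_session(
            config,
            CrossSessionSpawnParams {
                hub: Arc::clone(&hub),
                run_id: Some("run-1".to_string()),
                role: Some("worker".to_string()),
            },
        )
        .await?;
    println!("registered {}", new_conv.conversation_id);
    Ok(())
}

If registration fails (for example, the role is already taken for that run), the manager shuts the freshly spawned conversation back down and surfaces CodexErr::Fatal, so callers never observe a half-registered session.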

codex-rs/core/src/cross_session.rs (new file, 607 lines)
@@ -0,0 +1,607 @@
use std::collections::HashMap;
use std::fmt;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::time::Duration;

use futures::Stream;
use serde_json::Value;
use tokio::sync::Mutex as TokioMutex;
use tokio::sync::broadcast;
use tokio::sync::oneshot;
use tokio::time;
use tokio_stream::wrappers::BroadcastStream;
use tokio_stream::wrappers::errors::BroadcastStreamRecvError;
use tracing::debug;
use tracing::error;

use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::protocol::AgentMessageEvent;
use crate::protocol::AskForApproval;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::Op;
use crate::protocol::SandboxPolicy;
use crate::protocol_config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::protocol_config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::ConversationId;

/// Default capacity for broadcast channels that fan out session events.
const EVENT_BUFFER_LEN: usize = 256;

/// Encapsulates the defaults needed to submit a new `Op::UserTurn`.
#[derive(Debug, Clone)]
pub struct SessionDefaults {
    pub cwd: PathBuf,
    pub approval_policy: AskForApproval,
    pub sandbox_policy: SandboxPolicy,
    pub model: String,
    pub effort: Option<ReasoningEffortConfig>,
    pub summary: ReasoningSummaryConfig,
}

impl SessionDefaults {
    pub fn from_config(config: &Config) -> Self {
        Self {
            cwd: config.cwd.clone(),
            approval_policy: config.approval_policy,
            sandbox_policy: config.sandbox_policy.clone(),
            model: config.model.clone(),
            effort: config.model_reasoning_effort,
            summary: config.model_reasoning_summary,
        }
    }
}

/// Request payload for posting a user turn to a session.
#[derive(Debug, Clone)]
pub struct PostUserTurnRequest {
    pub target: RoleOrId,
    pub text: String,
    pub final_output_json_schema: Option<Value>,
}

/// Identifier used when targeting sessions for cross-session routing.
#[derive(Debug, Clone)]
pub enum RoleOrId {
    Session(ConversationId),
    RunRole { run_id: String, role: String },
}

/// Handle returned by [`CrossSessionHub::post_user_turn`].
pub struct TurnHandle {
    conversation_id: ConversationId,
    submission_id: String,
    receiver: TokioMutex<Option<oneshot::Receiver<AssistantMessage>>>,
}

impl TurnHandle {
    pub fn conversation_id(&self) -> ConversationId {
        self.conversation_id
    }

    pub fn submission_id(&self) -> &str {
        &self.submission_id
    }
}

impl fmt::Debug for TurnHandle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TurnHandle")
            .field("conversation_id", &self.conversation_id)
            .field("submission_id", &self.submission_id)
            .finish()
    }
}

/// First assistant message emitted for a bridged turn.
#[derive(Debug, Clone)]
pub struct AssistantMessage {
    pub conversation_id: ConversationId,
    pub submission_id: String,
    pub message: AgentMessageEvent,
}

/// Wrapper around a session event tagged with its conversation id.
#[derive(Debug, Clone)]
pub struct SessionEvent {
    pub conversation_id: ConversationId,
    pub event: Event,
}

/// Stream of [`SessionEvent`] instances for a particular session.
pub struct SessionEventStream {
    inner: BroadcastStream<SessionEvent>,
}

impl SessionEventStream {
    fn new(receiver: broadcast::Receiver<SessionEvent>) -> Self {
        Self {
            inner: BroadcastStream::new(receiver),
        }
    }
}

impl Stream for SessionEventStream {
    type Item = SessionEvent;

    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        loop {
            match Pin::new(&mut self.inner).poll_next(cx) {
                std::task::Poll::Ready(Some(Ok(event))) => {
                    return std::task::Poll::Ready(Some(event));
                }
                std::task::Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(_)))) => continue,
                std::task::Poll::Ready(None) => return std::task::Poll::Ready(None),
                std::task::Poll::Pending => return std::task::Poll::Pending,
            }
        }
    }
}

#[derive(Clone)]
struct RoleKey {
    run_id: Arc<str>,
    role: Arc<str>,
}

impl RoleKey {
    fn new(run_id: String, role: String) -> Self {
        Self {
            run_id: Arc::<str>::from(run_id),
            role: Arc::<str>::from(role),
        }
    }
}

impl PartialEq for RoleKey {
    fn eq(&self, other: &Self) -> bool {
        self.run_id.as_ref() == other.run_id.as_ref() && self.role.as_ref() == other.role.as_ref()
    }
}

impl Eq for RoleKey {}

impl std::hash::Hash for RoleKey {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        std::hash::Hash::hash(self.run_id.as_ref(), state);
        std::hash::Hash::hash(self.role.as_ref(), state);
    }
}

struct SessionEntry {
    conversation_id: ConversationId,
    conversation: Arc<CodexConversation>,
    defaults: SessionDefaults,
    role_key: Option<RoleKey>,
    event_tx: broadcast::Sender<SessionEvent>,
    turn_watchers: TokioMutex<HashMap<String, oneshot::Sender<AssistantMessage>>>,
    pending_messages: TokioMutex<HashMap<String, AssistantMessage>>,
    shutdown_tx: StdMutex<Option<oneshot::Sender<()>>>,
}

impl SessionEntry {
    fn new(
        conversation_id: ConversationId,
        conversation: Arc<CodexConversation>,
        defaults: SessionDefaults,
        role_key: Option<RoleKey>,
        event_tx: broadcast::Sender<SessionEvent>,
        shutdown_tx: oneshot::Sender<()>,
    ) -> Self {
        Self {
            conversation_id,
            conversation,
            defaults,
            role_key,
            event_tx,
            turn_watchers: TokioMutex::new(HashMap::new()),
            pending_messages: TokioMutex::new(HashMap::new()),
            shutdown_tx: StdMutex::new(Some(shutdown_tx)),
        }
    }

    async fn register_waiter(
        &self,
        submission_id: String,
        sender: oneshot::Sender<AssistantMessage>,
    ) {
        {
            let mut watchers = self.turn_watchers.lock().await;
            if let Some(message) = {
                let mut pending = self.pending_messages.lock().await;
                pending.remove(&submission_id)
            } {
                drop(watchers);
                let _ = sender.send(message);
                return;
            }
            watchers.insert(submission_id, sender);
        }
    }

    async fn notify_assistant_message(&self, message: AssistantMessage) {
        let submission_id = message.submission_id.clone();
        let sender_opt = {
            let mut watchers = self.turn_watchers.lock().await;
            watchers.remove(&submission_id)
        };

        if let Some(sender) = sender_opt {
            let _ = sender.send(message);
        } else {
            let mut pending = self.pending_messages.lock().await;
            pending.entry(submission_id).or_insert(message);
        }
    }

    fn subscribe(&self) -> broadcast::Receiver<SessionEvent> {
        self.event_tx.subscribe()
    }

    fn close(&self) {
        if let Ok(mut guard) = self.shutdown_tx.lock()
            && let Some(tx) = guard.take()
        {
            let _ = tx.send(());
        }
    }

    fn role_key(&self) -> Option<RoleKey> {
        self.role_key.clone()
    }
}

/// Input for registering a session with the hub.
pub struct SessionRegistration {
    pub conversation_id: ConversationId,
    pub conversation: Arc<CodexConversation>,
    pub defaults: SessionDefaults,
    pub run_id: Option<String>,
    pub role: Option<String>,
}

/// Guard that unregisters the session on drop.
pub struct RegisteredSession {
    inner: Arc<Inner>,
    conversation_id: ConversationId,
}

impl RegisteredSession {
    pub fn conversation_id(&self) -> ConversationId {
        self.conversation_id
    }
}

impl Drop for RegisteredSession {
    fn drop(&mut self) {
        self.inner.unregister(self.conversation_id);
    }
}

#[derive(Default)]
struct Inner {
    sessions: RwLock<HashMap<ConversationId, Arc<SessionEntry>>>,
    roles: RwLock<HashMap<RoleKey, ConversationId>>,
}

impl Inner {
    fn sessions_read(
        &self,
    ) -> Result<RwLockReadGuard<'_, HashMap<ConversationId, Arc<SessionEntry>>>, CrossSessionError>
    {
        self.sessions
            .read()
            .map_err(|_| CrossSessionError::LockPoisoned("sessions"))
    }

    fn sessions_write(
        &self,
    ) -> Result<RwLockWriteGuard<'_, HashMap<ConversationId, Arc<SessionEntry>>>, CrossSessionError>
    {
        self.sessions
            .write()
            .map_err(|_| CrossSessionError::LockPoisoned("sessions"))
    }

    fn roles_read(
        &self,
    ) -> Result<RwLockReadGuard<'_, HashMap<RoleKey, ConversationId>>, CrossSessionError> {
        self.roles
            .read()
            .map_err(|_| CrossSessionError::LockPoisoned("roles"))
    }

    fn roles_write(
        &self,
    ) -> Result<RwLockWriteGuard<'_, HashMap<RoleKey, ConversationId>>, CrossSessionError> {
        self.roles
            .write()
            .map_err(|_| CrossSessionError::LockPoisoned("roles"))
    }

    fn insert(&self, entry: Arc<SessionEntry>) -> Result<(), CrossSessionError> {
        {
            let mut sessions = self.sessions_write()?;
            if sessions
                .insert(entry.conversation_id, entry.clone())
                .is_some()
            {
                return Err(CrossSessionError::SessionAlreadyRegistered(
                    entry.conversation_id,
                ));
            }
        }

        if let Some(role_key) = entry.role_key() {
            let mut roles = self.roles_write()?;
            if roles.contains_key(&role_key) {
                self.sessions_write()?.remove(&entry.conversation_id);
                return Err(CrossSessionError::RoleAlreadyRegistered {
                    run_id: role_key.run_id.to_string(),
                    role: role_key.role.to_string(),
                });
            }
            roles.insert(role_key, entry.conversation_id);
        }

        Ok(())
    }

    fn unregister(&self, conversation_id: ConversationId) {
        if let Some(entry) = self.remove_internal(conversation_id) {
            entry.close();
        }
    }

    fn remove_internal(&self, conversation_id: ConversationId) -> Option<Arc<SessionEntry>> {
        let (entry, role_key) = {
            let mut sessions = self.sessions.write().ok()?;
            let entry = sessions.remove(&conversation_id)?;
            let role_key = entry.role_key();
            (entry, role_key)
        };

        if let Some(role_key) = role_key
            && let Ok(mut roles) = self.roles.write()
        {
            roles.remove(&role_key);
        }

        Some(entry)
    }

    fn resolve_session(
        &self,
        conversation_id: ConversationId,
    ) -> Result<Arc<SessionEntry>, CrossSessionError> {
        self.sessions_read()?
            .get(&conversation_id)
            .cloned()
            .ok_or(CrossSessionError::SessionNotFound(conversation_id))
    }

    fn resolve_target(&self, target: &RoleOrId) -> Result<Arc<SessionEntry>, CrossSessionError> {
        match target {
            RoleOrId::Session(id) => self.resolve_session(*id),
            RoleOrId::RunRole { run_id, role } => {
                let conversation_id = {
                    let roles = self.roles_read()?;
                    let key = RoleKey::new(run_id.clone(), role.clone());
                    roles
                        .get(&key)
                        .copied()
                        .ok_or_else(|| CrossSessionError::RoleNotFound {
                            run_id: run_id.clone(),
                            role: role.clone(),
                        })?
                };
                self.resolve_session(conversation_id)
            }
        }
    }
}

/// Cross-session coordination hub.
#[derive(Default, Clone)]
pub struct CrossSessionHub {
    inner: Arc<Inner>,
}

impl CrossSessionHub {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn register_session(
        &self,
        registration: SessionRegistration,
    ) -> Result<RegisteredSession, CrossSessionError> {
        let SessionRegistration {
            conversation_id,
            conversation,
            defaults,
            run_id,
            role,
        } = registration;

        let role_key = match (run_id, role) {
            (Some(run_id), Some(role)) => Some(RoleKey::new(run_id, role)),
            (None, None) => None,
            _ => {
                return Err(CrossSessionError::IncompleteRoleRegistration);
            }
        };

        let (event_tx, _) = broadcast::channel(EVENT_BUFFER_LEN);
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let entry = Arc::new(SessionEntry::new(
            conversation_id,
            Arc::clone(&conversation),
            defaults,
            role_key,
            event_tx,
            shutdown_tx,
        ));

        self.inner.insert(entry.clone())?;

        self.spawn_event_forwarder(entry, conversation, shutdown_rx);

        Ok(RegisteredSession {
            inner: Arc::clone(&self.inner),
            conversation_id,
        })
    }

    pub async fn post_user_turn(
        &self,
        request: PostUserTurnRequest,
    ) -> Result<TurnHandle, CrossSessionError> {
        let entry = self.inner.resolve_target(&request.target)?;

        let items = vec![InputItem::Text { text: request.text }];

        let defaults = &entry.defaults;
        let submission_id = entry
            .conversation
            .submit(Op::UserTurn {
                items,
                cwd: defaults.cwd.clone(),
                approval_policy: defaults.approval_policy,
                sandbox_policy: defaults.sandbox_policy.clone(),
                model: defaults.model.clone(),
                effort: defaults.effort,
                summary: defaults.summary,
                final_output_json_schema: request.final_output_json_schema,
            })
            .await
            .map_err(CrossSessionError::from)?;

        let (tx, rx) = oneshot::channel();

        entry.register_waiter(submission_id.clone(), tx).await;

        Ok(TurnHandle {
            conversation_id: entry.conversation_id,
            submission_id,
            receiver: TokioMutex::new(Some(rx)),
        })
    }

    pub async fn await_first_assistant(
        &self,
        handle: &TurnHandle,
        timeout: Duration,
    ) -> Result<AssistantMessage, CrossSessionError> {
        let receiver = {
            let mut guard = handle.receiver.lock().await;
            guard.take().ok_or(CrossSessionError::TurnHandleConsumed)?
        };

        match time::timeout(timeout, receiver).await {
            Ok(Ok(message)) => Ok(message),
            Ok(Err(_)) => Err(CrossSessionError::SessionClosed),
            Err(_) => Err(CrossSessionError::AwaitTimeout(timeout)),
        }
    }

    pub fn stream_events(
        &self,
        conversation_id: ConversationId,
    ) -> Result<SessionEventStream, CrossSessionError> {
        let entry = self.inner.resolve_session(conversation_id)?;
        Ok(SessionEventStream::new(entry.subscribe()))
    }

    fn spawn_event_forwarder(
        &self,
        entry: Arc<SessionEntry>,
        conversation: Arc<CodexConversation>,
        mut shutdown_rx: oneshot::Receiver<()>,
    ) {
        let conversation_id = entry.conversation_id;
        let event_tx = entry.event_tx.clone();
        let inner = Arc::clone(&self.inner);

        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = &mut shutdown_rx => {
                        debug!("CrossSessionHub received shutdown for session {conversation_id}");
                        break;
                    }
                    event = conversation.next_event() => {
                        match event {
                            Ok(event) => {
                                if let EventMsg::AgentMessage(agent_message) = &event.msg {
                                    let message = AssistantMessage {
                                        conversation_id,
                                        submission_id: event.id.clone(),
                                        message: agent_message.clone(),
                                    };
                                    entry.notify_assistant_message(message).await;
                                }

                                if let Err(err) = event_tx.send(SessionEvent {
                                    conversation_id,
                                    event: event.clone(),
                                }) {
                                    debug!(
                                        "CrossSessionHub dropped event for session {conversation_id}: {err}"
                                    );
                                }

                                if matches!(event.msg, EventMsg::ShutdownComplete) {
                                    break;
                                }
                            }
                            Err(err) => {
                                error!("CrossSessionHub event loop error for session {conversation_id}: {err:#?}");
                                break;
                            }
                        }
                    }
                }
            }

            inner.unregister(conversation_id);
        });
    }
}

/// Errors surfaced by cross-session orchestration.
#[derive(thiserror::Error, Debug)]
pub enum CrossSessionError {
    #[error("session {0} is already registered with the hub")]
    SessionAlreadyRegistered(ConversationId),
    #[error("run {run_id} already has a {role} session registered")]
    RoleAlreadyRegistered { run_id: String, role: String },
    #[error("session {0} does not exist")]
    SessionNotFound(ConversationId),
    #[error("no session registered for run {run_id} role {role}")]
    RoleNotFound { run_id: String, role: String },
    #[error("session role registration must set both run_id and role")]
    IncompleteRoleRegistration,
    #[error("turn handle has already been awaited")]
    TurnHandleConsumed,
    #[error("session closed before an assistant message was emitted")]
    SessionClosed,
    #[error("timed out waiting {0:?} for assistant response")]
    AwaitTimeout(Duration),
    #[error("internal lock poisoned: {0}")]
    LockPoisoned(&'static str),
    #[error("submit failed: {0}")]
    SubmitFailed(#[from] CodexErr),
}
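Putting the hub API together: a turn is posted to a peer session (by role or by id), and the caller blocks with a timeout on the first assistant message for that submission, while other consumers can independently follow the full event stream. A minimal sketch, assuming a hub with a session already registered under run "run-1" as role "worker", and assuming AgentMessageEvent carries its text in a `message` field:

async fn ask_worker(hub: &CrossSessionHub) -> Result<String, CrossSessionError> {
    // Route by (run_id, role) rather than by ConversationId.
    let handle = hub
        .post_user_turn(PostUserTurnRequest {
            target: RoleOrId::RunRole {
                run_id: "run-1".to_string(),
                role: "worker".to_string(),
            },
            text: "Summarize the current plan.".to_string(),
            final_output_json_schema: None,
        })
        .await?;

    // Resolves with the first AgentMessage matching this submission id,
    // or errors after 30 seconds / if the session shuts down first.
    let reply = hub
        .await_first_assistant(&handle, Duration::from_secs(30))
        .await?;
    Ok(reply.message.message)
}

Note the buffering in SessionEntry: if the assistant message arrives before the waiter registers, it is parked in pending_messages and delivered when register_waiter runs, so the post/await pair has no race window.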
@@ -20,7 +20,7 @@ use std::sync::OnceLock;
 /// The full user agent string is returned from the mcp initialize response.
 /// Parenthesis will be added by Codex. This should only specify what goes inside of the parenthesis.
 pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None));

+pub const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";
 pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE";
 #[derive(Debug, Clone)]
 pub struct Originator {
@@ -35,10 +35,11 @@ pub enum SetOriginatorError {
     AlreadyInitialized,
 }

-fn init_originator_from_env() -> Originator {
-    let default = "codex_cli_rs";
+fn get_originator_value(provided: Option<String>) -> Originator {
     let value = std::env::var(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR)
-        .unwrap_or_else(|_| default.to_string());
+        .ok()
+        .or(provided)
+        .unwrap_or(DEFAULT_ORIGINATOR.to_string());

     match HeaderValue::from_str(&value) {
         Ok(header_value) => Originator {
@@ -48,31 +49,22 @@ fn init_originator_from_env() -> Originator {
         Err(e) => {
             tracing::error!("Unable to turn originator override {value} into header value: {e}");
             Originator {
-                value: default.to_string(),
-                header_value: HeaderValue::from_static(default),
+                value: DEFAULT_ORIGINATOR.to_string(),
+                header_value: HeaderValue::from_static(DEFAULT_ORIGINATOR),
             }
         }
     }
 }

-fn build_originator(value: String) -> Result<Originator, SetOriginatorError> {
-    let header_value =
-        HeaderValue::from_str(&value).map_err(|_| SetOriginatorError::InvalidHeaderValue)?;
-    Ok(Originator {
-        value,
-        header_value,
-    })
-}
-
-pub fn set_default_originator(value: &str) -> Result<(), SetOriginatorError> {
-    let originator = build_originator(value.to_string())?;
+pub fn set_default_originator(value: String) -> Result<(), SetOriginatorError> {
+    let originator = get_originator_value(Some(value));
     ORIGINATOR
         .set(originator)
         .map_err(|_| SetOriginatorError::AlreadyInitialized)
 }

 pub fn originator() -> &'static Originator {
-    ORIGINATOR.get_or_init(init_originator_from_env)
+    ORIGINATOR.get_or_init(|| get_originator_value(None))
 }

 pub fn get_codex_user_agent() -> String {
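The refactor collapses originator resolution into a single precedence chain: the CODEX_INTERNAL_ORIGINATOR_OVERRIDE environment variable wins, then any value passed to set_default_originator, then DEFAULT_ORIGINATOR. A minimal sketch of how a caller observes that ordering, assuming no env override is set and that Originator's `value` field is publicly readable (both assumptions, not confirmed by the hunk):

fn demo_originator_precedence() {
    // With no env override and no explicit value, the default applies.
    assert_eq!(originator().value, "codex_cli_rs");

    // The OnceLock is now initialized, so a later explicit set fails.
    assert!(matches!(
        set_default_originator("my_integration".to_string()),
        Err(SetOriginatorError::AlreadyInitialized)
    ));
}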
@@ -1,6 +1,7 @@
 use crate::exec::ExecToolCallOutput;
 use crate::token_data::KnownPlan;
 use crate::token_data::PlanType;
+use crate::truncate::truncate_middle;
 use codex_protocol::ConversationId;
 use codex_protocol::protocol::RateLimitSnapshot;
 use reqwest::StatusCode;
@@ -12,6 +13,9 @@ use tokio::task::JoinError;

 pub type Result<T> = std::result::Result<T, CodexErr>;

+/// Limit UI error messages to a reasonable size while keeping useful context.
+const ERROR_MESSAGE_UI_MAX_BYTES: usize = 2 * 1024; // 2 KiB
+
 #[derive(Error, Debug)]
 pub enum SandboxErr {
     /// Error from sandbox execution
@@ -55,6 +59,11 @@ pub enum CodexErr {
     #[error("stream disconnected before completion: {0}")]
     Stream(String, Option<Duration>),

+    #[error(
+        "Codex ran out of room in the model's context window. Start a new conversation or clear earlier history before retrying."
+    )]
+    ContextWindowExceeded,
+
     #[error("no conversation with id: {0}")]
     ConversationNotFound(ConversationId),

@@ -82,6 +91,12 @@ pub enum CodexErr {
     #[error("{0}")]
     UsageLimitReached(UsageLimitReachedError),

+    #[error("{0}")]
+    ResponseStreamFailed(ResponseStreamFailed),
+
+    #[error("{0}")]
+    ConnectionFailed(ConnectionFailedError),
+
     #[error(
         "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
     )]
@@ -108,15 +123,15 @@ pub enum CodexErr {
     #[error("unsupported operation: {0}")]
     UnsupportedOperation(String),

+    #[error("Fatal error: {0}")]
+    Fatal(String),
+
     // -----------------------------------------------------------------
     // Automatic conversions for common external error types
     // -----------------------------------------------------------------
     #[error(transparent)]
     Io(#[from] io::Error),

     #[error(transparent)]
     Reqwest(#[from] reqwest::Error),

     #[error(transparent)]
     Json(#[from] serde_json::Error),
@@ -135,6 +150,37 @@ pub enum CodexErr {
     EnvVar(EnvVarError),
 }

+#[derive(Debug)]
+pub struct ConnectionFailedError {
+    pub source: reqwest::Error,
+}
+
+impl std::fmt::Display for ConnectionFailedError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Connection failed: {}", self.source)
+    }
+}
+
+#[derive(Debug)]
+pub struct ResponseStreamFailed {
+    pub source: reqwest::Error,
+    pub request_id: Option<String>,
+}
+
+impl std::fmt::Display for ResponseStreamFailed {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Error while reading the server response: {}{}",
+            self.source,
+            self.request_id
+                .as_ref()
+                .map(|id| format!(", request id: {id}"))
+                .unwrap_or_default()
+        )
+    }
+}
+
 #[derive(Debug)]
 pub struct UnexpectedResponseError {
     pub status: StatusCode,
@@ -296,21 +342,44 @@ impl CodexErr {
 }

 pub fn get_error_message_ui(e: &CodexErr) -> String {
-    match e {
-        CodexErr::Sandbox(SandboxErr::Denied { output }) => output.stderr.text.clone(),
+    let message = match e {
+        CodexErr::Sandbox(SandboxErr::Denied { output }) => {
+            let aggregated = output.aggregated_output.text.trim();
+            if !aggregated.is_empty() {
+                output.aggregated_output.text.clone()
+            } else {
+                let stderr = output.stderr.text.trim();
+                let stdout = output.stdout.text.trim();
+                match (stderr.is_empty(), stdout.is_empty()) {
+                    (false, false) => format!("{stderr}\n{stdout}"),
+                    (false, true) => output.stderr.text.clone(),
+                    (true, false) => output.stdout.text.clone(),
+                    (true, true) => format!(
+                        "command failed inside sandbox with exit code {}",
+                        output.exit_code
+                    ),
+                }
+            }
+        }
         // Timeouts are not sandbox errors from a UX perspective; present them plainly
-        CodexErr::Sandbox(SandboxErr::Timeout { output }) => format!(
-            "error: command timed out after {} ms",
-            output.duration.as_millis()
-        ),
+        CodexErr::Sandbox(SandboxErr::Timeout { output }) => {
+            format!(
+                "error: command timed out after {} ms",
+                output.duration.as_millis()
+            )
+        }
         _ => e.to_string(),
-    }
+    };
+
+    truncate_middle(&message, ERROR_MESSAGE_UI_MAX_BYTES).0
 }

 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::exec::StreamOutput;
     use codex_protocol::protocol::RateLimitWindow;
     use pretty_assertions::assert_eq;

     fn rate_limit_snapshot() -> RateLimitSnapshot {
         RateLimitSnapshot {
@@ -340,6 +409,73 @@ mod tests {
         );
     }

+    #[test]
+    fn sandbox_denied_uses_aggregated_output_when_stderr_empty() {
+        let output = ExecToolCallOutput {
+            exit_code: 77,
+            stdout: StreamOutput::new(String::new()),
+            stderr: StreamOutput::new(String::new()),
+            aggregated_output: StreamOutput::new("aggregate detail".to_string()),
+            duration: Duration::from_millis(10),
+            timed_out: false,
+        };
+        let err = CodexErr::Sandbox(SandboxErr::Denied {
+            output: Box::new(output),
+        });
+        assert_eq!(get_error_message_ui(&err), "aggregate detail");
+    }
+
+    #[test]
+    fn sandbox_denied_reports_both_streams_when_available() {
+        let output = ExecToolCallOutput {
+            exit_code: 9,
+            stdout: StreamOutput::new("stdout detail".to_string()),
+            stderr: StreamOutput::new("stderr detail".to_string()),
+            aggregated_output: StreamOutput::new(String::new()),
+            duration: Duration::from_millis(10),
+            timed_out: false,
+        };
+        let err = CodexErr::Sandbox(SandboxErr::Denied {
+            output: Box::new(output),
+        });
+        assert_eq!(get_error_message_ui(&err), "stderr detail\nstdout detail");
+    }
+
+    #[test]
+    fn sandbox_denied_reports_stdout_when_no_stderr() {
+        let output = ExecToolCallOutput {
+            exit_code: 11,
+            stdout: StreamOutput::new("stdout only".to_string()),
+            stderr: StreamOutput::new(String::new()),
+            aggregated_output: StreamOutput::new(String::new()),
+            duration: Duration::from_millis(8),
+            timed_out: false,
+        };
+        let err = CodexErr::Sandbox(SandboxErr::Denied {
+            output: Box::new(output),
+        });
+        assert_eq!(get_error_message_ui(&err), "stdout only");
+    }
+
+    #[test]
+    fn sandbox_denied_reports_exit_code_when_no_output_available() {
+        let output = ExecToolCallOutput {
+            exit_code: 13,
+            stdout: StreamOutput::new(String::new()),
+            stderr: StreamOutput::new(String::new()),
+            aggregated_output: StreamOutput::new(String::new()),
+            duration: Duration::from_millis(5),
+            timed_out: false,
+        };
+        let err = CodexErr::Sandbox(SandboxErr::Denied {
+            output: Box::new(output),
+        });
+        assert_eq!(
+            get_error_message_ui(&err),
+            "command failed inside sandbox with exit code 13"
+        );
+    }
+
     #[test]
     fn usage_limit_reached_error_formats_free_plan() {
         let err = UsageLimitReachedError {
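One note on the UI cap added above (the original comment said "4 KiB" against a 2 * 1024 value; 2 KiB is correct and is fixed here): every message now passes through truncate_middle, which, as the name suggests, elides the middle of an oversized string so the beginning and end survive. A quick sketch of the call as used above (the second tuple component mirrors the `.0` in the source and is ignored here; its exact contents are an assumption):

fn demo_truncation() {
    let long_message = "x".repeat(10 * 1024);
    // truncate_middle returns the (possibly elided) string plus a second
    // component we discard, matching the `.0` in get_error_message_ui.
    let (shown, _) = truncate_middle(&long_message, ERROR_MESSAGE_UI_MAX_BYTES);
    assert!(shown.len() < long_message.len());
}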
@@ -127,6 +127,7 @@ mod tests {
     use super::map_response_item_to_event_messages;
     use crate::protocol::EventMsg;
     use crate::protocol::InputMessageKind;
+    use assert_matches::assert_matches;
     use codex_protocol::models::ContentItem;
     use codex_protocol::models::ResponseItem;
     use pretty_assertions::assert_eq;
@@ -158,7 +159,7 @@ mod tests {
         match &events[0] {
             EventMsg::UserMessage(user) => {
                 assert_eq!(user.message, "Hello world");
-                assert!(matches!(user.kind, Some(InputMessageKind::Plain)));
+                assert_matches!(user.kind, Some(InputMessageKind::Plain));
                 assert_eq!(user.images, Some(vec![img1, img2]));
             }
             other => panic!("expected UserMessage, got {other:?}"),
@@ -177,7 +177,7 @@ pub async fn process_exec_tool_call(
         }));
     }

-    if exit_code != 0 && is_likely_sandbox_denied(sandbox_type, exit_code) {
+    if is_likely_sandbox_denied(sandbox_type, &exec_output) {
         return Err(CodexErr::Sandbox(SandboxErr::Denied {
             output: Box::new(exec_output),
         }));
@@ -195,21 +195,57 @@ pub async fn process_exec_tool_call(
 /// We don't have a fully deterministic way to tell if our command failed
 /// because of the sandbox - a command in the user's zshrc file might hit an
 /// error, but the command itself might fail or succeed for other reasons.
-/// For now, we conservatively check for 'command not found' (exit code 127),
-/// and can add additional cases as necessary.
-fn is_likely_sandbox_denied(sandbox_type: SandboxType, exit_code: i32) -> bool {
-    if sandbox_type == SandboxType::None {
+/// For now, we conservatively check for well known command failure exit codes and
+/// also look for common sandbox denial keywords in the command output.
+fn is_likely_sandbox_denied(sandbox_type: SandboxType, exec_output: &ExecToolCallOutput) -> bool {
+    if sandbox_type == SandboxType::None || exec_output.exit_code == 0 {
         return false;
     }

     // Quick rejects: well-known non-sandbox shell exit codes
-    // 127: command not found, 2: misuse of shell builtins
-    if exit_code == 127 {
+    // 2: misuse of shell builtins
+    // 126: permission denied
+    // 127: command not found
+    const QUICK_REJECT_EXIT_CODES: [i32; 3] = [2, 126, 127];
+    if QUICK_REJECT_EXIT_CODES.contains(&exec_output.exit_code) {
         return false;
     }

-    // For all other cases, we assume the sandbox is the cause
-    true
+    const SANDBOX_DENIED_KEYWORDS: [&str; 6] = [
+        "operation not permitted",
+        "permission denied",
+        "read-only file system",
+        "seccomp",
+        "sandbox",
+        "landlock",
+    ];
+
+    if [
+        &exec_output.stderr.text,
+        &exec_output.stdout.text,
+        &exec_output.aggregated_output.text,
+    ]
+    .into_iter()
+    .any(|section| {
+        let lower = section.to_lowercase();
+        SANDBOX_DENIED_KEYWORDS
+            .iter()
+            .any(|needle| lower.contains(needle))
+    }) {
+        return true;
+    }
+
+    #[cfg(unix)]
+    {
+        const SIGSYS_CODE: i32 = libc::SIGSYS;
+        if sandbox_type == SandboxType::LinuxSeccomp
+            && exec_output.exit_code == EXIT_CODE_SIGNAL_BASE + SIGSYS_CODE
+        {
+            return true;
+        }
+    }
+
+    false
 }

 #[derive(Debug)]
@@ -436,3 +472,77 @@ fn synthetic_exit_status(code: i32) -> ExitStatus {
     #[expect(clippy::unwrap_used)]
     std::process::ExitStatus::from_raw(code.try_into().unwrap())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::time::Duration;
+
+    fn make_exec_output(
+        exit_code: i32,
+        stdout: &str,
+        stderr: &str,
+        aggregated: &str,
+    ) -> ExecToolCallOutput {
+        ExecToolCallOutput {
+            exit_code,
+            stdout: StreamOutput::new(stdout.to_string()),
+            stderr: StreamOutput::new(stderr.to_string()),
+            aggregated_output: StreamOutput::new(aggregated.to_string()),
+            duration: Duration::from_millis(1),
+            timed_out: false,
+        }
+    }
+
+    #[test]
+    fn sandbox_detection_requires_keywords() {
+        let output = make_exec_output(1, "", "", "");
+        assert!(!is_likely_sandbox_denied(
+            SandboxType::LinuxSeccomp,
+            &output
+        ));
+    }
+
+    #[test]
+    fn sandbox_detection_identifies_keyword_in_stderr() {
+        let output = make_exec_output(1, "", "Operation not permitted", "");
+        assert!(is_likely_sandbox_denied(SandboxType::LinuxSeccomp, &output));
+    }
+
+    #[test]
+    fn sandbox_detection_respects_quick_reject_exit_codes() {
+        let output = make_exec_output(127, "", "command not found", "");
+        assert!(!is_likely_sandbox_denied(
+            SandboxType::LinuxSeccomp,
+            &output
+        ));
+    }
+
+    #[test]
+    fn sandbox_detection_ignores_non_sandbox_mode() {
+        let output = make_exec_output(1, "", "Operation not permitted", "");
+        assert!(!is_likely_sandbox_denied(SandboxType::None, &output));
+    }
+
+    #[test]
+    fn sandbox_detection_uses_aggregated_output() {
+        let output = make_exec_output(
+            101,
+            "",
+            "",
+            "cargo failed: Read-only file system when writing target",
+        );
+        assert!(is_likely_sandbox_denied(
+            SandboxType::MacosSeatbelt,
+            &output
+        ));
+    }
+
+    #[cfg(unix)]
+    #[test]
+    fn sandbox_detection_flags_sigsys_exit_code() {
+        let exit_code = EXIT_CODE_SIGNAL_BASE + libc::SIGSYS;
+        let output = make_exec_output(exit_code, "", "", "");
+        assert!(is_likely_sandbox_denied(SandboxType::LinuxSeccomp, &output));
+    }
+}
@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 
-use crate::client_common::tools::ResponsesApiTool;
 use crate::openai_tools::JsonSchema;
+use crate::openai_tools::ResponsesApiTool;
 
 pub const EXEC_COMMAND_TOOL_NAME: &str = "exec_command";
 pub const WRITE_STDIN_TOOL_NAME: &str = "write_stdin";
codex-rs/core/src/executor/backends.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
use std::collections::HashMap;
use std::env;

use async_trait::async_trait;

use crate::CODEX_APPLY_PATCH_ARG1;
use crate::apply_patch::ApplyPatchExec;
use crate::exec::ExecParams;
use crate::executor::ExecutorConfig;
use crate::function_tool::FunctionCallError;

pub(crate) enum ExecutionMode {
    Shell,
    ApplyPatch(ApplyPatchExec),
}

/// Backend-specific hooks that prepare and post-process execution requests for a
/// given [`ExecutionMode`].
#[async_trait]
pub(crate) trait ExecutionBackend: Send + Sync {
    fn prepare(
        &self,
        params: ExecParams,
        // Required for downcasting the apply_patch.
        mode: &ExecutionMode,
        config: &ExecutorConfig,
    ) -> Result<ExecParams, FunctionCallError>;

    fn stream_stdout(&self, _mode: &ExecutionMode) -> bool {
        true
    }
}

static SHELL_BACKEND: ShellBackend = ShellBackend;
static APPLY_PATCH_BACKEND: ApplyPatchBackend = ApplyPatchBackend;

pub(crate) fn backend_for_mode(mode: &ExecutionMode) -> &'static dyn ExecutionBackend {
    match mode {
        ExecutionMode::Shell => &SHELL_BACKEND,
        ExecutionMode::ApplyPatch(_) => &APPLY_PATCH_BACKEND,
    }
}

struct ShellBackend;

#[async_trait]
impl ExecutionBackend for ShellBackend {
    fn prepare(
        &self,
        params: ExecParams,
        mode: &ExecutionMode,
        _config: &ExecutorConfig,
    ) -> Result<ExecParams, FunctionCallError> {
        match mode {
            ExecutionMode::Shell => Ok(params),
            _ => Err(FunctionCallError::RespondToModel(
                "shell backend invoked with non-shell mode".to_string(),
            )),
        }
    }
}

struct ApplyPatchBackend;

#[async_trait]
impl ExecutionBackend for ApplyPatchBackend {
    fn prepare(
        &self,
        params: ExecParams,
        mode: &ExecutionMode,
        config: &ExecutorConfig,
    ) -> Result<ExecParams, FunctionCallError> {
        match mode {
            ExecutionMode::ApplyPatch(exec) => {
                let path_to_codex = if let Some(exe_path) = &config.codex_exe {
                    exe_path.to_string_lossy().to_string()
                } else {
                    env::current_exe()
                        .ok()
                        .map(|p| p.to_string_lossy().to_string())
                        .ok_or_else(|| {
                            FunctionCallError::RespondToModel(
                                "failed to determine path to codex executable".to_string(),
                            )
                        })?
                };

                let patch = exec.action.patch.clone();
                Ok(ExecParams {
                    command: vec![path_to_codex, CODEX_APPLY_PATCH_ARG1.to_string(), patch],
                    cwd: exec.action.cwd.clone(),
                    timeout_ms: params.timeout_ms,
                    // Run apply_patch with a minimal environment for determinism and to
                    // avoid leaking host environment variables into the patch process.
                    env: HashMap::new(),
                    with_escalated_permissions: params.with_escalated_permissions,
                    justification: params.justification,
                })
            }
            ExecutionMode::Shell => Err(FunctionCallError::RespondToModel(
                "apply_patch backend invoked without patch context".to_string(),
            )),
        }
    }

    fn stream_stdout(&self, _mode: &ExecutionMode) -> bool {
        false
    }
}
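The file above uses a small registry pattern: stateless unit-struct backends stored in statics and handed out as `&'static dyn` trait objects keyed by the execution mode. A standalone sketch of the same idea, with illustrative names:

trait Backend: Sync {
    fn prepare(&self, cmd: Vec<String>) -> Result<Vec<String>, String>;
}

struct Shell;
struct ApplyPatch;

impl Backend for Shell {
    fn prepare(&self, cmd: Vec<String>) -> Result<Vec<String>, String> {
        Ok(cmd) // shell commands pass through unchanged
    }
}

impl Backend for ApplyPatch {
    fn prepare(&self, cmd: Vec<String>) -> Result<Vec<String>, String> {
        // Rewrite the invocation to run the patch through a dedicated entry point.
        let mut rewritten = vec!["codex".to_string(), "--apply-patch".to_string()];
        rewritten.extend(cmd);
        Ok(rewritten)
    }
}

enum Mode {
    Shell,
    ApplyPatch,
}

static SHELL: Shell = Shell;
static APPLY_PATCH: ApplyPatch = ApplyPatch;

fn backend_for(mode: &Mode) -> &'static dyn Backend {
    match mode {
        Mode::Shell => &SHELL,
        Mode::ApplyPatch => &APPLY_PATCH,
    }
}

fn main() {
    let cmd = vec!["echo".to_string(), "hi".to_string()];
    let prepared = backend_for(&Mode::ApplyPatch).prepare(cmd).unwrap();
    assert_eq!(prepared[0], "codex");
}

Because the backends are zero-sized and stateless, a static dispatch table like this avoids allocation entirely; adding a mode means adding a variant, a unit struct, and one match arm.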
codex-rs/core/src/executor/cache.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::Mutex;

/// Thread-safe store of user approvals so repeated commands can reuse
/// previously granted trust.
#[derive(Clone, Debug, Default)]
pub(crate) struct ApprovalCache {
    inner: Arc<Mutex<HashSet<Vec<String>>>>,
}

impl ApprovalCache {
    pub(crate) fn insert(&self, command: Vec<String>) {
        if command.is_empty() {
            return;
        }
        if let Ok(mut guard) = self.inner.lock() {
            guard.insert(command);
        }
    }

    pub(crate) fn snapshot(&self) -> HashSet<Vec<String>> {
        self.inner.lock().map(|g| g.clone()).unwrap_or_default()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn insert_ignores_empty_and_dedupes() {
        let cache = ApprovalCache::default();

        // Empty should be ignored
        cache.insert(vec![]);
        assert!(cache.snapshot().is_empty());

        // Insert a command and verify snapshot contains it
        let cmd = vec!["foo".to_string(), "bar".to_string()];
        cache.insert(cmd.clone());
        let snap1 = cache.snapshot();
        assert!(snap1.contains(&cmd));

        // Reinserting should not create duplicates
        cache.insert(cmd);
        let snap2 = cache.snapshot();
        assert_eq!(snap1, snap2);
    }
}
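Since `ApprovalCache` is `Clone` around a shared `Arc`, every clone observes the same underlying set, which is what lets approvals recorded during one tool call be visible to later ones. A standalone sketch of that sharing behavior (illustrative type, not the crate's):

use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::thread;

// Minimal stand-in for the cache above: cloning shares the same Arc'd set,
// so approvals recorded through one clone are visible through every other.
#[derive(Clone, Default)]
struct Cache {
    inner: Arc<Mutex<HashSet<Vec<String>>>>,
}

fn main() {
    let cache = Cache::default();
    let worker = cache.clone();

    let handle = thread::spawn(move || {
        let mut guard = worker.inner.lock().unwrap();
        guard.insert(vec!["git".to_string(), "status".to_string()]);
    });
    handle.join().unwrap();

    // The original handle sees the approval inserted by the worker thread.
    assert!(cache
        .inner
        .lock()
        .unwrap()
        .contains(&vec!["git".to_string(), "status".to_string()]));
}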
codex-rs/core/src/executor/mod.rs (new file, 68 lines)
@@ -0,0 +1,68 @@
mod backends;
mod cache;
mod runner;
mod sandbox;

pub(crate) use backends::ExecutionMode;
pub(crate) use runner::ExecutionRequest;
pub(crate) use runner::Executor;
pub(crate) use runner::ExecutorConfig;
pub(crate) use runner::normalize_exec_result;

pub(crate) mod linkers {
    use crate::exec::ExecParams;
    use crate::exec::StdoutStream;
    use crate::executor::backends::ExecutionMode;
    use crate::executor::runner::ExecutionRequest;
    use crate::tools::context::ExecCommandContext;

    pub struct PreparedExec {
        pub(crate) context: ExecCommandContext,
        pub(crate) request: ExecutionRequest,
    }

    impl PreparedExec {
        pub fn new(
            context: ExecCommandContext,
            params: ExecParams,
            approval_command: Vec<String>,
            mode: ExecutionMode,
            stdout_stream: Option<StdoutStream>,
            use_shell_profile: bool,
        ) -> Self {
            let request = ExecutionRequest {
                params,
                approval_command,
                mode,
                stdout_stream,
                use_shell_profile,
            };

            Self { context, request }
        }
    }
}

pub mod errors {
    use crate::error::CodexErr;
    use crate::function_tool::FunctionCallError;
    use thiserror::Error;

    #[derive(Debug, Error)]
    pub enum ExecError {
        #[error(transparent)]
        Function(#[from] FunctionCallError),
        #[error(transparent)]
        Codex(#[from] CodexErr),
    }

    impl ExecError {
        pub(crate) fn rejection(msg: impl Into<String>) -> Self {
            FunctionCallError::RespondToModel(msg.into()).into()
        }

        pub(crate) fn denied(msg: impl Into<String>) -> Self {
            FunctionCallError::Denied(msg.into()).into()
        }
    }
}
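`ExecError` relies on thiserror's `#[error(transparent)]` plus `#[from]` so the wrapper displays exactly like its source and `?` converts either source automatically. A standalone sketch of the pattern, assuming the `thiserror` crate is available as a dependency (types here are illustrative stand-ins, not the crate's real errors):

use thiserror::Error;

// Illustrative stand-ins for the two error sources being wrapped.
#[derive(Debug, Error)]
enum ToolError {
    #[error("{0}")]
    RespondToModel(String),
}

#[derive(Debug, Error)]
enum CoreError {
    #[error("sandbox denied: {0}")]
    Sandbox(String),
}

// `#[error(transparent)]` makes the wrapper Display exactly as its source,
// and `#[from]` lets either source convert into the wrapper via `?` or `.into()`.
#[derive(Debug, Error)]
enum ExecError {
    #[error(transparent)]
    Tool(#[from] ToolError),
    #[error(transparent)]
    Core(#[from] CoreError),
}

impl ExecError {
    fn rejection(msg: impl Into<String>) -> Self {
        ToolError::RespondToModel(msg.into()).into()
    }
}

fn main() {
    let err = ExecError::rejection("exec command rejected");
    assert_eq!(err.to_string(), "exec command rejected");

    let wrapped: ExecError = CoreError::Sandbox("read-only fs".to_string()).into();
    assert_eq!(wrapped.to_string(), "sandbox denied: read-only fs");
}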
codex-rs/core/src/executor/runner.rs (new file, 433 lines)
@@ -0,0 +1,433 @@
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::Duration;

use super::backends::ExecutionMode;
use super::backends::backend_for_mode;
use super::cache::ApprovalCache;
use crate::codex::Session;
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::error::get_error_message_ui;
use crate::exec::ExecParams;
use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
use crate::exec::StdoutStream;
use crate::exec::StreamOutput;
use crate::exec::process_exec_tool_call;
use crate::executor::errors::ExecError;
use crate::executor::sandbox::select_sandbox;
use crate::function_tool::FunctionCallError;
use crate::protocol::AskForApproval;
use crate::protocol::ReviewDecision;
use crate::protocol::SandboxPolicy;
use crate::shell;
use crate::tools::context::ExecCommandContext;
use codex_otel::otel_event_manager::ToolDecisionSource;

#[derive(Clone, Debug)]
pub(crate) struct ExecutorConfig {
    pub(crate) sandbox_policy: SandboxPolicy,
    pub(crate) sandbox_cwd: PathBuf,
    pub(crate) codex_exe: Option<PathBuf>,
}

impl ExecutorConfig {
    pub(crate) fn new(
        sandbox_policy: SandboxPolicy,
        sandbox_cwd: PathBuf,
        codex_exe: Option<PathBuf>,
    ) -> Self {
        Self {
            sandbox_policy,
            sandbox_cwd,
            codex_exe,
        }
    }
}

/// Coordinates sandbox selection, backend-specific preparation, and command
/// execution for tool calls requested by the model.
pub(crate) struct Executor {
    approval_cache: ApprovalCache,
    config: Arc<RwLock<ExecutorConfig>>,
}

impl Executor {
    pub(crate) fn new(config: ExecutorConfig) -> Self {
        Self {
            approval_cache: ApprovalCache::default(),
            config: Arc::new(RwLock::new(config)),
        }
    }

    /// Updates the sandbox policy and working directory used for future
    /// executions without recreating the executor.
    pub(crate) fn update_environment(&self, sandbox_policy: SandboxPolicy, sandbox_cwd: PathBuf) {
        if let Ok(mut cfg) = self.config.write() {
            cfg.sandbox_policy = sandbox_policy;
            cfg.sandbox_cwd = sandbox_cwd;
        }
    }

    /// Runs a prepared execution request end-to-end: prepares parameters, decides on
    /// sandbox placement (prompting the user when necessary), launches the command,
    /// and lets the backend post-process the final output.
    pub(crate) async fn run<F, Fut>(
        &self,
        mut request: ExecutionRequest,
        session: &Session,
        approval_policy: AskForApproval,
        context: &ExecCommandContext,
        on_exec_begin: F,
    ) -> Result<ExecToolCallOutput, ExecError>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = ()>,
    {
        if matches!(request.mode, ExecutionMode::Shell) {
            request.params =
                maybe_translate_shell_command(request.params, session, request.use_shell_profile);
        }

        // Step 1: Snapshot sandbox configuration so it stays stable for this run.
        let config = self
            .config
            .read()
            .map_err(|_| ExecError::rejection("executor config poisoned"))?
            .clone();

        // Step 2: Normalise parameters via the selected backend.
        let backend = backend_for_mode(&request.mode);
        let stdout_stream = if backend.stream_stdout(&request.mode) {
            request.stdout_stream.clone()
        } else {
            None
        };
        request.params = backend
            .prepare(request.params, &request.mode, &config)
            .map_err(ExecError::from)?;

        // Step 3: Decide sandbox placement, prompting for approval when needed.
        let sandbox_decision = select_sandbox(
            &request,
            approval_policy,
            self.approval_cache.snapshot(),
            &config,
            session,
            &context.sub_id,
            &context.call_id,
            &context.otel_event_manager,
        )
        .await?;
        if sandbox_decision.record_session_approval {
            self.approval_cache.insert(request.approval_command.clone());
        }
        on_exec_begin().await;

        // Step 4: Launch the command within the chosen sandbox.
        let first_attempt = self
            .spawn(
                request.params.clone(),
                sandbox_decision.initial_sandbox,
                &config,
                stdout_stream.clone(),
            )
            .await;

        // Step 5: Handle sandbox outcomes, optionally escalating to an unsandboxed retry.
        match first_attempt {
            Ok(output) => Ok(output),
            Err(CodexErr::Sandbox(SandboxErr::Timeout { output })) => {
                Err(CodexErr::Sandbox(SandboxErr::Timeout { output }).into())
            }
            Err(CodexErr::Sandbox(error)) => {
                if sandbox_decision.escalate_on_failure {
                    self.retry_without_sandbox(
                        &request,
                        &config,
                        session,
                        context,
                        stdout_stream,
                        error,
                    )
                    .await
                } else {
                    let message = sandbox_failure_message(error);
                    Err(ExecError::rejection(message))
                }
            }
            Err(err) => Err(err.into()),
        }
    }

    /// Fallback path invoked when a sandboxed run is denied so the user can
    /// approve rerunning without isolation.
    async fn retry_without_sandbox(
        &self,
        request: &ExecutionRequest,
        config: &ExecutorConfig,
        session: &Session,
        context: &ExecCommandContext,
        stdout_stream: Option<StdoutStream>,
        sandbox_error: SandboxErr,
    ) -> Result<ExecToolCallOutput, ExecError> {
        session
            .notify_background_event(
                &context.sub_id,
                format!("Execution failed: {sandbox_error}"),
            )
            .await;
        let decision = session
            .request_command_approval(
                context.sub_id.to_string(),
                context.call_id.to_string(),
                request.approval_command.clone(),
                request.params.cwd.clone(),
                Some("command failed; retry without sandbox?".to_string()),
            )
            .await;

        context.otel_event_manager.tool_decision(
            &context.tool_name,
            &context.call_id,
            decision,
            ToolDecisionSource::User,
        );
        match decision {
            ReviewDecision::Approved | ReviewDecision::ApprovedForSession => {
                if matches!(decision, ReviewDecision::ApprovedForSession) {
                    self.approval_cache.insert(request.approval_command.clone());
                }
                session
                    .notify_background_event(&context.sub_id, "retrying command without sandbox")
                    .await;

                let retry_output = self
                    .spawn(
                        request.params.clone(),
                        SandboxType::None,
                        config,
                        stdout_stream,
                    )
                    .await?;

                Ok(retry_output)
            }
            ReviewDecision::Denied | ReviewDecision::Abort => {
                Err(ExecError::denied("exec command rejected by user"))
            }
        }
    }

    async fn spawn(
        &self,
        params: ExecParams,
        sandbox: SandboxType,
        config: &ExecutorConfig,
        stdout_stream: Option<StdoutStream>,
    ) -> Result<ExecToolCallOutput, CodexErr> {
        process_exec_tool_call(
            params,
            sandbox,
            &config.sandbox_policy,
            &config.sandbox_cwd,
            &config.codex_exe,
            stdout_stream,
        )
        .await
    }
}

fn maybe_translate_shell_command(
    params: ExecParams,
    session: &Session,
    use_shell_profile: bool,
) -> ExecParams {
    let should_translate =
        matches!(session.user_shell(), shell::Shell::PowerShell(_)) || use_shell_profile;

    if should_translate
        && let Some(command) = session
            .user_shell()
            .format_default_shell_invocation(params.command.clone())
    {
        return ExecParams { command, ..params };
    }

    params
}

fn sandbox_failure_message(error: SandboxErr) -> String {
    let codex_error = CodexErr::Sandbox(error);
    let friendly = get_error_message_ui(&codex_error);
    format!("failed in sandbox: {friendly}")
}

pub(crate) struct ExecutionRequest {
    pub params: ExecParams,
    pub approval_command: Vec<String>,
    pub mode: ExecutionMode,
    pub stdout_stream: Option<StdoutStream>,
    pub use_shell_profile: bool,
}

pub(crate) struct NormalizedExecOutput<'a> {
    borrowed: Option<&'a ExecToolCallOutput>,
    synthetic: Option<ExecToolCallOutput>,
}

impl<'a> NormalizedExecOutput<'a> {
    pub(crate) fn event_output(&'a self) -> &'a ExecToolCallOutput {
        match (self.borrowed, self.synthetic.as_ref()) {
            (Some(output), _) => output,
            (None, Some(output)) => output,
            (None, None) => unreachable!("normalized exec output missing data"),
        }
    }
}

/// Converts a raw execution result into a uniform view that always exposes an
/// [`ExecToolCallOutput`], synthesizing error output when the command fails
/// before producing a response.
pub(crate) fn normalize_exec_result(
    result: &Result<ExecToolCallOutput, ExecError>,
) -> NormalizedExecOutput<'_> {
    match result {
        Ok(output) => NormalizedExecOutput {
            borrowed: Some(output),
            synthetic: None,
        },
        Err(ExecError::Codex(CodexErr::Sandbox(SandboxErr::Timeout { output }))) => {
            NormalizedExecOutput {
                borrowed: Some(output.as_ref()),
                synthetic: None,
            }
        }
        Err(err) => {
            let message = match err {
                ExecError::Function(FunctionCallError::RespondToModel(msg))
                | ExecError::Function(FunctionCallError::Denied(msg)) => msg.clone(),
                ExecError::Codex(e) => get_error_message_ui(e),
                err => err.to_string(),
            };
            let synthetic = ExecToolCallOutput {
                exit_code: -1,
                stdout: StreamOutput::new(String::new()),
                stderr: StreamOutput::new(message.clone()),
                aggregated_output: StreamOutput::new(message),
                duration: Duration::default(),
                timed_out: false,
            };
            NormalizedExecOutput {
                borrowed: None,
                synthetic: Some(synthetic),
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::error::CodexErr;
    use crate::error::EnvVarError;
    use crate::error::SandboxErr;
    use crate::exec::StreamOutput;
    use pretty_assertions::assert_eq;

    fn make_output(text: &str) -> ExecToolCallOutput {
        ExecToolCallOutput {
            exit_code: 1,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new(text.to_string()),
            duration: Duration::from_millis(123),
            timed_out: false,
        }
    }

    #[test]
    fn normalize_success_borrows() {
        let out = make_output("ok");
        let result: Result<ExecToolCallOutput, ExecError> = Ok(out);
        let normalized = normalize_exec_result(&result);
        assert_eq!(normalized.event_output().aggregated_output.text, "ok");
    }

    #[test]
    fn normalize_timeout_borrows_embedded_output() {
        let out = make_output("timed out payload");
        let err = CodexErr::Sandbox(SandboxErr::Timeout {
            output: Box::new(out),
        });
        let result: Result<ExecToolCallOutput, ExecError> = Err(ExecError::Codex(err));
        let normalized = normalize_exec_result(&result);
        assert_eq!(
            normalized.event_output().aggregated_output.text,
            "timed out payload"
        );
    }

    #[test]
    fn sandbox_failure_message_uses_denied_stderr() {
        let output = ExecToolCallOutput {
            exit_code: 101,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new("sandbox stderr".to_string()),
            aggregated_output: StreamOutput::new(String::new()),
            duration: Duration::from_millis(10),
            timed_out: false,
        };
        let err = SandboxErr::Denied {
            output: Box::new(output),
        };
        let message = sandbox_failure_message(err);
        assert_eq!(message, "failed in sandbox: sandbox stderr");
    }

    #[test]
    fn sandbox_failure_message_falls_back_to_aggregated_output() {
        let output = ExecToolCallOutput {
            exit_code: 101,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new("aggregate text".to_string()),
            duration: Duration::from_millis(10),
            timed_out: false,
        };
        let err = SandboxErr::Denied {
            output: Box::new(output),
        };
        let message = sandbox_failure_message(err);
        assert_eq!(message, "failed in sandbox: aggregate text");
    }

    #[test]
    fn normalize_function_error_synthesizes_payload() {
        let err = FunctionCallError::RespondToModel("boom".to_string());
        let result: Result<ExecToolCallOutput, ExecError> = Err(ExecError::Function(err));
        let normalized = normalize_exec_result(&result);
        assert_eq!(normalized.event_output().aggregated_output.text, "boom");
    }

    #[test]
    fn normalize_codex_error_synthesizes_user_message() {
        // Use a simple EnvVar error which formats to a clear message
        let e = CodexErr::EnvVar(EnvVarError {
            var: "FOO".to_string(),
            instructions: Some("set it".to_string()),
        });
        let result: Result<ExecToolCallOutput, ExecError> = Err(ExecError::Codex(e));
        let normalized = normalize_exec_result(&result);
        assert!(
            normalized
                .event_output()
                .aggregated_output
                .text
                .contains("Missing environment variable: `FOO`"),
            "expected synthesized user-friendly message"
        );
    }
}
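`NormalizedExecOutput` is a borrow-or-synthesize wrapper: successful and timeout results are borrowed, every other failure gets an owned synthesized payload, and one accessor hides the difference. A standalone sketch of the same idea with illustrative types; an enum arguably expresses the invariant more directly than the two-`Option` struct, which is why no `unreachable!` arm is needed here:

struct Output {
    exit_code: i32,
    text: String,
}

enum Normalized<'a> {
    Borrowed(&'a Output),
    Synthetic(Output),
}

impl<'a> Normalized<'a> {
    fn view(&self) -> &Output {
        match self {
            Normalized::Borrowed(output) => output,
            Normalized::Synthetic(output) => output,
        }
    }
}

fn normalize(result: &Result<Output, String>) -> Normalized<'_> {
    match result {
        // Success: no copy, just borrow the caller's output.
        Ok(output) => Normalized::Borrowed(output),
        // Failure: fabricate an output so downstream event code sees one shape.
        Err(message) => Normalized::Synthetic(Output {
            exit_code: -1,
            text: message.clone(),
        }),
    }
}

fn main() {
    let ok: Result<Output, String> = Ok(Output { exit_code: 0, text: "done".into() });
    assert_eq!(normalize(&ok).view().text, "done");

    let err: Result<Output, String> = Err("boom".into());
    assert_eq!(normalize(&err).view().exit_code, -1);
}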
codex-rs/core/src/executor/sandbox.rs (new file, 405 lines)
@@ -0,0 +1,405 @@
use crate::apply_patch::ApplyPatchExec;
use crate::codex::Session;
use crate::exec::SandboxType;
use crate::executor::ExecutionMode;
use crate::executor::ExecutionRequest;
use crate::executor::ExecutorConfig;
use crate::executor::errors::ExecError;
use crate::safety::SafetyCheck;
use crate::safety::assess_command_safety;
use crate::safety::assess_patch_safety;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_otel::otel_event_manager::ToolDecisionSource;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::ReviewDecision;
use std::collections::HashSet;

/// Sandbox placement options selected for an execution run, including whether
/// to escalate after failures and whether approvals should persist.
pub(crate) struct SandboxDecision {
    pub(crate) initial_sandbox: SandboxType,
    pub(crate) escalate_on_failure: bool,
    pub(crate) record_session_approval: bool,
}

impl SandboxDecision {
    fn auto(sandbox: SandboxType, escalate_on_failure: bool) -> Self {
        Self {
            initial_sandbox: sandbox,
            escalate_on_failure,
            record_session_approval: false,
        }
    }

    fn user_override(record_session_approval: bool) -> Self {
        Self {
            initial_sandbox: SandboxType::None,
            escalate_on_failure: false,
            record_session_approval,
        }
    }
}

fn should_escalate_on_failure(approval: AskForApproval, sandbox: SandboxType) -> bool {
    matches!(
        (approval, sandbox),
        (
            AskForApproval::UnlessTrusted | AskForApproval::OnFailure,
            SandboxType::MacosSeatbelt | SandboxType::LinuxSeccomp
        )
    )
}

/// Determines how a command should be sandboxed, prompting the user when
/// policy requires explicit approval.
#[allow(clippy::too_many_arguments)]
pub async fn select_sandbox(
    request: &ExecutionRequest,
    approval_policy: AskForApproval,
    approval_cache: HashSet<Vec<String>>,
    config: &ExecutorConfig,
    session: &Session,
    sub_id: &str,
    call_id: &str,
    otel_event_manager: &OtelEventManager,
) -> Result<SandboxDecision, ExecError> {
    match &request.mode {
        ExecutionMode::Shell => {
            select_shell_sandbox(
                request,
                approval_policy,
                approval_cache,
                config,
                session,
                sub_id,
                call_id,
                otel_event_manager,
            )
            .await
        }
        ExecutionMode::ApplyPatch(exec) => {
            select_apply_patch_sandbox(exec, approval_policy, config)
        }
    }
}

#[allow(clippy::too_many_arguments)]
async fn select_shell_sandbox(
    request: &ExecutionRequest,
    approval_policy: AskForApproval,
    approved_snapshot: HashSet<Vec<String>>,
    config: &ExecutorConfig,
    session: &Session,
    sub_id: &str,
    call_id: &str,
    otel_event_manager: &OtelEventManager,
) -> Result<SandboxDecision, ExecError> {
    let command_for_safety = if request.approval_command.is_empty() {
        request.params.command.clone()
    } else {
        request.approval_command.clone()
    };

    let safety = assess_command_safety(
        &command_for_safety,
        approval_policy,
        &config.sandbox_policy,
        &approved_snapshot,
        request.params.with_escalated_permissions.unwrap_or(false),
    );

    match safety {
        SafetyCheck::AutoApprove {
            sandbox_type,
            user_explicitly_approved,
        } => {
            let mut decision = SandboxDecision::auto(
                sandbox_type,
                should_escalate_on_failure(approval_policy, sandbox_type),
            );
            if user_explicitly_approved {
                decision.record_session_approval = true;
            }
            let (decision_for_event, source) = if user_explicitly_approved {
                (ReviewDecision::ApprovedForSession, ToolDecisionSource::User)
            } else {
                (ReviewDecision::Approved, ToolDecisionSource::Config)
            };
            otel_event_manager.tool_decision("local_shell", call_id, decision_for_event, source);
            Ok(decision)
        }
        SafetyCheck::AskUser => {
            let decision = session
                .request_command_approval(
                    sub_id.to_string(),
                    call_id.to_string(),
                    request.approval_command.clone(),
                    request.params.cwd.clone(),
                    request.params.justification.clone(),
                )
                .await;

            otel_event_manager.tool_decision(
                "local_shell",
                call_id,
                decision,
                ToolDecisionSource::User,
            );
            match decision {
                ReviewDecision::Approved => Ok(SandboxDecision::user_override(false)),
                ReviewDecision::ApprovedForSession => Ok(SandboxDecision::user_override(true)),
                ReviewDecision::Denied | ReviewDecision::Abort => {
                    Err(ExecError::denied("exec command rejected by user"))
                }
            }
        }
        SafetyCheck::Reject { reason } => Err(ExecError::rejection(format!(
            "exec command rejected: {reason}"
        ))),
    }
}

fn select_apply_patch_sandbox(
    exec: &ApplyPatchExec,
    approval_policy: AskForApproval,
    config: &ExecutorConfig,
) -> Result<SandboxDecision, ExecError> {
    if exec.user_explicitly_approved_this_action {
        return Ok(SandboxDecision::user_override(false));
    }

    match assess_patch_safety(
        &exec.action,
        approval_policy,
        &config.sandbox_policy,
        &config.sandbox_cwd,
    ) {
        SafetyCheck::AutoApprove { sandbox_type, .. } => Ok(SandboxDecision::auto(
            sandbox_type,
            should_escalate_on_failure(approval_policy, sandbox_type),
        )),
        SafetyCheck::AskUser => Err(ExecError::rejection(
            "patch requires approval but none was recorded",
        )),
        SafetyCheck::Reject { reason } => {
            Err(ExecError::rejection(format!("patch rejected: {reason}")))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::codex::make_session_and_context;
    use crate::exec::ExecParams;
    use crate::function_tool::FunctionCallError;
    use crate::protocol::SandboxPolicy;
    use codex_apply_patch::ApplyPatchAction;
    use pretty_assertions::assert_eq;

    #[tokio::test]
    async fn select_apply_patch_user_override_when_explicit() {
        let (session, ctx) = make_session_and_context();
        let tmp = tempfile::tempdir().expect("tmp");
        let p = tmp.path().join("a.txt");
        let action = ApplyPatchAction::new_add_for_test(&p, "hello".to_string());
        let exec = ApplyPatchExec {
            action,
            user_explicitly_approved_this_action: true,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
                cwd: std::env::temp_dir(),
                timeout_ms: None,
                env: std::collections::HashMap::new(),
                with_escalated_permissions: None,
                justification: None,
            },
            approval_command: vec!["apply_patch".into()],
            mode: ExecutionMode::ApplyPatch(exec),
            stdout_stream: None,
            use_shell_profile: false,
        };
        let otel_event_manager = ctx.client.get_otel_event_manager();
        let decision = select_sandbox(
            &request,
            AskForApproval::OnRequest,
            Default::default(),
            &cfg,
            &session,
            "sub",
            "call",
            &otel_event_manager,
        )
        .await
        .expect("ok");
        // Explicit user override runs without sandbox
        assert_eq!(decision.initial_sandbox, SandboxType::None);
        assert_eq!(decision.escalate_on_failure, false);
    }

    #[tokio::test]
    async fn select_apply_patch_autoapprove_in_danger() {
        let (session, ctx) = make_session_and_context();
        let tmp = tempfile::tempdir().expect("tmp");
        let p = tmp.path().join("a.txt");
        let action = ApplyPatchAction::new_add_for_test(&p, "hello".to_string());
        let exec = ApplyPatchExec {
            action,
            user_explicitly_approved_this_action: false,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::DangerFullAccess, std::env::temp_dir(), None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
                cwd: std::env::temp_dir(),
                timeout_ms: None,
                env: std::collections::HashMap::new(),
                with_escalated_permissions: None,
                justification: None,
            },
            approval_command: vec!["apply_patch".into()],
            mode: ExecutionMode::ApplyPatch(exec),
            stdout_stream: None,
            use_shell_profile: false,
        };
        let otel_event_manager = ctx.client.get_otel_event_manager();
        let decision = select_sandbox(
            &request,
            AskForApproval::OnRequest,
            Default::default(),
            &cfg,
            &session,
            "sub",
            "call",
            &otel_event_manager,
        )
        .await
        .expect("ok");
        // On platforms with a sandbox, DangerFullAccess still prefers it
        let expected = crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None);
        assert_eq!(decision.initial_sandbox, expected);
        assert_eq!(decision.escalate_on_failure, false);
    }

    #[tokio::test]
    async fn select_apply_patch_requires_approval_on_unless_trusted() {
        let (session, ctx) = make_session_and_context();
        let tempdir = tempfile::tempdir().expect("tmpdir");
        let p = tempdir.path().join("a.txt");
        let action = ApplyPatchAction::new_add_for_test(&p, "hello".to_string());
        let exec = ApplyPatchExec {
            action,
            user_explicitly_approved_this_action: false,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
                cwd: std::env::temp_dir(),
                timeout_ms: None,
                env: std::collections::HashMap::new(),
                with_escalated_permissions: None,
                justification: None,
            },
            approval_command: vec!["apply_patch".into()],
            mode: ExecutionMode::ApplyPatch(exec),
            stdout_stream: None,
            use_shell_profile: false,
        };
        let otel_event_manager = ctx.client.get_otel_event_manager();
        let result = select_sandbox(
            &request,
            AskForApproval::UnlessTrusted,
            Default::default(),
            &cfg,
            &session,
            "sub",
            "call",
            &otel_event_manager,
        )
        .await;
        match result {
            Ok(_) => panic!("expected error"),
            Err(ExecError::Function(FunctionCallError::RespondToModel(msg))) => {
                assert!(msg.contains("requires approval"))
            }
            Err(other) => panic!("unexpected error: {other:?}"),
        }
    }

    #[tokio::test]
    async fn select_shell_autoapprove_in_danger_mode() {
        let (session, ctx) = make_session_and_context();
        let cfg = ExecutorConfig::new(SandboxPolicy::DangerFullAccess, std::env::temp_dir(), None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["some-unknown".into()],
                cwd: std::env::temp_dir(),
                timeout_ms: None,
                env: std::collections::HashMap::new(),
                with_escalated_permissions: None,
                justification: None,
            },
            approval_command: vec!["some-unknown".into()],
            mode: ExecutionMode::Shell,
            stdout_stream: None,
            use_shell_profile: false,
        };
        let otel_event_manager = ctx.client.get_otel_event_manager();
        let decision = select_sandbox(
            &request,
            AskForApproval::OnRequest,
            Default::default(),
            &cfg,
            &session,
            "sub",
            "call",
            &otel_event_manager,
        )
        .await
        .expect("ok");
        assert_eq!(decision.initial_sandbox, SandboxType::None);
        assert_eq!(decision.escalate_on_failure, false);
    }

    #[cfg(any(target_os = "macos", target_os = "linux"))]
    #[tokio::test]
    async fn select_shell_escalates_on_failure_with_platform_sandbox() {
        let (session, ctx) = make_session_and_context();
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let request = ExecutionRequest {
            params: ExecParams {
                // Unknown command => untrusted but not flagged dangerous
                command: vec!["some-unknown".into()],
                cwd: std::env::temp_dir(),
                timeout_ms: None,
                env: std::collections::HashMap::new(),
                with_escalated_permissions: None,
                justification: None,
            },
            approval_command: vec!["some-unknown".into()],
            mode: ExecutionMode::Shell,
            stdout_stream: None,
            use_shell_profile: false,
        };
        let otel_event_manager = ctx.client.get_otel_event_manager();
        let decision = select_sandbox(
            &request,
            AskForApproval::OnFailure,
            Default::default(),
            &cfg,
            &session,
            "sub",
            "call",
            &otel_event_manager,
        )
        .await
        .expect("ok");
        // On macOS/Linux we should have a platform sandbox and escalate on failure
        assert_ne!(decision.initial_sandbox, SandboxType::None);
        assert_eq!(decision.escalate_on_failure, true);
    }
}
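The escalation rule reduces to a pattern match over (approval policy, sandbox type) pairs: only a policy that permits retry prompts, combined with a real platform sandbox, leads to an unsandboxed retry after failure. A standalone sketch of that matrix with illustrative enums:

#[derive(Clone, Copy)]
enum Approval {
    UnlessTrusted,
    OnFailure,
    OnRequest,
}

#[derive(Clone, Copy)]
enum Sandbox {
    None,
    MacosSeatbelt,
    LinuxSeccomp,
}

fn should_escalate(approval: Approval, sandbox: Sandbox) -> bool {
    matches!(
        (approval, sandbox),
        (
            Approval::UnlessTrusted | Approval::OnFailure,
            Sandbox::MacosSeatbelt | Sandbox::LinuxSeccomp
        )
    )
}

fn main() {
    // Escalates: failure under a real sandbox with an escalating policy.
    assert!(should_escalate(Approval::OnFailure, Sandbox::LinuxSeccomp));
    // Never escalates when the command already ran unsandboxed...
    assert!(!should_escalate(Approval::OnFailure, Sandbox::None));
    // ...or when the policy does not allow automatic retry prompts.
    assert!(!should_escalate(Approval::OnRequest, Sandbox::MacosSeatbelt));
}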
codex-rs/core/src/features.rs (new file, 258 lines)
@@ -0,0 +1,258 @@
//! Centralized feature flags and metadata.
//!
//! This module defines a small set of toggles that gate experimental and
//! optional behavior across the codebase. Instead of wiring individual
//! booleans through multiple types, call sites consult a single `Features`
//! container attached to `Config`.

use crate::config::ConfigToml;
use crate::config_profile::ConfigProfile;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;

mod legacy;
pub(crate) use legacy::LegacyFeatureToggles;

/// High-level lifecycle stage for a feature.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Stage {
    Experimental,
    Beta,
    Stable,
    Deprecated,
    Removed,
}

/// Unique features toggled via configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Feature {
    /// Use the single unified PTY-backed exec tool.
    UnifiedExec,
    /// Use the streamable exec-command/write-stdin tool pair.
    StreamableShell,
    /// Use the official Rust MCP client (rmcp).
    RmcpClient,
    /// Include the plan tool.
    PlanTool,
    /// Include the freeform apply_patch tool.
    ApplyPatchFreeform,
    /// Include the view_image tool.
    ViewImageTool,
    /// Allow the model to request web searches.
    WebSearchRequest,
    /// Automatically approve all approval requests from the harness.
    ApproveAll,
}

impl Feature {
    pub fn key(self) -> &'static str {
        self.info().key
    }

    pub fn stage(self) -> Stage {
        self.info().stage
    }

    pub fn default_enabled(self) -> bool {
        self.info().default_enabled
    }

    fn info(self) -> &'static FeatureSpec {
        FEATURES
            .iter()
            .find(|spec| spec.id == self)
            .unwrap_or_else(|| unreachable!("missing FeatureSpec for {:?}", self))
    }
}

/// Holds the effective set of enabled features.
#[derive(Debug, Clone, Default, PartialEq)]
pub struct Features {
    enabled: BTreeSet<Feature>,
}

#[derive(Debug, Clone, Default)]
pub struct FeatureOverrides {
    pub include_plan_tool: Option<bool>,
    pub include_apply_patch_tool: Option<bool>,
    pub include_view_image_tool: Option<bool>,
    pub web_search_request: Option<bool>,
}

impl FeatureOverrides {
    fn apply(self, features: &mut Features) {
        LegacyFeatureToggles {
            include_plan_tool: self.include_plan_tool,
            include_apply_patch_tool: self.include_apply_patch_tool,
            include_view_image_tool: self.include_view_image_tool,
            tools_web_search: self.web_search_request,
            ..Default::default()
        }
        .apply(features);
    }
}

impl Features {
    /// Starts with built-in defaults.
    pub fn with_defaults() -> Self {
        let mut set = BTreeSet::new();
        for spec in FEATURES {
            if spec.default_enabled {
                set.insert(spec.id);
            }
        }
        Self { enabled: set }
    }

    pub fn enabled(&self, f: Feature) -> bool {
        self.enabled.contains(&f)
    }

    pub fn enable(&mut self, f: Feature) {
        self.enabled.insert(f);
    }

    pub fn disable(&mut self, f: Feature) {
        self.enabled.remove(&f);
    }

    /// Apply a table of key -> bool toggles (e.g. from TOML).
    pub fn apply_map(&mut self, m: &BTreeMap<String, bool>) {
        for (k, v) in m {
            match feature_for_key(k) {
                Some(feat) => {
                    if *v {
                        self.enable(feat);
                    } else {
                        self.disable(feat);
                    }
                }
                None => {
                    tracing::warn!("unknown feature key in config: {k}");
                }
            }
        }
    }

    pub fn from_config(
        cfg: &ConfigToml,
        config_profile: &ConfigProfile,
        overrides: FeatureOverrides,
    ) -> Self {
        let mut features = Features::with_defaults();

        let base_legacy = LegacyFeatureToggles {
            experimental_use_freeform_apply_patch: cfg.experimental_use_freeform_apply_patch,
            experimental_use_exec_command_tool: cfg.experimental_use_exec_command_tool,
            experimental_use_unified_exec_tool: cfg.experimental_use_unified_exec_tool,
            experimental_use_rmcp_client: cfg.experimental_use_rmcp_client,
            tools_web_search: cfg.tools.as_ref().and_then(|t| t.web_search),
            tools_view_image: cfg.tools.as_ref().and_then(|t| t.view_image),
            ..Default::default()
        };
        base_legacy.apply(&mut features);

        if let Some(base_features) = cfg.features.as_ref() {
            features.apply_map(&base_features.entries);
        }

        let profile_legacy = LegacyFeatureToggles {
            include_plan_tool: config_profile.include_plan_tool,
            include_apply_patch_tool: config_profile.include_apply_patch_tool,
            include_view_image_tool: config_profile.include_view_image_tool,
            experimental_use_freeform_apply_patch: config_profile
                .experimental_use_freeform_apply_patch,
            experimental_use_exec_command_tool: config_profile.experimental_use_exec_command_tool,
            experimental_use_unified_exec_tool: config_profile.experimental_use_unified_exec_tool,
            experimental_use_rmcp_client: config_profile.experimental_use_rmcp_client,
            tools_web_search: config_profile.tools_web_search,
            tools_view_image: config_profile.tools_view_image,
        };
        profile_legacy.apply(&mut features);
        if let Some(profile_features) = config_profile.features.as_ref() {
            features.apply_map(&profile_features.entries);
        }

        overrides.apply(&mut features);

        features
    }
}

/// Keys accepted in `[features]` tables.
fn feature_for_key(key: &str) -> Option<Feature> {
    for spec in FEATURES {
        if spec.key == key {
            return Some(spec.id);
        }
    }
    legacy::feature_for_key(key)
}

/// Deserializable features table for TOML.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct FeaturesToml {
    #[serde(flatten)]
    pub entries: BTreeMap<String, bool>,
}

/// Single, easy-to-read registry of all feature definitions.
#[derive(Debug, Clone, Copy)]
pub struct FeatureSpec {
    pub id: Feature,
    pub key: &'static str,
    pub stage: Stage,
    pub default_enabled: bool,
}

pub const FEATURES: &[FeatureSpec] = &[
    FeatureSpec {
        id: Feature::UnifiedExec,
        key: "unified_exec",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::StreamableShell,
        key: "streamable_shell",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::RmcpClient,
        key: "rmcp_client",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::PlanTool,
        key: "plan_tool",
        stage: Stage::Stable,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ApplyPatchFreeform,
        key: "apply_patch_freeform",
        stage: Stage::Beta,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ViewImageTool,
        key: "view_image_tool",
        stage: Stage::Stable,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::WebSearchRequest,
        key: "web_search_request",
        stage: Stage::Stable,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ApproveAll,
        key: "approve_all",
        stage: Stage::Experimental,
        default_enabled: false,
    },
];
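Resolution is layered: built-in defaults, then the base `[features]` table, then the profile's table, then explicit overrides, so later layers win key by key. A standalone sketch of that precedence using a plain string set (the real code maps keys to the `Feature` enum via the registry above):

use std::collections::{BTreeMap, BTreeSet};

// Mirrors Features::apply_map: `true` enables a key, `false` disables it,
// and later tables override earlier ones.
fn apply_map(enabled: &mut BTreeSet<String>, table: &BTreeMap<String, bool>) {
    for (key, on) in table {
        if *on {
            enabled.insert(key.clone());
        } else {
            enabled.remove(key);
        }
    }
}

fn main() {
    // Defaults: only view_image_tool is on by default per the registry above.
    let mut enabled: BTreeSet<String> = ["view_image_tool".to_string()].into();

    // Base config layer, e.g. `[features]` with `unified_exec = true`.
    let base = BTreeMap::from([("unified_exec".to_string(), true)]);
    apply_map(&mut enabled, &base);

    // Profile layer flips unified_exec back off and enables web search.
    let profile = BTreeMap::from([
        ("unified_exec".to_string(), false),
        ("web_search_request".to_string(), true),
    ]);
    apply_map(&mut enabled, &profile);

    assert!(enabled.contains("view_image_tool"));
    assert!(enabled.contains("web_search_request"));
    assert!(!enabled.contains("unified_exec")); // the profile overrode the base
}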
codex-rs/core/src/features/legacy.rs (new file, 158 lines)
@@ -0,0 +1,158 @@
use super::Feature;
use super::Features;
use tracing::info;

#[derive(Clone, Copy)]
struct Alias {
    legacy_key: &'static str,
    feature: Feature,
}

const ALIASES: &[Alias] = &[
    Alias {
        legacy_key: "experimental_use_unified_exec_tool",
        feature: Feature::UnifiedExec,
    },
    Alias {
        legacy_key: "experimental_use_exec_command_tool",
        feature: Feature::StreamableShell,
    },
    Alias {
        legacy_key: "experimental_use_rmcp_client",
        feature: Feature::RmcpClient,
    },
    Alias {
        legacy_key: "experimental_use_freeform_apply_patch",
        feature: Feature::ApplyPatchFreeform,
    },
    Alias {
        legacy_key: "include_apply_patch_tool",
        feature: Feature::ApplyPatchFreeform,
    },
    Alias {
        legacy_key: "include_plan_tool",
        feature: Feature::PlanTool,
    },
    Alias {
        legacy_key: "include_view_image_tool",
        feature: Feature::ViewImageTool,
    },
    Alias {
        legacy_key: "web_search",
        feature: Feature::WebSearchRequest,
    },
];

pub(crate) fn feature_for_key(key: &str) -> Option<Feature> {
    ALIASES
        .iter()
        .find(|alias| alias.legacy_key == key)
        .map(|alias| {
            log_alias(alias.legacy_key, alias.feature);
            alias.feature
        })
}

#[derive(Debug, Default)]
pub struct LegacyFeatureToggles {
    pub include_plan_tool: Option<bool>,
    pub include_apply_patch_tool: Option<bool>,
    pub include_view_image_tool: Option<bool>,
    pub experimental_use_freeform_apply_patch: Option<bool>,
    pub experimental_use_exec_command_tool: Option<bool>,
    pub experimental_use_unified_exec_tool: Option<bool>,
    pub experimental_use_rmcp_client: Option<bool>,
    pub tools_web_search: Option<bool>,
    pub tools_view_image: Option<bool>,
}

impl LegacyFeatureToggles {
    pub fn apply(self, features: &mut Features) {
        set_if_some(
            features,
            Feature::PlanTool,
            self.include_plan_tool,
            "include_plan_tool",
        );
        set_if_some(
            features,
            Feature::ApplyPatchFreeform,
            self.include_apply_patch_tool,
            "include_apply_patch_tool",
        );
        set_if_some(
            features,
            Feature::ApplyPatchFreeform,
            self.experimental_use_freeform_apply_patch,
            "experimental_use_freeform_apply_patch",
        );
        set_if_some(
            features,
            Feature::StreamableShell,
            self.experimental_use_exec_command_tool,
            "experimental_use_exec_command_tool",
        );
        set_if_some(
            features,
            Feature::UnifiedExec,
            self.experimental_use_unified_exec_tool,
            "experimental_use_unified_exec_tool",
        );
        set_if_some(
            features,
            Feature::RmcpClient,
            self.experimental_use_rmcp_client,
            "experimental_use_rmcp_client",
        );
        set_if_some(
            features,
            Feature::WebSearchRequest,
            self.tools_web_search,
            "tools.web_search",
        );
        set_if_some(
            features,
            Feature::ViewImageTool,
            self.include_view_image_tool,
            "include_view_image_tool",
        );
        set_if_some(
            features,
            Feature::ViewImageTool,
            self.tools_view_image,
            "tools.view_image",
        );
    }
}

fn set_if_some(
    features: &mut Features,
    feature: Feature,
    maybe_value: Option<bool>,
    alias_key: &'static str,
) {
    if let Some(enabled) = maybe_value {
        set_feature(features, feature, enabled);
        log_alias(alias_key, feature);
    }
}

fn set_feature(features: &mut Features, feature: Feature, enabled: bool) {
    if enabled {
        features.enable(feature);
    } else {
        features.disable(feature);
    }
}

fn log_alias(alias: &str, feature: Feature) {
    let canonical = feature.key();
    if alias == canonical {
        return;
    }
    info!(
        %alias,
        canonical,
        "legacy feature toggle detected; prefer `[features].{canonical}`"
    );
}
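The alias table gives old configuration keys a migration path: they resolve to the same feature but log a hint pointing at the canonical `[features]` key. A standalone sketch of the lookup, with `eprintln!` standing in for `tracing::info!` (illustrative names, only two features shown):

#[derive(Clone, Copy, Debug, PartialEq)]
enum Feature {
    ApplyPatchFreeform,
    WebSearchRequest,
}

fn canonical_key(feature: Feature) -> &'static str {
    match feature {
        Feature::ApplyPatchFreeform => "apply_patch_freeform",
        Feature::WebSearchRequest => "web_search_request",
    }
}

// Legacy key -> feature pairs, mirroring the ALIASES table above.
const ALIASES: &[(&str, Feature)] = &[
    ("include_apply_patch_tool", Feature::ApplyPatchFreeform),
    ("web_search", Feature::WebSearchRequest),
];

fn feature_for_key(key: &str) -> Option<Feature> {
    ALIASES
        .iter()
        .find(|(legacy, _)| *legacy == key)
        .map(|&(legacy, feature)| {
            if legacy != canonical_key(feature) {
                // Resolve anyway, but nudge the user toward the new spelling.
                eprintln!(
                    "legacy feature toggle `{legacy}`; prefer `[features].{}`",
                    canonical_key(feature)
                );
            }
            feature
        })
}

fn main() {
    assert_eq!(feature_for_key("web_search"), Some(Feature::WebSearchRequest));
    assert_eq!(feature_for_key("unknown"), None);
}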
@@ -4,4 +4,10 @@ use thiserror::Error;
 pub enum FunctionCallError {
     #[error("{0}")]
     RespondToModel(String),
+    #[error("{0}")]
+    Denied(String),
+    #[error("LocalShellCall without call_id or id")]
+    MissingLocalShellCallId,
+    #[error("Fatal error: {0}")]
+    Fatal(String),
 }
Some files were not shown because too many files have changed in this diff.