Mirror of https://github.com/openai/codex.git
Synced 2026-02-05 00:13:42 +00:00

Compare commits: 52 commits, plan-defau...add-admin-
| SHA1 |
|---|
| f08cb68a62 |
| 357612da38 |
| 07442e4533 |
| 9f850f8bb6 |
| 662bc2c9ab |
| 81ec812bcc |
| deebfb9d37 |
| 7d3cf212e1 |
| ec9bf6f53e |
| 2c668fa4a1 |
| a5b7675e42 |
| 9823de3cc6 |
| c32e9cfe86 |
| 1d17ca1fa3 |
| bfe3328129 |
| e0b38bd7a2 |
| 153338c20f |
| 3495a7dc37 |
| 042d4d55d9 |
| 5af08e0719 |
| 33d3ecbccc |
| 69cb72f842 |
| 69ac5153d4 |
| 16b6951648 |
| 231c36f8d3 |
| 1e4541b982 |
| 7be3b484ad |
| 9617b69c8a |
| 1d94b9111c |
| 2d6cd6951a |
| 310e3c32e5 |
| 37786593a0 |
| 819a5782b6 |
| c0a84473a4 |
| 591a8ecc16 |
| c405d8c06c |
| 138be0fd73 |
| 25a2e15ec5 |
| 62cc8a4b8d |
| f895d4cbb3 |
| ed5d656fa8 |
| c43a561916 |
| b93cc0f431 |
| 4c566d484a |
| 06e34d4607 |
| 45936f8fbd |
| ec98445abf |
| b07aafa5f5 |
| b727d3f98a |
| 2f6fb37d72 |
| 35c76ad47d |
| c07fb71186 |
18
.github/prompts/issue-deduplicator.txt
vendored
Normal file
@@ -0,0 +1,18 @@
You are an assistant that triages new GitHub issues by identifying potential duplicates.

You will receive the following JSON files located in the current working directory:
- `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
- `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).

Instructions:
- Load both files as JSON and review their contents carefully. The codex-existing-issues.json file is large; ensure you explore all of it.
- Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
- Only consider an issue a potential duplicate if there is a clear overlap in symptoms, feature requests, reproduction steps, or error messages.
- Prioritize newer issues when similarity is comparable.
- Ignore pull requests and issues whose similarity is tenuous.
- When unsure, prefer returning fewer matches.

Output requirements:
- Respond with a JSON array of issue numbers (integers), ordered from most likely duplicate to least.
- Include at most five numbers.
- If you find no plausible duplicates, respond with `[]`.
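A conforming response that flags two plausible duplicates (issue numbers hypothetical) would be:

```json
[4321, 1234]
```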
26
.github/prompts/issue-labeler.txt
vendored
Normal file
@@ -0,0 +1,26 @@
You are an assistant that reviews GitHub issues for the repository.

Your job is to choose the most appropriate existing labels for the issue described later in this prompt.
Follow these rules:
- Only pick labels from the list below.
- Prefer a small set of precise labels over many broad ones.
- If none of the labels fit, respond with an empty JSON array: []
- Output must be a JSON array of label names (strings) with no additional commentary.

Labels to apply:
1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
3. extension — VS Code (or other IDE) extension-specific issues.
4. windows-os — Bugs or friction specific to Windows environments (PowerShell behavior, path handling, copy/paste, OS-specific auth or tooling failures).
5. mcp — Topics involving Model Context Protocol servers/clients.
6. codex-web — Issues targeting the Codex web UI/Cloud experience.
8. azure — Problems or requests tied to Azure OpenAI deployments.
9. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
10. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.

Issue information is available in environment variables:

ISSUE_NUMBER
ISSUE_TITLE
ISSUE_BODY
REPO_FULL_NAME
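A conforming response for, say, a PowerShell-specific defect report would be:

```json
["bug", "windows-os"]
```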
3
.github/workflows/ci.yml
vendored
@@ -60,3 +60,6 @@ jobs:
       run: ./scripts/asciicheck.py codex-cli/README.md
     - name: Check codex-cli/README ToC
       run: python3 scripts/readme_toc.py codex-cli/README.md
+
+    - name: Prettier (run `pnpm run format:fix` to fix)
+      run: pnpm run format
130
.github/workflows/issue-deduplicator.yml
vendored
Normal file
@@ -0,0 +1,130 @@
name: Issue Deduplicator

on:
  issues:
    types:
      - opened
      - labeled

jobs:
  gather-duplicates:
    name: Identify potential duplicates
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
    outputs:
      codex_output: ${{ steps.codex.outputs.final_message }}
    steps:
      - uses: actions/checkout@v4

      - name: Prepare Codex inputs
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -eo pipefail

          CURRENT_ISSUE_FILE=codex-current-issue.json
          EXISTING_ISSUES_FILE=codex-existing-issues.json

          gh issue list --repo "${{ github.repository }}" \
            --json number,title,body,createdAt \
            --limit 1000 \
            --state all \
            --search "sort:created-desc" \
            | jq '.' \
            > "$EXISTING_ISSUES_FILE"

          gh issue view "${{ github.event.issue.number }}" \
            --repo "${{ github.repository }}" \
            --json number,title,body \
            | jq '.' \
            > "$CURRENT_ISSUE_FILE"

      - id: codex
        uses: openai/codex-action@main
        with:
          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          require_repo_write: false
          model: gpt-5
          prompt: |
            You are an assistant that triages new GitHub issues by identifying potential duplicates.

            You will receive the following JSON files located in the current working directory:
            - `codex-current-issue.json`: JSON object describing the newly created issue (fields: number, title, body).
            - `codex-existing-issues.json`: JSON array of recent issues (each element includes number, title, body, createdAt).

            Instructions:
            - Load both files as JSON and review their contents carefully. The codex-existing-issues.json file is large; ensure you explore all of it.
            - Compare the current issue against the existing issues to find up to five that appear to describe the same underlying problem or request.
            - When unsure, prefer returning fewer matches.
            - Include at most five numbers.

          output_schema: |
            {
              "type": "object",
              "properties": {
                "issues": {
                  "type": "array",
                  "items": {
                    "type": "string"
                  }
                }
              },
              "required": ["issues"],
              "additionalProperties": false
            }

  comment-on-issue:
    name: Comment with potential duplicates
    needs: gather-duplicates
    if: ${{ needs.gather-duplicates.result != 'skipped' }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
      issues: write
    steps:
      - name: Comment on issue
        uses: actions/github-script@v7
        env:
          CODEX_OUTPUT: ${{ needs.gather-duplicates.outputs.codex_output }}
        with:
          github-token: ${{ github.token }}
          script: |
            const raw = process.env.CODEX_OUTPUT ?? '';
            let parsed;
            try {
              parsed = JSON.parse(raw);
            } catch (error) {
              core.info(`Codex output was not valid JSON. Raw output: ${raw}`);
              core.info(`Parse error: ${error.message}`);
              return;
            }

            const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
            if (issues.length === 0) {
              core.info('Codex reported no potential duplicates.');
              return;
            }

            const lines = [
              'Potential duplicates detected:',
              ...issues.map((value) => `- #${String(value)}`),
              '',
              '*Powered by [Codex Action](https://github.com/openai/codex-action)*',
            ];

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.issue.number,
              body: lines.join("\n"),
            });

      - name: Remove codex-deduplicate label
        if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate' }}
        env:
          GH_TOKEN: ${{ github.token }}
          GH_REPO: ${{ github.repository }}
        run: |
          gh issue edit "${{ github.event.issue.number }}" --remove-label codex-deduplicate || true
          echo "Attempted to remove label: codex-deduplicate"
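Given the `output_schema` above, the `final_message` the comment job consumes should look roughly like this (issue numbers hypothetical):

```json
{"issues": ["4321", "1234"]}
```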
115
.github/workflows/issue-labeler.yml
vendored
Normal file
@@ -0,0 +1,115 @@
name: Issue Labeler

on:
  issues:
    types:
      - opened
      - labeled

jobs:
  gather-labels:
    name: Generate label suggestions
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
    outputs:
      codex_output: ${{ steps.codex.outputs.final_message }}
    steps:
      - uses: actions/checkout@v4

      - id: codex
        uses: openai/codex-action@main
        with:
          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          require_repo_write: false
          prompt: |
            You are an assistant that reviews GitHub issues for the repository.

            Your job is to choose the most appropriate existing labels for the issue described later in this prompt.
            Follow these rules:
            - Only pick labels from the list below.
            - Prefer a small set of precise labels over many broad ones.

            Labels to apply:
            1. bug — Reproducible defects in Codex products (CLI, VS Code extension, web, auth).
            2. enhancement — Feature requests or usability improvements that ask for new capabilities, better ergonomics, or quality-of-life tweaks.
            3. extension — VS Code (or other IDE) extension-specific issues.
            4. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
            5. mcp — Topics involving Model Context Protocol servers/clients.
            6. codex-web — Issues targeting the Codex web UI/Cloud experience.
            8. azure — Problems or requests tied to Azure OpenAI deployments.
            9. documentation — Updates or corrections needed in docs/README/config references (broken links, missing examples, outdated keys, clarification requests).
            10. model-behavior — Undesirable LLM behavior: forgetting goals, refusing work, hallucinating environment details, quota misreports, or other reasoning/performance anomalies.

            Issue number: ${{ github.event.issue.number }}

            Issue title:
            ${{ github.event.issue.title }}

            Issue body:
            ${{ github.event.issue.body }}

            Repository full name:
            ${{ github.repository }}

          output_schema: |
            {
              "type": "object",
              "properties": {
                "labels": {
                  "type": "array",
                  "items": {
                    "type": "string"
                  }
                }
              },
              "required": ["labels"],
              "additionalProperties": false
            }

  apply-labels:
    name: Apply labels from Codex output
    needs: gather-labels
    if: ${{ needs.gather-labels.result != 'skipped' }}
    runs-on: ubuntu-latest
    permissions:
      contents: read
      issues: write
    env:
      GH_TOKEN: ${{ github.token }}
      GH_REPO: ${{ github.repository }}
      ISSUE_NUMBER: ${{ github.event.issue.number }}
      CODEX_OUTPUT: ${{ needs.gather-labels.outputs.codex_output }}
    steps:
      - name: Apply labels
        run: |
          json=${CODEX_OUTPUT//$'\r'/}
          if [ -z "$json" ]; then
            echo "Codex produced no output. Skipping label application."
            exit 0
          fi

          if ! printf '%s' "$json" | jq -e 'type == "object" and (.labels | type == "array")' >/dev/null 2>&1; then
            echo "Codex output did not include a labels array. Raw output: $json"
            exit 0
          fi

          labels=$(printf '%s' "$json" | jq -r '.labels[] | tostring')
          if [ -z "$labels" ]; then
            echo "Codex returned an empty array. Nothing to do."
            exit 0
          fi

          cmd=(gh issue edit "$ISSUE_NUMBER")
          while IFS= read -r label; do
            cmd+=(--add-label "$label")
          done <<< "$labels"

          "${cmd[@]}" || true

      - name: Remove codex-label trigger
        if: ${{ always() && github.event.action == 'labeled' && github.event.label.name == 'codex-label' }}
        run: |
          gh issue edit "$ISSUE_NUMBER" --remove-label codex-label || true
          echo "Attempted to remove label: codex-label"
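Either workflow can be re-run on an existing issue by attaching its trigger label, for example with the gh CLI (issue number hypothetical):

```bash
# The workflows remove their trigger label again when they finish.
gh issue edit 1234 --add-label codex-label         # re-run the labeler
gh issue edit 1234 --add-label codex-deduplicate   # re-run the deduplicator
```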
10
AGENTS.md
@@ -8,11 +8,16 @@ In the codex-rs folder where the rust code lives:
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- When writing tests, prefer comparing the equality of entire objects over fields one by one.

Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:

1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.

When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.

## TUI style conventions

@@ -28,6 +33,7 @@ See `codex-rs/tui/styles.md`.
- Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]

### TUI Styling (ratatui)

- Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible.
- Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text).
- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable).
@@ -39,6 +45,7 @@ See `codex-rs/tui/styles.md`.
- Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines.
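A minimal sketch of the desired form (function name hypothetical; assumes `ratatui::style::Stylize` is in scope):

```rust
use ratatui::style::Stylize;
use ratatui::text::Line;

fn status_line() -> Line<'static> {
    // Spans built with Stylize helpers; the Vec<Span> converts to a Line via .into().
    vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()].into()
}
```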

### Text wrapping

- Always use textwrap::wrap to wrap plain strings.
- If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line.
- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic.
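A sketch of the plain-string case, assuming textwrap's builder-style `Options` (RtOptions in tui/src/wrapping.rs exposes the analogous indent knobs for ratatui `Line`s):

```rust
use textwrap::{Options, wrap};

fn print_wrapped(text: &str) {
    // initial_indent/subsequent_indent give a hanging bullet indent
    // without hand-rolled wrapping logic.
    let opts = Options::new(80).initial_indent("- ").subsequent_indent("  ");
    for line in wrap(text, &opts) {
        println!("{line}");
    }
}
```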
@@ -60,6 +67,7 @@ This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to va
- `cargo insta accept -p codex-tui`

If you don’t have the tool:

- `cargo install cargo-insta`

### Test assertions
@@ -1,4 +1,3 @@

<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>

<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
@@ -64,7 +63,6 @@ You can also use Codex with an API key, but this requires [additional setup](./d

Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp). Enable by adding an `mcp_servers` section to your `~/.codex/config.toml`.
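For instance, a minimal stdio server entry might look like the sketch below; the server name and command are placeholders, and ./docs/config.md documents the full schema:

```toml
# ~/.codex/config.toml
[mcp_servers.docs]
command = "npx"
args = ["-y", "docs-mcp-server"]
```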

### Configuration

Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
@@ -83,6 +81,7 @@ Codex CLI supports a rich set of configuration options, with preferences stored
- [**Authentication**](./docs/authentication.md)
  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- [**Non-interactive mode**](./docs/exec.md)
- [**Advanced**](./docs/advanced.md)
  - [Non-interactive / CI mode](./docs/advanced.md#non-interactive--ci-mode)
  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
1425
codex-rs/Cargo.lock
generated
File diff suppressed because it is too large
codex-rs/Cargo.toml

@@ -32,6 +32,7 @@ members = [
     "git-apply",
     "utils/json-to-toml",
+    "utils/readiness",
     "utils/string",
 ]
 resolver = "2"

@@ -71,6 +72,7 @@ codex-rmcp-client = { path = "rmcp-client" }
 codex-tui = { path = "tui" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
+codex-utils-readiness = { path = "utils/readiness" }
 codex-utils-string = { path = "utils/string" }
 core_test_support = { path = "core/tests/common" }
 mcp-types = { path = "mcp-types" }
 mcp_test_support = { path = "mcp-server/tests/common" }
@@ -85,6 +87,7 @@ assert_cmd = "2"
 async-channel = "2.3.1"
 async-stream = "0.3.6"
 async-trait = "0.1.89"
+axum = { version = "0.8", default-features = false }
 base64 = "0.22.1"
 bytes = "1.10.1"
 chrono = "0.4.42"
@@ -102,7 +105,8 @@ env-flags = "0.1.1"
 env_logger = "0.11.5"
 escargot = "0.5"
 eventsource-stream = "0.2.3"
-futures = "0.3"
+futures = { version = "0.3", default-features = false }
+fd-lock = "4.0.4"
 icu_decimal = "2.0.0"
 icu_locale_core = "2.0.0"
 ignore = "0.4.23"
@@ -110,6 +114,7 @@ image = { version = "^0.25.8", default-features = false }
 indexmap = "2.6.0"
 insta = "1.43.2"
 itertools = "0.14.0"
+keyring = "3.6"
 landlock = "0.4.1"
 lazy_static = "1"
 libc = "0.2.175"
@@ -138,13 +143,16 @@ rand = "0.9"
 ratatui = "0.29.0"
 regex-lite = "0.1.7"
 reqwest = "0.12"
 rmcp = { version = "0.7.0", default-features = false }
 schemars = "0.8.22"
 seccompiler = "0.5.0"
 serde = "1"
 serde_json = "1"
 serde_with = "3.14"
 serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
 shellexpand = "3.1.0"
 shlex = "1.3.0"
 similar = "2.7.0"
 starlark = "0.13.0"
@@ -725,6 +725,7 @@ pub struct FuzzyFileSearchParams {
 pub struct FuzzyFileSearchResult {
     pub root: String,
     pub path: String,
+    pub file_name: String,
     pub score: u32,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub indices: Option<Vec<u32>>,
15
codex-rs/app-server/README.md
Normal file
@@ -0,0 +1,15 @@
# codex-app-server

`codex app-server` is the harness Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). The message schema is currently unstable, but those who wish to build experimental UIs on top of Codex may find it valuable.

## Protocol

Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted.

## Message Schema

Currently, you can dump a TypeScript version of the schema using `codex generate-ts`. It is specific to the version of Codex you used to run `generate-ts`, so the two are guaranteed to be compatible.

```
codex generate-ts --out DIR
```
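For illustration only, a request/response exchange on stdio might look like the sketch below; the method and field names are assumptions drawn from this changeset's `newConversation` handler, not the published schema, which `codex generate-ts` emits authoritatively:

```jsonl
{"id":1,"method":"newConversation","params":{}}
{"id":1,"result":{"conversationId":"<uuid>"}}
```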
@@ -53,6 +53,7 @@ use codex_core::AuthManager;
 use codex_core::CodexConversation;
 use codex_core::ConversationManager;
 use codex_core::Cursor as RolloutCursor;
+use codex_core::INTERACTIVE_SESSION_SOURCES;
 use codex_core::NewConversation;
 use codex_core::RolloutRecorder;
 use codex_core::SessionMeta;
@@ -499,7 +500,7 @@ impl CodexMessageProcessor {
     }

     async fn get_user_saved_config(&self, request_id: RequestId) {
-        let toml_value = match load_config_as_toml(&self.config.codex_home) {
+        let toml_value = match load_config_as_toml(&self.config.codex_home).await {
             Ok(val) => val,
             Err(err) => {
                 let error = JSONRPCErrorError {
@@ -652,18 +653,19 @@
     }

     async fn process_new_conversation(&self, request_id: RequestId, params: NewConversationParams) {
-        let config = match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()) {
-            Ok(config) => config,
-            Err(err) => {
-                let error = JSONRPCErrorError {
-                    code: INVALID_REQUEST_ERROR_CODE,
-                    message: format!("error deriving config: {err}"),
-                    data: None,
-                };
-                self.outgoing.send_error(request_id, error).await;
-                return;
-            }
-        };
+        let config =
+            match derive_config_from_params(params, self.codex_linux_sandbox_exe.clone()).await {
+                Ok(config) => config,
+                Err(err) => {
+                    let error = JSONRPCErrorError {
+                        code: INVALID_REQUEST_ERROR_CODE,
+                        message: format!("error deriving config: {err}"),
+                        data: None,
+                    };
+                    self.outgoing.send_error(request_id, error).await;
+                    return;
+                }
+            };

         match self.conversation_manager.new_conversation(config).await {
             Ok(conversation_id) => {
@@ -708,6 +710,7 @@
             &self.config.codex_home,
             page_size,
             cursor_ref,
+            INTERACTIVE_SESSION_SOURCES,
         )
         .await
         {
@@ -750,7 +753,7 @@
         // Derive a Config using the same logic as new conversation, honoring overrides if provided.
         let config = match params.overrides {
             Some(overrides) => {
-                derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone())
+                derive_config_from_params(overrides, self.codex_linux_sandbox_exe.clone()).await
             }
             None => Ok(self.config.as_ref().clone()),
         };
@@ -1318,7 +1321,7 @@ async fn apply_bespoke_event_handling(
     }
 }

-fn derive_config_from_params(
+async fn derive_config_from_params(
     params: NewConversationParams,
     codex_linux_sandbox_exe: Option<PathBuf>,
 ) -> std::io::Result<Config> {
@@ -1356,7 +1359,7 @@ fn derive_config_from_params(
         .map(|(k, v)| (k, json_to_toml(v)))
         .collect();

-    Config::load_with_cli_overrides(cli_overrides, overrides)
+    Config::load_with_cli_overrides(cli_overrides, overrides).await
 }

 async fn on_patch_approval_response(
@@ -1,5 +1,6 @@
 use std::num::NonZero;
 use std::num::NonZeroUsize;
+use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -56,9 +57,16 @@ pub(crate) async fn run_fuzzy_file_search(
     match res {
         Ok(Ok((root, res))) => {
             for m in res.matches {
+                let path = m.path;
+                //TODO(shijie): Move file name generation to file_search lib.
+                let file_name = Path::new(&path)
+                    .file_name()
+                    .map(|name| name.to_string_lossy().into_owned())
+                    .unwrap_or_else(|| path.clone());
                 let result = FuzzyFileSearchResult {
                     root: root.clone(),
-                    path: m.path,
+                    path,
+                    file_name,
                     score: m.score,
                     indices: m.indices,
                 };
@@ -81,6 +81,7 @@ pub async fn run_main(
         )
     })?;
     let config = Config::load_with_cli_overrides(cli_kv_overrides, ConfigOverrides::default())
+        .await
         .map_err(|e| {
             std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
         })?;
@@ -17,6 +17,7 @@ use codex_core::ConversationManager;
 use codex_core::config::Config;
 use codex_core::default_client::USER_AGENT_SUFFIX;
 use codex_core::default_client::get_codex_user_agent;
+use codex_protocol::protocol::SessionSource;
 use std::sync::Arc;

 pub(crate) struct MessageProcessor {
@@ -34,8 +35,11 @@ impl MessageProcessor {
         config: Arc<Config>,
     ) -> Self {
         let outgoing = Arc::new(outgoing);
-        let auth_manager = AuthManager::shared(config.codex_home.clone());
-        let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
+        let auth_manager = AuthManager::shared(config.codex_home.clone(), false);
+        let conversation_manager = Arc::new(ConversationManager::new(
+            auth_manager.clone(),
+            SessionSource::VSCode,
+        ));
         let codex_message_processor = CodexMessageProcessor::new(
             auth_manager,
             conversation_manager,
@@ -1,3 +1,5 @@
+use anyhow::Context;
+use anyhow::Result;
 use app_test_support::McpProcess;
 use codex_app_server_protocol::JSONRPCResponse;
 use codex_app_server_protocol::RequestId;
@@ -9,30 +11,41 @@ use tokio::time::timeout;
 const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn test_fuzzy_file_search_sorts_and_includes_indices() {
+async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
     // Prepare a temporary Codex home and a separate root with test files.
-    let codex_home = TempDir::new().expect("create temp codex home");
-    let root = TempDir::new().expect("create temp search root");
+    let codex_home = TempDir::new().context("create temp codex home")?;
+    let root = TempDir::new().context("create temp search root")?;

-    // Create files designed to have deterministic ordering for query "abc".
-    std::fs::write(root.path().join("abc"), "x").expect("write file abc");
-    std::fs::write(root.path().join("abcde"), "x").expect("write file abcx");
-    std::fs::write(root.path().join("abexy"), "x").expect("write file abcx");
-    std::fs::write(root.path().join("zzz.txt"), "x").expect("write file zzz");
+    // Create files designed to have deterministic ordering for query "abe".
+    std::fs::write(root.path().join("abc"), "x").context("write file abc")?;
+    std::fs::write(root.path().join("abcde"), "x").context("write file abcde")?;
+    std::fs::write(root.path().join("abexy"), "x").context("write file abexy")?;
+    std::fs::write(root.path().join("zzz.txt"), "x").context("write file zzz")?;
+    let sub_dir = root.path().join("sub");
+    std::fs::create_dir_all(&sub_dir).context("create sub dir")?;
+    let sub_abce_path = sub_dir.join("abce");
+    std::fs::write(&sub_abce_path, "x").context("write file sub/abce")?;
+    let sub_abce_rel = sub_abce_path
+        .strip_prefix(root.path())
+        .context("strip root prefix from sub/abce")?
+        .to_string_lossy()
+        .to_string();

     // Start MCP server and initialize.
-    let mut mcp = McpProcess::new(codex_home.path()).await.expect("spawn mcp");
+    let mut mcp = McpProcess::new(codex_home.path())
+        .await
+        .context("spawn mcp")?;
     timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
         .await
-        .expect("init timeout")
-        .expect("init failed");
+        .context("init timeout")?
+        .context("init failed")?;

     let root_path = root.path().to_string_lossy().to_string();
     // Send fuzzyFileSearch request.
     let request_id = mcp
         .send_fuzzy_file_search_request("abe", vec![root_path.clone()], None)
         .await
-        .expect("send fuzzyFileSearch");
+        .context("send fuzzyFileSearch")?;

     // Read response and verify shape and ordering.
     let resp: JSONRPCResponse = timeout(
@@ -40,39 +53,65 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() {
         mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
     )
     .await
-    .expect("fuzzyFileSearch timeout")
-    .expect("fuzzyFileSearch resp");
+    .context("fuzzyFileSearch timeout")?
+    .context("fuzzyFileSearch resp")?;

     let value = resp.result;
+    // The path separator on Windows affects the score.
+    let expected_score = if cfg!(windows) { 69 } else { 72 };
+
     assert_eq!(
         value,
         json!({
             "files": [
-                { "root": root_path.clone(), "path": "abexy", "score": 88, "indices": [0, 1, 2] },
-                { "root": root_path.clone(), "path": "abcde", "score": 74, "indices": [0, 1, 4] },
+                {
+                    "root": root_path.clone(),
+                    "path": "abexy",
+                    "file_name": "abexy",
+                    "score": 88,
+                    "indices": [0, 1, 2],
+                },
+                {
+                    "root": root_path.clone(),
+                    "path": "abcde",
+                    "file_name": "abcde",
+                    "score": 74,
+                    "indices": [0, 1, 4],
+                },
+                {
+                    "root": root_path.clone(),
+                    "path": sub_abce_rel,
+                    "file_name": "abce",
+                    "score": expected_score,
+                    "indices": [4, 5, 7],
+                },
             ]
         })
     );
+
+    Ok(())
 }

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn test_fuzzy_file_search_accepts_cancellation_token() {
-    let codex_home = TempDir::new().expect("create temp codex home");
-    let root = TempDir::new().expect("create temp search root");
+async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> {
+    let codex_home = TempDir::new().context("create temp codex home")?;
+    let root = TempDir::new().context("create temp search root")?;

-    std::fs::write(root.path().join("alpha.txt"), "contents").expect("write alpha");
+    std::fs::write(root.path().join("alpha.txt"), "contents").context("write alpha")?;

-    let mut mcp = McpProcess::new(codex_home.path()).await.expect("spawn mcp");
+    let mut mcp = McpProcess::new(codex_home.path())
+        .await
+        .context("spawn mcp")?;
     timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
         .await
-        .expect("init timeout")
-        .expect("init failed");
+        .context("init timeout")?
+        .context("init failed")?;

     let root_path = root.path().to_string_lossy().to_string();
     let request_id = mcp
         .send_fuzzy_file_search_request("alp", vec![root_path.clone()], None)
         .await
-        .expect("send fuzzyFileSearch");
+        .context("send fuzzyFileSearch")?;

     let request_id_2 = mcp
         .send_fuzzy_file_search_request(
@@ -81,24 +120,27 @@ async fn test_fuzzy_file_search_accepts_cancellation_token() {
             Some(request_id.to_string()),
         )
         .await
-        .expect("send fuzzyFileSearch");
+        .context("send fuzzyFileSearch")?;

     let resp: JSONRPCResponse = timeout(
         DEFAULT_READ_TIMEOUT,
         mcp.read_stream_until_response_message(RequestId::Integer(request_id_2)),
     )
     .await
-    .expect("fuzzyFileSearch timeout")
-    .expect("fuzzyFileSearch resp");
+    .context("fuzzyFileSearch timeout")?
+    .context("fuzzyFileSearch resp")?;

     let files = resp
         .result
         .get("files")
-        .and_then(|value| value.as_array())
-        .cloned()
-        .expect("files array");
+        .context("files key missing")?
+        .as_array()
+        .context("files not array")?
+        .clone();

     assert_eq!(files.len(), 1);
     assert_eq!(files[0]["root"], root_path);
     assert_eq!(files[0]["path"], "alpha.txt");
+
+    Ok(())
 }
@@ -29,7 +29,8 @@ pub async fn run_apply_command(
             .parse_overrides()
             .map_err(anyhow::Error::msg)?,
         ConfigOverrides::default(),
-    )?;
+    )
+    .await?;

    init_chatgpt_token_from_auth(&config.codex_home).await?;
@@ -32,6 +32,7 @@ codex-app-server-protocol = { workspace = true }
 codex-protocol-ts = { workspace = true }
 codex-responses-api-proxy = { workspace = true }
 codex-tui = { workspace = true }
+codex-rmcp-client = { workspace = true }
 codex-cloud-tasks = { path = "../cloud-tasks" }
 ctor = { workspace = true }
 owo-colors = { workspace = true }
@@ -73,7 +73,8 @@ async fn run_command_under_sandbox(
            codex_linux_sandbox_exe,
            ..Default::default()
        },
-    )?;
+    )
+    .await?;

    // In practice, this should be `std::env::current_dir()` because this CLI
    // does not support `--cwd`, but let's use the config value for consistency.
@@ -9,6 +9,8 @@ use codex_core::config::ConfigOverrides;
 use codex_login::ServerOptions;
 use codex_login::run_device_code_login;
 use codex_login::run_login_server;
+use std::io::IsTerminal;
+use std::io::Read;
 use std::path::PathBuf;

 pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
@@ -24,7 +26,7 @@ pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
 }

 pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
-    let config = load_config_or_exit(cli_config_overrides);
+    let config = load_config_or_exit(cli_config_overrides).await;

     match login_with_chatgpt(config.codex_home).await {
         Ok(_) => {
@@ -42,7 +44,7 @@ pub async fn run_login_with_api_key(
     cli_config_overrides: CliConfigOverrides,
     api_key: String,
 ) -> ! {
-    let config = load_config_or_exit(cli_config_overrides);
+    let config = load_config_or_exit(cli_config_overrides).await;

     match login_with_api_key(&config.codex_home, &api_key) {
         Ok(_) => {
@@ -56,13 +58,40 @@
     }
 }

+pub fn read_api_key_from_stdin() -> String {
+    let mut stdin = std::io::stdin();
+
+    if stdin.is_terminal() {
+        eprintln!(
+            "--with-api-key expects the API key on stdin. Try piping it, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`."
+        );
+        std::process::exit(1);
+    }
+
+    eprintln!("Reading API key from stdin...");
+
+    let mut buffer = String::new();
+    if let Err(err) = stdin.read_to_string(&mut buffer) {
+        eprintln!("Failed to read API key from stdin: {err}");
+        std::process::exit(1);
+    }
+
+    let api_key = buffer.trim().to_string();
+    if api_key.is_empty() {
+        eprintln!("No API key provided via stdin.");
+        std::process::exit(1);
+    }
+
+    api_key
+}
+
 /// Login using the OAuth device code flow.
 pub async fn run_login_with_device_code(
     cli_config_overrides: CliConfigOverrides,
     issuer_base_url: Option<String>,
     client_id: Option<String>,
 ) -> ! {
-    let config = load_config_or_exit(cli_config_overrides);
+    let config = load_config_or_exit(cli_config_overrides).await;
     let mut opts = ServerOptions::new(
         config.codex_home,
         client_id.unwrap_or(CLIENT_ID.to_string()),
@@ -83,7 +112,7 @@ pub async fn run_login_with_device_code(
 }

 pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
-    let config = load_config_or_exit(cli_config_overrides);
+    let config = load_config_or_exit(cli_config_overrides).await;

     match CodexAuth::from_codex_home(&config.codex_home) {
         Ok(Some(auth)) => match auth.mode {
@@ -114,7 +143,7 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
 }

 pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! {
-    let config = load_config_or_exit(cli_config_overrides);
+    let config = load_config_or_exit(cli_config_overrides).await;

     match logout(&config.codex_home) {
         Ok(true) => {
@@ -132,7 +161,7 @@ pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! {
     }
 }

-fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
+async fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
     let cli_overrides = match cli_config_overrides.parse_overrides() {
         Ok(v) => v,
         Err(e) => {
@@ -142,7 +171,7 @@ fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
     };

     let config_overrides = ConfigOverrides::default();
-    match Config::load_with_cli_overrides(cli_overrides, config_overrides) {
+    match Config::load_with_cli_overrides(cli_overrides, config_overrides).await {
         Ok(config) => config,
         Err(e) => {
             eprintln!("Error loading configuration: {e}");
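In practice the key is now piped in, exactly as the help text suggests:

```bash
printenv OPENAI_API_KEY | codex login --with-api-key
```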
@@ -7,6 +7,7 @@ use codex_chatgpt::apply_command::ApplyCommand;
 use codex_chatgpt::apply_command::run_apply_command;
 use codex_cli::LandlockCommand;
 use codex_cli::SeatbeltCommand;
+use codex_cli::login::read_api_key_from_stdin;
 use codex_cli::login::run_login_status;
 use codex_cli::login::run_login_with_api_key;
 use codex_cli::login::run_login_with_chatgpt;
@@ -139,7 +140,18 @@ struct LoginCommand {
     #[clap(skip)]
     config_overrides: CliConfigOverrides,

-    #[arg(long = "api-key", value_name = "API_KEY")]
+    #[arg(
+        long = "with-api-key",
+        help = "Read the API key from stdin (e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`)"
+    )]
+    with_api_key: bool,
+
+    #[arg(
+        long = "api-key",
+        value_name = "API_KEY",
+        help = "(deprecated) Previously accepted the API key directly; now exits with guidance to use --with-api-key",
+        hide = true
+    )]
     api_key: Option<String>,

     /// EXPERIMENTAL: Use device code flow (not yet supported)
@@ -298,7 +310,13 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
                 login_cli.client_id,
             )
             .await;
-        } else if let Some(api_key) = login_cli.api_key {
+        } else if login_cli.api_key.is_some() {
+            eprintln!(
+                "The --api-key flag is no longer supported. Pipe the key instead, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`."
+            );
+            std::process::exit(1);
+        } else if login_cli.with_api_key {
+            let api_key = read_api_key_from_stdin();
+            run_login_with_api_key(login_cli.config_overrides, api_key).await;
         } else {
             run_login_with_chatgpt(login_cli.config_overrides).await;
@@ -12,6 +12,8 @@ use codex_core::config::load_global_mcp_servers;
 use codex_core::config::write_global_mcp_servers;
 use codex_core::config_types::McpServerConfig;
 use codex_core::config_types::McpServerTransportConfig;
+use codex_rmcp_client::delete_oauth_tokens;
+use codex_rmcp_client::perform_oauth_login;

 /// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
 ///
@@ -43,6 +45,14 @@ pub enum McpSubcommand {

     /// [experimental] Remove a global MCP server entry.
     Remove(RemoveArgs),
+
+    /// [experimental] Authenticate with a configured MCP server via OAuth.
+    /// Requires experimental_use_rmcp_client = true in config.toml.
+    Login(LoginArgs),
+
+    /// [experimental] Remove stored OAuth credentials for a server.
+    /// Requires experimental_use_rmcp_client = true in config.toml.
+    Logout(LogoutArgs),
 }

 #[derive(Debug, clap::Parser)]
@@ -82,6 +92,18 @@ pub struct RemoveArgs {
     pub name: String,
 }

+#[derive(Debug, clap::Parser)]
+pub struct LoginArgs {
+    /// Name of the MCP server to authenticate with oauth.
+    pub name: String,
+}
+
+#[derive(Debug, clap::Parser)]
+pub struct LogoutArgs {
+    /// Name of the MCP server to deauthenticate.
+    pub name: String,
+}
+
 impl McpCli {
     pub async fn run(self) -> Result<()> {
         let McpCli {
@@ -91,16 +113,22 @@ impl McpCli {

         match subcommand {
             McpSubcommand::List(args) => {
-                run_list(&config_overrides, args)?;
+                run_list(&config_overrides, args).await?;
             }
             McpSubcommand::Get(args) => {
-                run_get(&config_overrides, args)?;
+                run_get(&config_overrides, args).await?;
             }
             McpSubcommand::Add(args) => {
-                run_add(&config_overrides, args)?;
+                run_add(&config_overrides, args).await?;
             }
             McpSubcommand::Remove(args) => {
-                run_remove(&config_overrides, args)?;
+                run_remove(&config_overrides, args).await?;
             }
+            McpSubcommand::Login(args) => {
+                run_login(&config_overrides, args).await?;
+            }
+            McpSubcommand::Logout(args) => {
+                run_logout(&config_overrides, args).await?;
+            }
         }

@@ -108,7 +136,7 @@
     }
 }

-fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
+async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> {
     // Validate any provided overrides even though they are not currently applied.
     config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

@@ -134,6 +162,7 @@ fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<(
     let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
     let mut servers = load_global_mcp_servers(&codex_home)
+        .await
         .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

     let new_entry = McpServerConfig {
@@ -156,7 +185,7 @@
     Ok(())
 }

-fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
+async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> {
     config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;

     let RemoveArgs { name } = remove_args;
@@ -165,6 +194,7 @@ fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) ->
     let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
     let mut servers = load_global_mcp_servers(&codex_home)
+        .await
         .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?;

     let removed = servers.remove(&name).is_some();
@@ -183,9 +213,65 @@
     Ok(())
 }

-fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
+async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> {
+    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
+    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
+        .await
+        .context("failed to load configuration")?;
+
+    if !config.use_experimental_use_rmcp_client {
+        bail!(
+            "OAuth login is only supported when experimental_use_rmcp_client is true in config.toml."
+        );
+    }
+
+    let LoginArgs { name } = login_args;
+
+    let Some(server) = config.mcp_servers.get(&name) else {
+        bail!("No MCP server named '{name}' found.");
+    };
+
+    let url = match &server.transport {
+        McpServerTransportConfig::StreamableHttp { url, .. } => url.clone(),
+        _ => bail!("OAuth login is only supported for streamable HTTP servers."),
+    };
+
+    perform_oauth_login(&name, &url).await?;
+    println!("Successfully logged in to MCP server '{name}'.");
+    Ok(())
+}
+
+async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> {
+    let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
+    let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
+        .await
+        .context("failed to load configuration")?;
+
+    let LogoutArgs { name } = logout_args;
+
+    let server = config
+        .mcp_servers
+        .get(&name)
+        .ok_or_else(|| anyhow!("No MCP server named '{name}' found in configuration."))?;
+
+    let url = match &server.transport {
+        McpServerTransportConfig::StreamableHttp { url, .. } => url.clone(),
+        _ => bail!("OAuth logout is only supported for streamable_http transports."),
+    };
+
+    match delete_oauth_tokens(&name, &url) {
+        Ok(true) => println!("Removed OAuth credentials for '{name}'."),
+        Ok(false) => println!("No OAuth credentials stored for '{name}'."),
+        Err(err) => return Err(anyhow!("failed to delete OAuth credentials: {err}")),
+    }
+
+    Ok(())
+}
+
+async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> {
     let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
     let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
+        .await
         .context("failed to load configuration")?;

     let mut entries: Vec<_> = config.mcp_servers.iter().collect();
@@ -343,9 +429,10 @@
     Ok(())
 }

-fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
+async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> {
     let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?;
     let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default())
+        .await
         .context("failed to load configuration")?;

     let Some(server) = config.mcp_servers.get(&get_args.name) else {
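Assuming a streamable HTTP server named `docs` in config.toml and `experimental_use_rmcp_client = true`, the new subcommands would be invoked as:

```bash
codex mcp login docs
codex mcp logout docs
```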
@@ -13,8 +13,8 @@ fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
     Ok(cmd)
 }

-#[test]
-fn add_and_remove_server_updates_global_config() -> Result<()> {
+#[tokio::test]
+async fn add_and_remove_server_updates_global_config() -> Result<()> {
     let codex_home = TempDir::new()?;

     let mut add_cmd = codex_command(codex_home.path())?;
@@ -24,7 +24,7 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
         .success()
         .stdout(contains("Added global MCP server 'docs'."));

-    let servers = load_global_mcp_servers(codex_home.path())?;
+    let servers = load_global_mcp_servers(codex_home.path()).await?;
     assert_eq!(servers.len(), 1);
     let docs = servers.get("docs").expect("server should exist");
     match &docs.transport {
@@ -43,7 +43,7 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
         .success()
         .stdout(contains("Removed global MCP server 'docs'."));

-    let servers = load_global_mcp_servers(codex_home.path())?;
+    let servers = load_global_mcp_servers(codex_home.path()).await?;
     assert!(servers.is_empty());

     let mut remove_again_cmd = codex_command(codex_home.path())?;
@@ -53,14 +53,14 @@ fn add_and_remove_server_updates_global_config() -> Result<()> {
         .success()
         .stdout(contains("No MCP server named 'docs' found."));

-    let servers = load_global_mcp_servers(codex_home.path())?;
+    let servers = load_global_mcp_servers(codex_home.path()).await?;
     assert!(servers.is_empty());

     Ok(())
 }

-#[test]
-fn add_with_env_preserves_key_order_and_values() -> Result<()> {
+#[tokio::test]
+async fn add_with_env_preserves_key_order_and_values() -> Result<()> {
     let codex_home = TempDir::new()?;

     let mut add_cmd = codex_command(codex_home.path())?;
@@ -80,7 +80,7 @@ fn add_with_env_preserves_key_order_and_values() -> Result<()> {
         .assert()
         .success();

-    let servers = load_global_mcp_servers(codex_home.path())?;
+    let servers = load_global_mcp_servers(codex_home.path()).await?;
     let envy = servers.get("envy").expect("server should exist");
     let env = match &envy.transport {
         McpServerTransportConfig::Stdio { env: Some(env), .. } => env,
@@ -190,7 +190,7 @@ pub async fn run_main(_cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> a
     // Require ChatGPT login (SWIC). Exit with a clear message if missing.
     let _token = match codex_core::config::find_codex_home()
         .ok()
-        .map(codex_login::AuthManager::new)
+        .map(|home| codex_login::AuthManager::new(home, false))
         .and_then(|am| am.auth())
     {
         Some(auth) => {
@@ -70,7 +70,7 @@ pub async fn build_chatgpt_headers() -> HeaderMap {
         HeaderValue::from_str(&ua).unwrap_or(HeaderValue::from_static("codex-cli")),
     );
     if let Ok(home) = codex_core::config::find_codex_home() {
-        let am = codex_login::AuthManager::new(home);
+        let am = codex_login::AuthManager::new(home, false);
         if let Some(auth) = am.auth()
             && let Ok(tok) = auth.get_token().await
             && !tok.is_empty()
@@ -20,49 +20,49 @@ const PRESETS: &[ModelPreset] = &[
     ModelPreset {
         id: "gpt-5-codex-low",
         label: "gpt-5-codex low",
-        description: "",
+        description: "Fastest responses with limited reasoning",
         model: "gpt-5-codex",
         effort: Some(ReasoningEffort::Low),
     },
     ModelPreset {
         id: "gpt-5-codex-medium",
         label: "gpt-5-codex medium",
-        description: "",
+        description: "Dynamically adjusts reasoning based on the task",
         model: "gpt-5-codex",
         effort: Some(ReasoningEffort::Medium),
     },
     ModelPreset {
         id: "gpt-5-codex-high",
         label: "gpt-5-codex high",
-        description: "",
+        description: "Maximizes reasoning depth for complex or ambiguous problems",
         model: "gpt-5-codex",
         effort: Some(ReasoningEffort::High),
     },
     ModelPreset {
         id: "gpt-5-minimal",
         label: "gpt-5 minimal",
-        description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks",
+        description: "Fastest responses with little reasoning",
         model: "gpt-5",
         effort: Some(ReasoningEffort::Minimal),
     },
     ModelPreset {
         id: "gpt-5-low",
         label: "gpt-5 low",
-        description: "— balances speed with some reasoning; useful for straightforward queries and short explanations",
+        description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
         model: "gpt-5",
         effort: Some(ReasoningEffort::Low),
     },
     ModelPreset {
         id: "gpt-5-medium",
         label: "gpt-5 medium",
-        description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks",
+        description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
         model: "gpt-5",
         effort: Some(ReasoningEffort::Medium),
     },
     ModelPreset {
         id: "gpt-5-high",
         label: "gpt-5 high",
-        description: "— maximizes reasoning depth for complex or ambiguous problems",
+        description: "Maximizes reasoning depth for complex or ambiguous problems",
         model: "gpt-5",
         effort: Some(ReasoningEffort::High),
     },
@@ -19,18 +19,21 @@ async-trait = { workspace = true }
 base64 = { workspace = true }
 bytes = { workspace = true }
 chrono = { workspace = true, features = ["serde"] }
+codex-app-server-protocol = { workspace = true }
 codex-apply-patch = { workspace = true }
 codex-file-search = { workspace = true }
 codex-mcp-client = { workspace = true }
-codex-rmcp-client = { workspace = true }
-codex-protocol = { workspace = true }
-codex-app-server-protocol = { workspace = true }
 codex-otel = { workspace = true, features = ["otel"] }
+codex-protocol = { workspace = true }
+codex-rmcp-client = { workspace = true }
+codex-utils-string = { workspace = true }
 dirs = { workspace = true }
 dunce = { workspace = true }
 env-flags = { workspace = true }
 eventsource-stream = { workspace = true }
+fd-lock = { workspace = true }
 futures = { workspace = true }
+gethostname = "0.4"
 indexmap = { workspace = true }
 libc = { workspace = true }
 mcp-types = { workspace = true }
@@ -39,6 +42,7 @@ portable-pty = { workspace = true }
 rand = { workspace = true }
 regex-lite = { workspace = true }
 reqwest = { workspace = true, features = ["json", "stream"] }
+shellexpand = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
 sha1 = { workspace = true }
@@ -75,6 +79,9 @@ wildmatch = { workspace = true }
 landlock = { workspace = true }
 seccompiler = { workspace = true }

+[target.'cfg(target_os = "macos")'.dependencies]
+core-foundation = "0.9"
+
 # Build OpenSSL from source for musl builds.
 [target.x86_64-unknown-linux-musl.dependencies]
 openssl-sys = { workspace = true, features = ["vendored"] }
@@ -90,11 +97,12 @@ escargot = { workspace = true }
 maplit = { workspace = true }
 predicates = { workspace = true }
 pretty_assertions = { workspace = true }
+serial_test = { workspace = true }
 tempfile = { workspace = true }
 tokio-test = { workspace = true }
+tracing-test = { workspace = true, features = ["no-env-filter"] }
 walkdir = { workspace = true }
 wiremock = { workspace = true }
-tracing-test = { workspace = true, features = ["no-env-filter"] }

 [package.metadata.cargo-shear]
 ignored = ["openssl-sys"]
@@ -89,7 +89,7 @@ You are producing plain text that will later be styled by the CLI. Follow these
- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; add a language hint whenever obvious.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
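In practice, the reworded rule asks every fence to carry an info string, for example:

    ```bash
    rg --files | head -n 5
    ```

rather than a bare ``` fence; the command itself is only an illustration, not part of the prompt.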
448 codex-rs/core/src/admin_controls.rs Normal file
@@ -0,0 +1,448 @@
use crate::config_types::AdminAuditEventKind;
use crate::config_types::AdminAuditToml;
use crate::config_types::AdminConfigToml;
use crate::exec::ExecParams;
use crate::exec::SandboxType;
use crate::path_utils::expand_tilde;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use chrono::DateTime;
use chrono::Utc;
use fd_lock::RwLock;
use gethostname::gethostname;
use reqwest::Client;
use serde::Serialize;
use std::collections::HashSet;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
use std::io::{self};
use std::path::Path;
use std::path::PathBuf;
use tokio::runtime::Handle;
use tracing::warn;

#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;

#[derive(Debug, Clone, PartialEq, Default)]
pub struct AdminControls {
    pub danger: DangerControls,
    pub audit: Option<AdminAuditConfig>,
    pub pending: Vec<PendingAdminAction>,
}

#[derive(Debug, Clone, PartialEq, Default)]
pub struct DangerControls {
    pub disallow_full_access: bool,
    pub allow_with_reason: bool,
}

#[derive(Debug, Clone, PartialEq)]
pub struct AdminAuditConfig {
    pub log_file: Option<PathBuf>,
    pub log_endpoint: Option<String>,
    pub log_events: HashSet<AdminAuditEventKind>,
}

#[derive(Debug, Clone, PartialEq)]
pub enum PendingAdminAction {
    Danger(DangerPending),
}

#[derive(Debug, Clone, PartialEq)]
pub struct DangerPending {
    pub source: DangerRequestSource,
    pub requested_sandbox: SandboxPolicy,
    pub requested_approval: AskForApproval,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum DangerRequestSource {
    Startup,
    Resume,
    Approvals,
    ExecCli,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DangerDecision {
    Allowed,
    RequiresJustification,
    Denied,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum DangerAuditAction {
    Requested,
    Approved,
    Cancelled,
    Denied,
}

#[derive(Debug, Clone, Serialize)]
#[serde(tag = "audit_kind", rename_all = "snake_case")]
pub enum AdminAuditPayload {
    Danger {
        action: DangerAuditAction,
        justification: Option<String>,
        requested_by: DangerRequestSource,
        sandbox_policy: SandboxPolicy,
        approval_policy: AskForApproval,
    },
    Command {
        command: Vec<String>,
        command_cwd: PathBuf,
        cli_cwd: PathBuf,
        sandbox_type: SandboxType,
        sandbox_policy: SandboxPolicy,
        escalated: bool,
        justification: Option<String>,
    },
}

#[derive(Debug, Clone, Serialize)]
pub struct AdminAuditRecord {
    timestamp: DateTime<Utc>,
    username: String,
    hostname: String,
    #[serde(flatten)]
    payload: AdminAuditPayload,
}

impl AdminControls {
    pub fn from_toml(raw: Option<AdminConfigToml>) -> io::Result<Self> {
        let raw = raw.unwrap_or_default();
        let danger = DangerControls {
            disallow_full_access: raw.disallow_danger_full_access.unwrap_or(false),
            allow_with_reason: raw.allow_danger_with_reason.unwrap_or(false),
        };
        let audit = match raw.audit {
            Some(audit_raw) => AdminAuditConfig::from_toml(audit_raw)?,
            None => None,
        };

        Ok(Self {
            danger,
            audit,
            pending: Vec::new(),
        })
    }

    pub fn decision_for_danger(&self) -> DangerDecision {
        if !self.danger.disallow_full_access {
            DangerDecision::Allowed
        } else if self.danger.allow_with_reason {
            DangerDecision::RequiresJustification
        } else {
            DangerDecision::Denied
        }
    }

    pub fn has_pending_danger(&self) -> bool {
        self.pending
            .iter()
            .any(|action| matches!(action, PendingAdminAction::Danger(_)))
    }

    pub fn take_pending_danger(&mut self) -> Option<DangerPending> {
        self.pending
            .extract_if(.., |action| matches!(action, PendingAdminAction::Danger(_)))
            .next()
            .map(|action| match action {
                PendingAdminAction::Danger(pending) => pending,
            })
    }

    pub fn peek_pending_danger(&self) -> Option<&DangerPending> {
        self.pending
            .iter()
            .map(|action| match action {
                PendingAdminAction::Danger(pending) => pending,
            })
            .next()
    }
}

impl AdminAuditConfig {
    pub fn from_toml(raw: AdminAuditToml) -> io::Result<Option<Self>> {
        let AdminAuditToml {
            log_file,
            log_endpoint,
            log_events,
        } = raw;

        let log_file = match log_file {
            Some(path) => {
                let trimmed = path.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    Some(expand_tilde(trimmed)?)
                }
            }
            None => None,
        };

        let log_endpoint = log_endpoint
            .map(|endpoint| endpoint.trim().to_string())
            .filter(|s| !s.is_empty());

        if log_file.is_none() && log_endpoint.is_none() {
            return Ok(None);
        }

        let log_events = log_events.into_iter().collect();

        Ok(Some(Self {
            log_file,
            log_endpoint,
            log_events,
        }))
    }

    pub fn should_log(&self, kind: AdminAuditEventKind) -> bool {
        self.log_events.is_empty() || self.log_events.contains(&kind)
    }
}

impl AdminAuditPayload {
    pub fn kind(&self) -> AdminAuditEventKind {
        match self {
            AdminAuditPayload::Danger { .. } => AdminAuditEventKind::Danger,
            AdminAuditPayload::Command { .. } => AdminAuditEventKind::Command,
        }
    }
}

impl AdminAuditRecord {
    fn new(payload: AdminAuditPayload) -> Self {
        Self {
            timestamp: Utc::now(),
            username: current_username(),
            hostname: current_hostname(),
            payload,
        }
    }
}

pub fn log_admin_event(config: &AdminAuditConfig, payload: AdminAuditPayload) {
    let kind = payload.kind();
    if !config.should_log(kind) {
        return;
    }

    let record = AdminAuditRecord::new(payload);

    if let Some(path) = &config.log_file
        && let Err(err) = append_record_to_file(path, &record)
    {
        warn!(
            "failed to write admin audit event to {}: {err:?}",
            path.display()
        );
    }

    if let Some(endpoint) = &config.log_endpoint {
        if Handle::try_current().is_ok() {
            let endpoint = endpoint.clone();
            tokio::spawn(async move {
                if let Err(err) = send_record_to_endpoint(&endpoint, record).await {
                    warn!("failed to post admin audit event to {endpoint}: {err:?}");
                }
            });
        } else {
            warn!(
                "admin audit HTTP logging requested for {endpoint}, but no async runtime is available",
            );
        }
    }
}

fn append_record_to_file(path: &Path, record: &AdminAuditRecord) -> io::Result<()> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?;
    }

    let mut options = OpenOptions::new();
    options.create(true).append(true).write(true);
    #[cfg(unix)]
    {
        options.mode(0o600);
    }

    let file = options.open(path)?;
    let mut lock = RwLock::new(file);
    let mut guard = lock.write()?;
    let line = serde_json::to_string(record).map_err(io::Error::other)?;
    guard.write_all(line.as_bytes())?;
    guard.write_all(b"\n")?;
    guard.flush()?;
    Ok(())
}

async fn send_record_to_endpoint(
    endpoint: &str,
    record: AdminAuditRecord,
) -> Result<(), reqwest::Error> {
    Client::new().post(endpoint).json(&record).send().await?;
    Ok(())
}

fn current_username() -> String {
    env_var("USER")
        .or_else(|| env_var("USERNAME"))
        .unwrap_or_else(|| "unknown".to_string())
}

fn current_hostname() -> String {
    gethostname()
        .into_string()
        .ok()
        .filter(|value| !value.is_empty())
        .or_else(|| env_var("HOSTNAME"))
        .or_else(|| env_var("COMPUTERNAME"))
        .unwrap_or_else(|| "unknown".to_string())
}

fn env_var(key: &str) -> Option<String> {
    std::env::var(key).ok().filter(|value| !value.is_empty())
}

pub fn build_danger_audit_payload(
    pending: &DangerPending,
    action: DangerAuditAction,
    justification: Option<String>,
) -> AdminAuditPayload {
    AdminAuditPayload::Danger {
        action,
        justification,
        requested_by: pending.source,
        sandbox_policy: pending.requested_sandbox.clone(),
        approval_policy: pending.requested_approval,
    }
}

pub fn build_command_audit_payload(
    params: &ExecParams,
    sandbox_type: SandboxType,
    sandbox_policy: &SandboxPolicy,
    cli_cwd: &Path,
) -> AdminAuditPayload {
    AdminAuditPayload::Command {
        command: params.command.clone(),
        command_cwd: params.cwd.clone(),
        cli_cwd: cli_cwd.to_path_buf(),
        sandbox_type,
        sandbox_policy: sandbox_policy.clone(),
        escalated: params.with_escalated_permissions.unwrap_or(false),
        justification: params.justification.clone(),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::Value;
    use std::collections::HashMap;
    use std::path::Path;
    use std::path::PathBuf;

    #[test]
    fn danger_payload_serializes_expected_fields() {
        let pending = DangerPending {
            source: DangerRequestSource::Approvals,
            requested_sandbox: SandboxPolicy::DangerFullAccess,
            requested_approval: AskForApproval::Never,
        };

        let payload = build_danger_audit_payload(
            &pending,
            DangerAuditAction::Requested,
            Some("reason".to_string()),
        );
        let record = AdminAuditRecord::new(payload);
        let value = serde_json::to_value(record).expect("serialize record");

        assert_eq!(
            value.get("audit_kind"),
            Some(&Value::String("danger".to_string()))
        );
        assert_eq!(
            value.get("action"),
            Some(&Value::String("requested".to_string()))
        );
        assert_eq!(
            value.get("requested_by"),
            Some(&Value::String("approvals".to_string()))
        );
        assert_eq!(
            value.get("approval_policy"),
            Some(&Value::String("never".to_string()))
        );
        assert_eq!(
            value.get("sandbox_policy").and_then(|sp| sp.get("mode")),
            Some(&Value::String("danger-full-access".to_string()))
        );
        assert_eq!(
            value.get("justification"),
            Some(&Value::String("reason".to_string()))
        );
    }

    #[test]
    fn command_payload_serializes_expected_fields() {
        let mut env = HashMap::new();
        env.insert("PATH".to_string(), "/usr/bin".to_string());
        let params = ExecParams {
            command: vec!["echo".to_string(), "hello".to_string()],
            cwd: PathBuf::from("/tmp"),
            timeout_ms: Some(1000),
            env,
            with_escalated_permissions: Some(true),
            justification: Some("investigation".to_string()),
        };

        let sandbox_policy = SandboxPolicy::new_workspace_write_policy();
        let payload = build_command_audit_payload(
            &params,
            SandboxType::MacosSeatbelt,
            &sandbox_policy,
            Path::new("/workspace"),
        );
        let record = AdminAuditRecord::new(payload);
        let value = serde_json::to_value(record).expect("serialize record");

        assert_eq!(
            value.get("audit_kind"),
            Some(&Value::String("command".to_string()))
        );
        assert_eq!(
            value.get("command"),
            Some(&serde_json::json!(["echo", "hello"]))
        );
        assert_eq!(
            value.get("command_cwd"),
            Some(&Value::String("/tmp".to_string()))
        );
        assert_eq!(
            value.get("cli_cwd"),
            Some(&Value::String("/workspace".to_string()))
        );
        assert_eq!(
            value.get("sandbox_type"),
            Some(&Value::String("macos-seatbelt".to_string()))
        );
        assert_eq!(
            value.get("sandbox_policy").and_then(|sp| sp.get("mode")),
            Some(&Value::String("workspace-write".to_string()))
        );
        assert_eq!(value.get("escalated"), Some(&Value::Bool(true)));
        assert_eq!(
            value.get("justification"),
            Some(&Value::String("investigation".to_string()))
        );
    }
}
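With a sink configured, `log_admin_event` appends one flattened JSON object per line. An illustrative record for the danger test case above (timestamp, username, and hostname are machine-specific placeholders, not output from a real run):

    {"timestamp":"2025-01-01T12:00:00Z","username":"alice","hostname":"devbox","audit_kind":"danger","action":"requested","justification":"reason","requested_by":"approvals","sandbox_policy":{"mode":"danger-full-access"},"approval_policy":"never"}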
@@ -73,7 +73,7 @@ impl CodexAuth {

    /// Loads the available auth information from the auth.json.
    pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
        load_auth(codex_home)
        load_auth(codex_home, false)
    }

    pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
@@ -188,6 +188,7 @@ impl CodexAuth {
}

pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
pub const CODEX_API_KEY_ENV_VAR: &str = "CODEX_API_KEY";

pub fn read_openai_api_key_from_env() -> Option<String> {
    env::var(OPENAI_API_KEY_ENV_VAR)
@@ -196,6 +197,13 @@ pub fn read_openai_api_key_from_env() -> Option<String> {
        .filter(|value| !value.is_empty())
}

pub fn read_codex_api_key_from_env() -> Option<String> {
    env::var(CODEX_API_KEY_ENV_VAR)
        .ok()
        .map(|value| value.trim().to_string())
        .filter(|value| !value.is_empty())
}

pub fn get_auth_file(codex_home: &Path) -> PathBuf {
    codex_home.join("auth.json")
}
@@ -221,7 +229,18 @@ pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<(
    write_auth_json(&get_auth_file(codex_home), &auth_dot_json)
}

fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
fn load_auth(
    codex_home: &Path,
    enable_codex_api_key_env: bool,
) -> std::io::Result<Option<CodexAuth>> {
    if enable_codex_api_key_env && let Some(api_key) = read_codex_api_key_from_env() {
        let client = crate::default_client::create_client();
        return Ok(Some(CodexAuth::from_api_key_with_client(
            api_key.as_str(),
            client,
        )));
    }

    let auth_file = get_auth_file(codex_home);
    let client = crate::default_client::create_client();
    let auth_dot_json = match try_read_auth_json(&auth_file) {
@@ -455,7 +474,7 @@ mod tests {
            auth_dot_json,
            auth_file: _,
            ..
        } = super::load_auth(codex_home.path()).unwrap().unwrap();
        } = super::load_auth(codex_home.path(), false).unwrap().unwrap();
        assert_eq!(None, api_key);
        assert_eq!(AuthMode::ChatGPT, mode);

@@ -494,7 +513,7 @@ mod tests {
        )
        .unwrap();

        let auth = super::load_auth(dir.path()).unwrap().unwrap();
        let auth = super::load_auth(dir.path(), false).unwrap().unwrap();
        assert_eq!(auth.mode, AuthMode::ApiKey);
        assert_eq!(auth.api_key, Some("sk-test-key".to_string()));

@@ -577,6 +596,7 @@ mod tests {
pub struct AuthManager {
    codex_home: PathBuf,
    inner: RwLock<CachedAuth>,
    enable_codex_api_key_env: bool,
}

impl AuthManager {
@@ -584,11 +604,14 @@ impl AuthManager {
    /// preferred auth method. Errors loading auth are swallowed; `auth()` will
    /// simply return `None` in that case so callers can treat it as an
    /// unauthenticated state.
    pub fn new(codex_home: PathBuf) -> Self {
        let auth = CodexAuth::from_codex_home(&codex_home).ok().flatten();
    pub fn new(codex_home: PathBuf, enable_codex_api_key_env: bool) -> Self {
        let auth = load_auth(&codex_home, enable_codex_api_key_env)
            .ok()
            .flatten();
        Self {
            codex_home,
            inner: RwLock::new(CachedAuth { auth }),
            enable_codex_api_key_env,
        }
    }

@@ -598,6 +621,7 @@ impl AuthManager {
        Arc::new(Self {
            codex_home: PathBuf::new(),
            inner: RwLock::new(cached),
            enable_codex_api_key_env: false,
        })
    }

@@ -609,7 +633,9 @@ impl AuthManager {
    /// Force a reload of the auth information from auth.json. Returns
    /// whether the auth value changed.
    pub fn reload(&self) -> bool {
        let new_auth = CodexAuth::from_codex_home(&self.codex_home).ok().flatten();
        let new_auth = load_auth(&self.codex_home, self.enable_codex_api_key_env)
            .ok()
            .flatten();
        if let Ok(mut guard) = self.inner.write() {
            let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
            guard.auth = new_auth;
@@ -628,8 +654,8 @@ impl AuthManager {
    }

    /// Convenience constructor returning an `Arc` wrapper.
    pub fn shared(codex_home: PathBuf) -> Arc<Self> {
        Arc::new(Self::new(codex_home))
    pub fn shared(codex_home: PathBuf, enable_codex_api_key_env: bool) -> Arc<Self> {
        Arc::new(Self::new(codex_home, enable_codex_api_key_env))
    }

    /// Attempt to refresh the current auth token (if any). On success, reload
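The `enable_codex_api_key_env` flag threads through every constructor: when it is true and `CODEX_API_KEY` is set to a non-empty value, the environment key wins over `auth.json`. A minimal wiring sketch under that assumption (variable names invented):

    // Opt in to CODEX_API_KEY resolution for this manager.
    let auth_manager = AuthManager::shared(codex_home.clone(), true);
    // Existing callers keep the old behavior by passing `false`.
    let legacy_manager = AuthManager::new(codex_home, false);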
@@ -1,6 +1,6 @@
use crate::client_common::tools::ToolSpec;
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::OpenAiTool;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
@@ -29,7 +29,7 @@ pub struct Prompt {

    /// Tools available to the model, including additional tools sourced from
    /// external MCP servers.
    pub(crate) tools: Vec<OpenAiTool>,
    pub(crate) tools: Vec<ToolSpec>,

    /// Optional override for the built-in BASE_INSTRUCTIONS.
    pub base_instructions_override: Option<String>,
@@ -49,8 +49,8 @@ impl Prompt {
        // AND
        // - there is no apply_patch tool present
        let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
            OpenAiTool::Function(f) => f.name == "apply_patch",
            OpenAiTool::Freeform(f) => f.name == "apply_patch",
            ToolSpec::Function(f) => f.name == "apply_patch",
            ToolSpec::Freeform(f) => f.name == "apply_patch",
            _ => false,
        });
        if self.base_instructions_override.is_none()
@@ -160,6 +160,54 @@ pub(crate) struct ResponsesApiRequest<'a> {
    pub(crate) text: Option<TextControls>,
}

pub(crate) mod tools {
    use crate::openai_tools::JsonSchema;
    use serde::Deserialize;
    use serde::Serialize;

    /// When serialized as JSON, this produces a valid "Tool" in the OpenAI
    /// Responses API.
    #[derive(Debug, Clone, Serialize, PartialEq)]
    #[serde(tag = "type")]
    pub(crate) enum ToolSpec {
        #[serde(rename = "function")]
        Function(ResponsesApiTool),
        #[serde(rename = "local_shell")]
        LocalShell {},
        // TODO: Understand why we get an error on web_search although the API docs say it's supported.
        // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C
        #[serde(rename = "web_search")]
        WebSearch {},
        #[serde(rename = "custom")]
        Freeform(FreeformTool),
    }

    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    pub struct FreeformTool {
        pub(crate) name: String,
        pub(crate) description: String,
        pub(crate) format: FreeformToolFormat,
    }

    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    pub struct FreeformToolFormat {
        pub(crate) r#type: String,
        pub(crate) syntax: String,
        pub(crate) definition: String,
    }

    #[derive(Debug, Clone, Serialize, PartialEq)]
    pub struct ResponsesApiTool {
        pub(crate) name: String,
        pub(crate) description: String,
        /// TODO: Validation. When strict is set to true, the JSON schema,
        /// `required` and `additional_properties` must be present. All fields in
        /// `properties` must be present in `required`.
        pub(crate) strict: bool,
        pub(crate) parameters: JsonSchema,
    }
}

pub(crate) fn create_reasoning_param_for_request(
    model_family: &ModelFamily,
    effort: Option<ReasoningEffortConfig>,
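Because `ToolSpec` is internally tagged with `type` and each variant carries a serde rename, the unit-like variants serialize to bare tag objects, while `Function` inlines the `ResponsesApiTool` fields next to the tag. Illustrative output (the function payload is abbreviated, not taken from a real request):

    {"type":"local_shell"}
    {"type":"web_search"}
    {"type":"function","name":"apply_patch","description":"…","strict":false,"parameters":{…}}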
File diff suppressed because it is too large
@@ -1,4 +1,17 @@
use crate::admin_controls::AdminControls;
use crate::admin_controls::DangerAuditAction;
use crate::admin_controls::DangerDecision;
use crate::admin_controls::DangerPending;
use crate::admin_controls::DangerRequestSource;
use crate::admin_controls::PendingAdminAction;
use crate::admin_controls::build_danger_audit_payload;
use crate::admin_controls::log_admin_event;
use crate::config_loader::LoadedConfigLayers;
pub use crate::config_loader::load_config_as_toml;
use crate::config_loader::load_config_layers_with_overrides;
use crate::config_loader::merge_toml_values;
use crate::config_profile::ConfigProfile;
use crate::config_types::AdminConfigToml;
use crate::config_types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
@@ -209,53 +222,44 @@ pub struct Config {

    /// OTEL configuration (exporter type, endpoint, headers, etc.).
    pub otel: crate::config_types::OtelConfig,

    /// Administrator-controlled options and audit configuration.
    pub admin: AdminControls,
}

impl Config {
    /// Load configuration with *generic* CLI overrides (`-c key=value`) applied
    /// **in between** the values parsed from `config.toml` and the
    /// strongly-typed overrides specified via [`ConfigOverrides`].
    ///
    /// The precedence order is therefore: `config.toml` < `-c` overrides <
    /// `ConfigOverrides`.
    pub fn load_with_cli_overrides(
    pub async fn load_with_cli_overrides(
        cli_overrides: Vec<(String, TomlValue)>,
        overrides: ConfigOverrides,
    ) -> std::io::Result<Self> {
        // Resolve the directory that stores Codex state (e.g. ~/.codex or the
        // value of $CODEX_HOME) so we can embed it into the resulting
        // `Config` instance.
        let codex_home = find_codex_home()?;

        // Step 1: parse `config.toml` into a generic JSON value.
        let mut root_value = load_config_as_toml(&codex_home)?;
        let root_value = load_resolved_config(
            &codex_home,
            cli_overrides,
            crate::config_loader::LoaderOverrides::default(),
        )
        .await?;

        // Step 2: apply the `-c` overrides.
        for (path, value) in cli_overrides.into_iter() {
            apply_toml_override(&mut root_value, &path, value);
        }

        // Step 3: deserialize into `ConfigToml` so that Serde can enforce the
        // correct types.
        let cfg: ConfigToml = root_value.try_into().map_err(|e| {
            tracing::error!("Failed to deserialize overridden config: {e}");
            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
        })?;

        // Step 4: merge with the strongly-typed overrides.
        Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
    }
}

pub fn load_config_as_toml_with_cli_overrides(
pub async fn load_config_as_toml_with_cli_overrides(
    codex_home: &Path,
    cli_overrides: Vec<(String, TomlValue)>,
) -> std::io::Result<ConfigToml> {
    let mut root_value = load_config_as_toml(codex_home)?;

    for (path, value) in cli_overrides.into_iter() {
        apply_toml_override(&mut root_value, &path, value);
    }
    let root_value = load_resolved_config(
        codex_home,
        cli_overrides,
        crate::config_loader::LoaderOverrides::default(),
    )
    .await?;

    let cfg: ConfigToml = root_value.try_into().map_err(|e| {
        tracing::error!("Failed to deserialize overridden config: {e}");
@@ -265,33 +269,40 @@ pub fn load_config_as_toml_with_cli_overrides(
    Ok(cfg)
}

/// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns
/// an empty TOML table when the file does not exist.
pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
    let config_path = codex_home.join(CONFIG_TOML_FILE);
    match std::fs::read_to_string(&config_path) {
        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
            Ok(val) => Ok(val),
            Err(e) => {
                tracing::error!("Failed to parse config.toml: {e}");
                Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))
            }
        },
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            tracing::info!("config.toml not found, using defaults");
            Ok(TomlValue::Table(Default::default()))
        }
        Err(e) => {
            tracing::error!("Failed to read config.toml: {e}");
            Err(e)
        }
    }
async fn load_resolved_config(
    codex_home: &Path,
    cli_overrides: Vec<(String, TomlValue)>,
    overrides: crate::config_loader::LoaderOverrides,
) -> std::io::Result<TomlValue> {
    let layers = load_config_layers_with_overrides(codex_home, overrides).await?;
    Ok(apply_overlays(layers, cli_overrides))
}

pub fn load_global_mcp_servers(
fn apply_overlays(
    layers: LoadedConfigLayers,
    cli_overrides: Vec<(String, TomlValue)>,
) -> TomlValue {
    let LoadedConfigLayers {
        mut base,
        managed_config,
        managed_preferences,
    } = layers;

    for (path, value) in cli_overrides.into_iter() {
        apply_toml_override(&mut base, &path, value);
    }

    for overlay in [managed_config, managed_preferences].into_iter().flatten() {
        merge_toml_values(&mut base, &overlay);
    }

    base
}

pub async fn load_global_mcp_servers(
    codex_home: &Path,
) -> std::io::Result<BTreeMap<String, McpServerConfig>> {
    let root_value = load_config_as_toml(codex_home)?;
    let root_value = load_config_as_toml(codex_home).await?;
    let Some(servers_value) = root_value.get("mcp_servers") else {
        return Ok(BTreeMap::new());
    };
@@ -735,6 +746,10 @@ pub struct ConfigToml {

    /// OTEL configuration.
    pub otel: Option<crate::config_types::OtelConfigToml>,

    /// Administrator-level controls applied to all users on this host.
    #[serde(default)]
    pub admin: Option<AdminConfigToml>,
}

impl From<ConfigToml> for UserSavedConfig {
@@ -923,7 +938,68 @@ impl Config {
            None => ConfigProfile::default(),
        };

        let sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode);
        let resolved_approval_policy = approval_policy
            .or(config_profile.approval_policy)
            .or(cfg.approval_policy)
            .unwrap_or_else(AskForApproval::default);

        let mut admin = AdminControls::from_toml(cfg.admin.clone())?;

        let mut sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode);

        if matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) {
            match admin.decision_for_danger() {
                DangerDecision::Allowed => {
                    if let Some(audit) = admin.audit.as_ref() {
                        let pending = DangerPending {
                            source: DangerRequestSource::Startup,
                            requested_sandbox: SandboxPolicy::DangerFullAccess,
                            requested_approval: resolved_approval_policy,
                        };
                        log_admin_event(
                            audit,
                            build_danger_audit_payload(&pending, DangerAuditAction::Approved, None),
                        );
                    }
                }
                DangerDecision::RequiresJustification => {
                    let pending = DangerPending {
                        source: DangerRequestSource::Startup,
                        requested_sandbox: SandboxPolicy::DangerFullAccess,
                        requested_approval: resolved_approval_policy,
                    };
                    if let Some(audit) = admin.audit.as_ref() {
                        log_admin_event(
                            audit,
                            build_danger_audit_payload(
                                &pending,
                                DangerAuditAction::Requested,
                                None,
                            ),
                        );
                    }
                    admin.pending.push(PendingAdminAction::Danger(pending));
                    sandbox_policy = SandboxPolicy::new_workspace_write_policy();
                }
                DangerDecision::Denied => {
                    if let Some(audit) = admin.audit.as_ref() {
                        let pending = DangerPending {
                            source: DangerRequestSource::Startup,
                            requested_sandbox: SandboxPolicy::DangerFullAccess,
                            requested_approval: resolved_approval_policy,
                        };
                        log_admin_event(
                            audit,
                            build_danger_audit_payload(&pending, DangerAuditAction::Denied, None),
                        );
                    }
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::PermissionDenied,
                        "danger-full-access is disabled by administrator policy",
                    ));
                }
            }
        }

        let mut model_providers = built_in_model_providers();
        // Merge user-defined providers into the built-in list.
@@ -1032,10 +1108,7 @@ impl Config {
            model_provider_id,
            model_provider,
            cwd: resolved_cwd,
            approval_policy: approval_policy
                .or(config_profile.approval_policy)
                .or(cfg.approval_policy)
                .unwrap_or_else(AskForApproval::default),
            approval_policy: resolved_approval_policy,
            sandbox_policy,
            shell_environment_policy,
            notify: cfg.notify,
@@ -1079,7 +1152,7 @@ impl Config {
                .chatgpt_base_url
                .or(cfg.chatgpt_base_url)
                .unwrap_or("https://chatgpt.com/backend-api/".to_string()),
            include_plan_tool: include_plan_tool.unwrap_or(true),
            include_plan_tool: include_plan_tool.unwrap_or(false),
            include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
            tools_web_search_request,
            use_experimental_streamable_shell_tool: cfg
@@ -1110,6 +1183,7 @@ impl Config {
                    exporter,
                }
            },
            admin,
        };
        Ok(config)
    }
@@ -1219,6 +1293,7 @@ pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> {

#[cfg(test)]
mod tests {
    use crate::admin_controls::AdminControls;
    use crate::config_types::HistoryPersistence;
    use crate::config_types::Notifications;

@@ -1329,18 +1404,18 @@ exclude_slash_tmp = true
        );
    }

    #[test]
    fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
    #[tokio::test]
    async fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let servers = load_global_mcp_servers(codex_home.path())?;
        let servers = load_global_mcp_servers(codex_home.path()).await?;
        assert!(servers.is_empty());

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_round_trips_entries() -> anyhow::Result<()> {
    #[tokio::test]
    async fn write_global_mcp_servers_round_trips_entries() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let mut servers = BTreeMap::new();
@@ -1359,7 +1434,7 @@ exclude_slash_tmp = true

        write_global_mcp_servers(codex_home.path(), &servers)?;

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let loaded = load_global_mcp_servers(codex_home.path()).await?;
        assert_eq!(loaded.len(), 1);
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
@@ -1375,14 +1450,47 @@ exclude_slash_tmp = true

        let empty = BTreeMap::new();
        write_global_mcp_servers(codex_home.path(), &empty)?;
        let loaded = load_global_mcp_servers(codex_home.path())?;
        let loaded = load_global_mcp_servers(codex_home.path()).await?;
        assert!(loaded.is_empty());

        Ok(())
    }

    #[test]
    fn load_global_mcp_servers_accepts_legacy_ms_field() -> anyhow::Result<()> {
    #[tokio::test]
    async fn managed_config_wins_over_cli_overrides() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
        let managed_path = codex_home.path().join("managed_config.toml");

        std::fs::write(
            codex_home.path().join(CONFIG_TOML_FILE),
            "model = \"base\"\n",
        )?;
        std::fs::write(&managed_path, "model = \"managed_config\"\n")?;

        let overrides = crate::config_loader::LoaderOverrides {
            managed_config_path: Some(managed_path),
            #[cfg(target_os = "macos")]
            managed_preferences_base64: None,
        };

        let root_value = load_resolved_config(
            codex_home.path(),
            vec![("model".to_string(), TomlValue::String("cli".to_string()))],
            overrides,
        )
        .await?;

        let cfg: ConfigToml = root_value.try_into().map_err(|e| {
            tracing::error!("Failed to deserialize overridden config: {e}");
            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
        })?;

        assert_eq!(cfg.model.as_deref(), Some("managed_config"));
        Ok(())
    }

    #[tokio::test]
    async fn load_global_mcp_servers_accepts_legacy_ms_field() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;
        let config_path = codex_home.path().join(CONFIG_TOML_FILE);

@@ -1396,15 +1504,15 @@ startup_timeout_ms = 2500
"#,
        )?;

        let servers = load_global_mcp_servers(codex_home.path())?;
        let servers = load_global_mcp_servers(codex_home.path()).await?;
        let docs = servers.get("docs").expect("docs entry");
        assert_eq!(docs.startup_timeout_sec, Some(Duration::from_millis(2500)));

        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> {
    #[tokio::test]
    async fn write_global_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let servers = BTreeMap::from([(
@@ -1439,7 +1547,7 @@ ZIG_VAR = "3"
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let loaded = load_global_mcp_servers(codex_home.path()).await?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::Stdio { command, args, env } => {
@@ -1457,8 +1565,8 @@ ZIG_VAR = "3"
        Ok(())
    }

    #[test]
    fn write_global_mcp_servers_serializes_streamable_http() -> anyhow::Result<()> {
    #[tokio::test]
    async fn write_global_mcp_servers_serializes_streamable_http() -> anyhow::Result<()> {
        let codex_home = TempDir::new()?;

        let mut servers = BTreeMap::from([(
@@ -1486,7 +1594,7 @@ startup_timeout_sec = 2.0
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let loaded = load_global_mcp_servers(codex_home.path()).await?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
@@ -1518,7 +1626,7 @@ url = "https://example.com/mcp"
"#
        );

        let loaded = load_global_mcp_servers(codex_home.path())?;
        let loaded = load_global_mcp_servers(codex_home.path()).await?;
        let docs = loaded.get("docs").expect("docs entry");
        match &docs.transport {
            McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
@@ -1842,7 +1950,7 @@ model_verbosity = "high"
                model_verbosity: None,
                chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
                base_instructions: None,
                include_plan_tool: true,
                include_plan_tool: false,
                include_apply_patch_tool: false,
                tools_web_search_request: false,
                use_experimental_streamable_shell_tool: false,
@@ -1853,6 +1961,7 @@ model_verbosity = "high"
                disable_paste_burst: false,
                tui_notifications: Default::default(),
                otel: OtelConfig::default(),
                admin: AdminControls::default(),
            },
            o3_profile_config
        );
@@ -1903,7 +2012,7 @@ model_verbosity = "high"
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
            include_plan_tool: true,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
@@ -1914,6 +2023,7 @@ model_verbosity = "high"
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: OtelConfig::default(),
            admin: AdminControls::default(),
        };

        assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1979,7 +2089,7 @@ model_verbosity = "high"
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
            include_plan_tool: true,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
@@ -1990,6 +2100,7 @@ model_verbosity = "high"
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: OtelConfig::default(),
            admin: AdminControls::default(),
        };

        assert_eq!(expected_zdr_profile_config, zdr_profile_config);
@@ -2041,7 +2152,7 @@ model_verbosity = "high"
            model_verbosity: Some(Verbosity::High),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
            include_plan_tool: true,
            include_plan_tool: false,
            include_apply_patch_tool: false,
            tools_web_search_request: false,
            use_experimental_streamable_shell_tool: false,
@@ -2052,6 +2163,7 @@ model_verbosity = "high"
            disable_paste_burst: false,
            tui_notifications: Default::default(),
            otel: OtelConfig::default(),
            admin: AdminControls::default(),
        };

        assert_eq!(expected_gpt5_profile_config, gpt5_profile_config);
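Note the effective precedence after this refactor: `config.toml` is read first, `-c` overrides are applied to that base, and the managed layers are merged last, so administrator settings can no longer be undone from the command line. With `model = "base"` in `config.toml` and `model = "managed_config"` in the managed file, a `-c model=cli` override still resolves to `managed_config`, which is exactly what the `managed_config_wins_over_cli_overrides` test asserts.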
118 codex-rs/core/src/config_loader/macos.rs Normal file
@@ -0,0 +1,118 @@
use std::io;
use toml::Value as TomlValue;

#[cfg(target_os = "macos")]
mod native {
    use super::*;
    use base64::Engine;
    use base64::prelude::BASE64_STANDARD;
    use core_foundation::base::TCFType;
    use core_foundation::string::CFString;
    use core_foundation::string::CFStringRef;
    use std::ffi::c_void;
    use tokio::task;

    pub(crate) async fn load_managed_admin_config_layer(
        override_base64: Option<&str>,
    ) -> io::Result<Option<TomlValue>> {
        if let Some(encoded) = override_base64 {
            let trimmed = encoded.trim();
            return if trimmed.is_empty() {
                Ok(None)
            } else {
                parse_managed_preferences_base64(trimmed).map(Some)
            };
        }

        const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

        match task::spawn_blocking(load_managed_admin_config).await {
            Ok(result) => result,
            Err(join_err) => {
                if join_err.is_cancelled() {
                    tracing::error!("Managed preferences load task was cancelled");
                } else {
                    tracing::error!("Managed preferences load task failed: {join_err}");
                }
                Err(io::Error::other(LOAD_ERROR))
            }
        }
    }

    pub(super) fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
        #[link(name = "CoreFoundation", kind = "framework")]
        unsafe extern "C" {
            fn CFPreferencesCopyAppValue(
                key: CFStringRef,
                application_id: CFStringRef,
            ) -> *mut c_void;
        }

        const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
        const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";

        let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
        let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

        let value_ref = unsafe {
            CFPreferencesCopyAppValue(
                key.as_concrete_TypeRef(),
                application_id.as_concrete_TypeRef(),
            )
        };

        if value_ref.is_null() {
            tracing::debug!(
                "Managed preferences for {} key {} not found",
                MANAGED_PREFERENCES_APPLICATION_ID,
                MANAGED_PREFERENCES_CONFIG_KEY
            );
            return Ok(None);
        }

        let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
        let contents = value.to_string();
        let trimmed = contents.trim();

        parse_managed_preferences_base64(trimmed).map(Some)
    }

    pub(super) fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
        let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
            tracing::error!("Failed to decode managed preferences as base64: {err}");
            io::Error::new(io::ErrorKind::InvalidData, err)
        })?;

        let decoded_str = String::from_utf8(decoded).map_err(|err| {
            tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
            io::Error::new(io::ErrorKind::InvalidData, err)
        })?;

        match toml::from_str::<TomlValue>(&decoded_str) {
            Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
            Ok(other) => {
                tracing::error!(
                    "Managed preferences TOML must have a table at the root, found {other:?}",
                );
                Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "managed preferences root must be a table",
                ))
            }
            Err(err) => {
                tracing::error!("Failed to parse managed preferences TOML: {err}");
                Err(io::Error::new(io::ErrorKind::InvalidData, err))
            }
        }
    }
}

#[cfg(target_os = "macos")]
pub(crate) use native::load_managed_admin_config_layer;

#[cfg(not(target_os = "macos"))]
pub(crate) async fn load_managed_admin_config_layer(
    _override_base64: Option<&str>,
) -> io::Result<Option<TomlValue>> {
    Ok(None)
}
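The decoded payload must be a TOML table at the root; anything else is rejected with `InvalidData`. An MDM profile (or, for local testing, a manually written preference) would carry the base64 encoding of a document like the following under the `config_toml_base64` key of the `com.openai.codex` domain (field values invented for illustration):

    [admin]
    disallow_danger_full_access = true

    [admin.audit]
    log_file = "/var/log/codex/admin-audit.jsonl"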
311 codex-rs/core/src/config_loader/mod.rs Normal file
@@ -0,0 +1,311 @@
mod macos;

use crate::config::CONFIG_TOML_FILE;
use macos::load_managed_admin_config_layer;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use tokio::fs;
use toml::Value as TomlValue;

#[cfg(unix)]
const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml";

#[derive(Debug)]
pub(crate) struct LoadedConfigLayers {
    pub base: TomlValue,
    pub managed_config: Option<TomlValue>,
    pub managed_preferences: Option<TomlValue>,
}

#[derive(Debug, Default)]
pub(crate) struct LoaderOverrides {
    pub managed_config_path: Option<PathBuf>,
    #[cfg(target_os = "macos")]
    pub managed_preferences_base64: Option<String>,
}

// Configuration layering pipeline (top overrides bottom):
//
// +-------------------------+
// | Managed preferences (*) |
// +-------------------------+
//             ^
//             |
// +-------------------------+
// |   managed_config.toml   |
// +-------------------------+
//             ^
//             |
// +-------------------------+
// |   config.toml (base)    |
// +-------------------------+
//
// (*) Only available on macOS via managed device profiles.

pub async fn load_config_as_toml(codex_home: &Path) -> io::Result<TomlValue> {
    load_config_as_toml_with_overrides(codex_home, LoaderOverrides::default()).await
}

fn default_empty_table() -> TomlValue {
    TomlValue::Table(Default::default())
}

pub(crate) async fn load_config_layers_with_overrides(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<LoadedConfigLayers> {
    load_config_layers_internal(codex_home, overrides).await
}

async fn load_config_as_toml_with_overrides(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<TomlValue> {
    let layers = load_config_layers_internal(codex_home, overrides).await?;
    Ok(apply_managed_layers(layers))
}

async fn load_config_layers_internal(
    codex_home: &Path,
    overrides: LoaderOverrides,
) -> io::Result<LoadedConfigLayers> {
    #[cfg(target_os = "macos")]
    let LoaderOverrides {
        managed_config_path,
        managed_preferences_base64,
    } = overrides;

    #[cfg(not(target_os = "macos"))]
    let LoaderOverrides {
        managed_config_path,
    } = overrides;

    let managed_config_path =
        managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home));

    let user_config_path = codex_home.join(CONFIG_TOML_FILE);
    let user_config = read_config_from_path(&user_config_path, true).await?;
    let managed_config = read_config_from_path(&managed_config_path, false).await?;

    #[cfg(target_os = "macos")]
    let managed_preferences =
        load_managed_admin_config_layer(managed_preferences_base64.as_deref()).await?;

    #[cfg(not(target_os = "macos"))]
    let managed_preferences = load_managed_admin_config_layer(None).await?;

    Ok(LoadedConfigLayers {
        base: user_config.unwrap_or_else(default_empty_table),
        managed_config,
        managed_preferences,
    })
}

async fn read_config_from_path(
    path: &Path,
    log_missing_as_info: bool,
) -> io::Result<Option<TomlValue>> {
    match fs::read_to_string(path).await {
        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
            Ok(value) => Ok(Some(value)),
            Err(err) => {
                tracing::error!("Failed to parse {}: {err}", path.display());
                Err(io::Error::new(io::ErrorKind::InvalidData, err))
            }
        },
        Err(err) if err.kind() == io::ErrorKind::NotFound => {
            if log_missing_as_info {
                tracing::info!("{} not found, using defaults", path.display());
            } else {
                tracing::debug!("{} not found", path.display());
            }
            Ok(None)
        }
        Err(err) => {
            tracing::error!("Failed to read {}: {err}", path.display());
            Err(err)
        }
    }
}

/// Merge config `overlay` into `base`, giving `overlay` precedence.
pub(crate) fn merge_toml_values(base: &mut TomlValue, overlay: &TomlValue) {
    if let TomlValue::Table(overlay_table) = overlay
        && let TomlValue::Table(base_table) = base
    {
        for (key, value) in overlay_table {
            if let Some(existing) = base_table.get_mut(key) {
                merge_toml_values(existing, value);
            } else {
                base_table.insert(key.clone(), value.clone());
            }
        }
    } else {
        *base = overlay.clone();
    }
}

fn managed_config_default_path(codex_home: &Path) -> PathBuf {
    #[cfg(unix)]
    {
        let _ = codex_home;
        PathBuf::from(CODEX_MANAGED_CONFIG_SYSTEM_PATH)
    }

    #[cfg(not(unix))]
    {
        codex_home.join("managed_config.toml")
    }
}

fn apply_managed_layers(layers: LoadedConfigLayers) -> TomlValue {
    let LoadedConfigLayers {
        mut base,
        managed_config,
        managed_preferences,
    } = layers;

    for overlay in [managed_config, managed_preferences].into_iter().flatten() {
        merge_toml_values(&mut base, &overlay);
    }

    base
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn merges_managed_config_layer_on_top() {
        let tmp = tempdir().expect("tempdir");
        let managed_path = tmp.path().join("managed_config.toml");

        std::fs::write(
            tmp.path().join(CONFIG_TOML_FILE),
            r#"foo = 1

[nested]
value = "base"
"#,
        )
        .expect("write base");
        std::fs::write(
            &managed_path,
            r#"foo = 2

[nested]
value = "managed_config"
extra = true
"#,
        )
        .expect("write managed config");

        let overrides = LoaderOverrides {
            managed_config_path: Some(managed_path),
            #[cfg(target_os = "macos")]
            managed_preferences_base64: None,
        };

        let loaded = load_config_as_toml_with_overrides(tmp.path(), overrides)
            .await
            .expect("load config");
        let table = loaded.as_table().expect("top-level table expected");

        assert_eq!(table.get("foo"), Some(&TomlValue::Integer(2)));
        let nested = table
            .get("nested")
            .and_then(|v| v.as_table())
            .expect("nested");
        assert_eq!(
            nested.get("value"),
            Some(&TomlValue::String("managed_config".to_string()))
        );
        assert_eq!(nested.get("extra"), Some(&TomlValue::Boolean(true)));
    }

    #[tokio::test]
    async fn returns_empty_when_all_layers_missing() {
        let tmp = tempdir().expect("tempdir");
        let managed_path = tmp.path().join("managed_config.toml");
        let overrides = LoaderOverrides {
            managed_config_path: Some(managed_path),
            #[cfg(target_os = "macos")]
            managed_preferences_base64: None,
        };

        let layers = load_config_layers_with_overrides(tmp.path(), overrides)
            .await
            .expect("load layers");
        let base_table = layers.base.as_table().expect("base table expected");
        assert!(
            base_table.is_empty(),
            "expected empty base layer when configs missing"
        );
        assert!(
            layers.managed_config.is_none(),
            "managed config layer should be absent when file missing"
        );

        #[cfg(not(target_os = "macos"))]
        {
            let loaded = load_config_as_toml(tmp.path()).await.expect("load config");
            let table = loaded.as_table().expect("top-level table expected");
            assert!(
                table.is_empty(),
                "expected empty table when configs missing"
            );
        }
    }

    #[cfg(target_os = "macos")]
    #[tokio::test]
    async fn managed_preferences_take_highest_precedence() {
        use base64::Engine;

        let managed_payload = r#"
[nested]
value = "managed"
flag = false
"#;
        let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());
        let tmp = tempdir().expect("tempdir");
        let managed_path = tmp.path().join("managed_config.toml");

        std::fs::write(
            tmp.path().join(CONFIG_TOML_FILE),
            r#"[nested]
value = "base"
"#,
        )
        .expect("write base");
        std::fs::write(
            &managed_path,
            r#"[nested]
value = "managed_config"
flag = true
"#,
        )
        .expect("write managed config");

        let overrides = LoaderOverrides {
            managed_config_path: Some(managed_path),
            managed_preferences_base64: Some(encoded),
        };

        let loaded = load_config_as_toml_with_overrides(tmp.path(), overrides)
            .await
            .expect("load config");
        let nested = loaded
            .get("nested")
            .and_then(|v| v.as_table())
            .expect("nested table");
        assert_eq!(
            nested.get("value"),
            Some(&TomlValue::String("managed".to_string()))
        );
        assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
    }
}
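`merge_toml_values` recurses only through tables, so an overlay replaces just the keys it actually sets; scalars and arrays are overwritten wholesale. For instance, overlaying `[nested] value = "managed"` onto a base containing `[nested] value = "base"` plus `keep = 1` yields `nested.value = "managed"` while `nested.keep = 1` survives, the same additive behavior the first test above exercises with its `extra` key.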
@@ -563,3 +563,34 @@ mod tests {
        .expect_err("should reject bearer token for stdio transport");
    }
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct AdminConfigToml {
    #[serde(default)]
    pub disallow_danger_full_access: Option<bool>,

    #[serde(default)]
    pub allow_danger_with_reason: Option<bool>,

    #[serde(default)]
    pub audit: Option<AdminAuditToml>,
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct AdminAuditToml {
    #[serde(default)]
    pub log_file: Option<String>,

    #[serde(default)]
    pub log_endpoint: Option<String>,

    #[serde(default)]
    pub log_events: Vec<AdminAuditEventKind>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AdminAuditEventKind {
    Danger,
    Command,
}
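A minimal sketch (not part of the diff) of how these admin types deserialize; it assumes the `toml` crate and the structs above in scope:

// Hypothetical round-trip check; key names follow the struct fields above.
let raw = r#"
disallow_danger_full_access = true

[audit]
log_file = "/var/log/codex-admin.jsonl"
log_events = ["danger", "command"]
"#;
let cfg: AdminConfigToml = toml::from_str(raw).expect("parse admin config");
assert_eq!(cfg.disallow_danger_full_access, Some(true));
let audit = cfg.audit.expect("audit table");
assert_eq!(
    audit.log_events,
    vec![AdminAuditEventKind::Danger, AdminAuditEventKind::Command]
);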
@@ -17,6 +17,7 @@ use codex_protocol::ConversationId;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
@@ -35,20 +36,25 @@ pub struct NewConversation {
pub struct ConversationManager {
    conversations: Arc<RwLock<HashMap<ConversationId, Arc<CodexConversation>>>>,
    auth_manager: Arc<AuthManager>,
    session_source: SessionSource,
}

impl ConversationManager {
    pub fn new(auth_manager: Arc<AuthManager>) -> Self {
    pub fn new(auth_manager: Arc<AuthManager>, session_source: SessionSource) -> Self {
        Self {
            conversations: Arc::new(RwLock::new(HashMap::new())),
            auth_manager,
            session_source,
        }
    }

    /// Construct with a dummy AuthManager containing the provided CodexAuth.
    /// Used for integration tests: should not be used by ordinary business logic.
    pub fn with_auth(auth: CodexAuth) -> Self {
        Self::new(crate::AuthManager::from_auth_for_testing(auth))
        Self::new(
            crate::AuthManager::from_auth_for_testing(auth),
            SessionSource::Exec,
        )
    }

    pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
@@ -64,7 +70,13 @@ impl ConversationManager {
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(config, auth_manager, InitialHistory::New).await?;
        } = Codex::spawn(
            config,
            auth_manager,
            InitialHistory::New,
            self.session_source,
        )
        .await?;
        self.finalize_spawn(codex, conversation_id).await
    }

@@ -121,7 +133,7 @@ impl ConversationManager {
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(config, auth_manager, initial_history).await?;
        } = Codex::spawn(config, auth_manager, initial_history, self.session_source).await?;
        self.finalize_spawn(codex, conversation_id).await
    }

@@ -155,7 +167,7 @@ impl ConversationManager {
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(config, auth_manager, history).await?;
        } = Codex::spawn(config, auth_manager, history, self.session_source).await?;

        self.finalize_spawn(codex, conversation_id).await
    }
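A sketch of the updated call shape (assumes an existing `auth_manager: Arc<AuthManager>` in the calling scope); callers now state where the session originates:

// Hypothetical call site: the SessionSource argument is new in this change.
let manager = ConversationManager::new(auth_manager, SessionSource::Cli);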
@@ -108,6 +108,9 @@ pub enum CodexErr {
    #[error("unsupported operation: {0}")]
    UnsupportedOperation(String),

    #[error("Fatal error: {0}")]
    Fatal(String),

    // -----------------------------------------------------------------
    // Automatic conversions for common external error types
    // -----------------------------------------------------------------

@@ -27,6 +27,7 @@ use crate::protocol::SandboxPolicy;
use crate::seatbelt::spawn_command_under_seatbelt;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde::Serialize;

const DEFAULT_TIMEOUT_MS: u64 = 10_000;

@@ -61,7 +62,8 @@ impl ExecParams {
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxType {
    None,
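With `Serialize` and kebab-case renaming derived, variants serialize as lowercase kebab strings; a minimal sketch (assumes `serde_json`; only the `None` variant is visible in this hunk):

// Hypothetical: "None" renders as the kebab-case string "none".
assert_eq!(serde_json::to_string(&SandboxType::None).unwrap(), "\"none\"");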
@@ -1,7 +1,7 @@
use std::collections::BTreeMap;

use crate::client_common::tools::ResponsesApiTool;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::ResponsesApiTool;

pub const EXEC_COMMAND_TOOL_NAME: &str = "exec_command";
pub const WRITE_STDIN_TOOL_NAME: &str = "write_stdin";
@@ -49,7 +49,7 @@ pub fn create_exec_command_tool_for_responses_api() -> ResponsesApiTool {
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["cmd".to_string()]),
            additional_properties: Some(false),
            additional_properties: Some(false.into()),
        },
    }
}
@@ -92,7 +92,7 @@ Can write control characters (\u0003 for Ctrl-C), or an empty string to just pol
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["session_id".to_string(), "chars".to_string()]),
            additional_properties: Some(false),
            additional_properties: Some(false.into()),
        },
    }
}
@@ -10,11 +10,11 @@ pub(crate) use runner::ExecutorConfig;
pub(crate) use runner::normalize_exec_result;

pub(crate) mod linkers {
    use crate::codex::ExecCommandContext;
    use crate::exec::ExecParams;
    use crate::exec::StdoutStream;
    use crate::executor::backends::ExecutionMode;
    use crate::executor::runner::ExecutionRequest;
    use crate::tools::context::ExecCommandContext;

    pub struct PreparedExec {
        pub(crate) context: ExecCommandContext,

@@ -6,8 +6,11 @@ use std::time::Duration;
use super::backends::ExecutionMode;
use super::backends::backend_for_mode;
use super::cache::ApprovalCache;
use crate::codex::ExecCommandContext;
use crate::admin_controls::AdminAuditConfig;
use crate::admin_controls::build_command_audit_payload;
use crate::admin_controls::log_admin_event;
use crate::codex::Session;
use crate::config_types::AdminAuditEventKind;
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::error::get_error_message_ui;
@@ -24,6 +27,7 @@ use crate::protocol::AskForApproval;
use crate::protocol::ReviewDecision;
use crate::protocol::SandboxPolicy;
use crate::shell;
use crate::tools::context::ExecCommandContext;
use codex_otel::otel_event_manager::ToolDecisionSource;

#[derive(Clone, Debug)]
@@ -31,6 +35,7 @@ pub(crate) struct ExecutorConfig {
    pub(crate) sandbox_policy: SandboxPolicy,
    pub(crate) sandbox_cwd: PathBuf,
    codex_linux_sandbox_exe: Option<PathBuf>,
    pub(crate) admin_audit: Option<AdminAuditConfig>,
}

impl ExecutorConfig {
@@ -38,11 +43,13 @@ impl ExecutorConfig {
        sandbox_policy: SandboxPolicy,
        sandbox_cwd: PathBuf,
        codex_linux_sandbox_exe: Option<PathBuf>,
        admin_audit: Option<AdminAuditConfig>,
    ) -> Self {
        Self {
            sandbox_policy,
            sandbox_cwd,
            codex_linux_sandbox_exe,
            admin_audit,
        }
    }
}
@@ -148,10 +155,8 @@ impl Executor {
                )
                .await
            } else {
                Err(ExecError::rejection(format!(
                    "failed in sandbox {:?} with execution error: {error:?}",
                    sandbox_decision.initial_sandbox
                )))
                let message = sandbox_failure_message(error);
                Err(ExecError::rejection(message))
            }
        }
        Err(err) => Err(err.into()),
@@ -224,6 +229,17 @@ impl Executor {
        config: &ExecutorConfig,
        stdout_stream: Option<StdoutStream>,
    ) -> Result<ExecToolCallOutput, CodexErr> {
        if let Some(admin_audit) = config.admin_audit.as_ref()
            && admin_audit.should_log(AdminAuditEventKind::Command)
        {
            let payload = build_command_audit_payload(
                &params,
                sandbox,
                &config.sandbox_policy,
                &config.sandbox_cwd,
            );
            log_admin_event(admin_audit, payload);
        }
        process_exec_tool_call(
            params,
            sandbox,
@@ -255,6 +271,12 @@ fn maybe_translate_shell_command(
    params
}

fn sandbox_failure_message(error: SandboxErr) -> String {
    let codex_error = CodexErr::Sandbox(error);
    let friendly = get_error_message_ui(&codex_error);
    format!("failed in sandbox: {friendly}")
}

pub(crate) struct ExecutionRequest {
    pub params: ExecParams,
    pub approval_command: Vec<String>,
@@ -299,6 +321,7 @@ pub(crate) fn normalize_exec_result(
    let message = match err {
        ExecError::Function(FunctionCallError::RespondToModel(msg)) => msg.clone(),
        ExecError::Codex(e) => get_error_message_ui(e),
        err => err.to_string(),
    };
    let synthetic = ExecToolCallOutput {
        exit_code: -1,
@@ -358,6 +381,23 @@ mod tests {
        );
    }

    #[test]
    fn sandbox_failure_message_uses_denied_stderr() {
        let output = ExecToolCallOutput {
            exit_code: 101,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new("sandbox stderr".to_string()),
            aggregated_output: StreamOutput::new(String::new()),
            duration: Duration::from_millis(10),
            timed_out: false,
        };
        let err = SandboxErr::Denied {
            output: Box::new(output),
        };
        let message = sandbox_failure_message(err);
        assert_eq!(message, "failed in sandbox: sandbox stderr");
    }

    #[test]
    fn normalize_function_error_synthesizes_payload() {
        let err = FunctionCallError::RespondToModel("boom".to_string());
@@ -207,7 +207,7 @@ mod tests {
            action,
            user_explicitly_approved_this_action: true,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None, None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
@@ -250,7 +250,12 @@ mod tests {
            action,
            user_explicitly_approved_this_action: false,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::DangerFullAccess, std::env::temp_dir(), None);
        let cfg = ExecutorConfig::new(
            SandboxPolicy::DangerFullAccess,
            std::env::temp_dir(),
            None,
            None,
        );
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
@@ -294,7 +299,7 @@ mod tests {
            action,
            user_explicitly_approved_this_action: false,
        };
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None, None);
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["apply_patch".into()],
@@ -333,7 +338,12 @@ mod tests {
    #[tokio::test]
    async fn select_shell_autoapprove_in_danger_mode() {
        let (session, ctx) = make_session_and_context();
        let cfg = ExecutorConfig::new(SandboxPolicy::DangerFullAccess, std::env::temp_dir(), None);
        let cfg = ExecutorConfig::new(
            SandboxPolicy::DangerFullAccess,
            std::env::temp_dir(),
            None,
            None,
        );
        let request = ExecutionRequest {
            params: ExecParams {
                command: vec!["some-unknown".into()],
@@ -369,7 +379,7 @@ mod tests {
    #[tokio::test]
    async fn select_shell_escalates_on_failure_with_platform_sandbox() {
        let (session, ctx) = make_session_and_context();
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None);
        let cfg = ExecutorConfig::new(SandboxPolicy::ReadOnly, std::env::temp_dir(), None, None);
        let request = ExecutionRequest {
            params: ExecParams {
                // Unknown command => untrusted but not flagged dangerous
@@ -4,4 +4,8 @@ use thiserror::Error;
pub enum FunctionCallError {
    #[error("{0}")]
    RespondToModel(String),
    #[error("LocalShellCall without call_id or id")]
    MissingLocalShellCallId,
    #[error("Fatal error: {0}")]
    Fatal(String),
}
@@ -5,6 +5,7 @@
// the TUI or the tracing stack).
#![deny(clippy::print_stdout, clippy::print_stderr)]

pub mod admin_controls;
mod apply_patch;
pub mod auth;
pub mod bash;
@@ -18,6 +19,7 @@ pub use codex_conversation::CodexConversation;
mod command_safety;
pub mod config;
pub mod config_edit;
pub mod config_loader;
pub mod config_profile;
pub mod config_types;
mod conversation_history;
@@ -36,6 +38,7 @@ mod mcp_tool_call;
mod message_history;
mod model_provider_info;
pub mod parse_command;
mod path_utils;
mod truncate;
mod unified_exec;
mod user_instructions;
@@ -57,7 +60,6 @@ pub mod default_client;
pub mod model_family;
mod openai_model_info;
mod openai_tools;
pub mod plan_tool;
pub mod project_doc;
mod rollout;
pub(crate) mod safety;
@@ -65,9 +67,10 @@ pub mod seatbelt;
pub mod shell;
pub mod spawn;
pub mod terminal;
mod tool_apply_patch;
mod tools;
pub mod turn_diff_tracker;
pub use rollout::ARCHIVED_SESSIONS_SUBDIR;
pub use rollout::INTERACTIVE_SESSION_SOURCES;
pub use rollout::RolloutRecorder;
pub use rollout::SESSIONS_SUBDIR;
pub use rollout::SessionMeta;
@@ -123,12 +123,15 @@ impl McpClientAdapter {
    }

    async fn new_streamable_http_client(
        server_name: String,
        url: String,
        bearer_token: Option<String>,
        params: mcp_types::InitializeRequestParams,
        startup_timeout: Duration,
    ) -> Result<Self> {
        let client = Arc::new(RmcpClient::new_streamable_http_client(url, bearer_token)?);
        let client = Arc::new(
            RmcpClient::new_streamable_http_client(&server_name, &url, bearer_token).await?,
        );
        client.initialize(params, Some(startup_timeout)).await?;
        Ok(McpClientAdapter::Rmcp(client))
    }
@@ -208,8 +211,7 @@ impl McpConnectionManager {
            ) && !use_rmcp_client
            {
                info!(
                    "skipping MCP server `{}` configured with url because rmcp client is disabled",
                    server_name
                    "skipping MCP server `{server_name}` because the legacy MCP client only supports stdio servers",
                );
                continue;
            }
@@ -217,7 +219,6 @@ impl McpConnectionManager {
            let startup_timeout = cfg.startup_timeout_sec.unwrap_or(DEFAULT_STARTUP_TIMEOUT);
            let tool_timeout = cfg.tool_timeout_sec.unwrap_or(DEFAULT_TOOL_TIMEOUT);

            let use_rmcp_client_flag = use_rmcp_client;
            join_set.spawn(async move {
                let McpServerConfig { transport, .. } = cfg;
                let params = mcp_types::InitializeRequestParams {
@@ -246,17 +247,18 @@ impl McpConnectionManager {
                        let command_os: OsString = command.into();
                        let args_os: Vec<OsString> = args.into_iter().map(Into::into).collect();
                        McpClientAdapter::new_stdio_client(
                            use_rmcp_client_flag,
                            use_rmcp_client,
                            command_os,
                            args_os,
                            env,
                            params.clone(),
                            params,
                            startup_timeout,
                        )
                        .await
                    }
                    McpServerTransportConfig::StreamableHttp { url, bearer_token } => {
                        McpClientAdapter::new_streamable_http_client(
                            server_name.clone(),
                            url,
                            bearer_token,
                            params,
@@ -1,5 +1,5 @@
use crate::config_types::ReasoningSummaryFormat;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tools::handlers::apply_patch::ApplyPatchToolType;

/// The `instructions` field in the payload sent to a model should always start
/// with this content.
@@ -41,6 +41,9 @@ pub struct ModelFamily {

    // Instructions to use for querying the model
    pub base_instructions: String,

    /// Names of beta tools that should be exposed to this model family.
    pub experimental_supported_tools: Vec<String>,
}

macro_rules! model_family {
@@ -57,6 +60,7 @@ macro_rules! model_family {
            uses_local_shell_tool: false,
            apply_patch_tool_type: None,
            base_instructions: BASE_INSTRUCTIONS.to_string(),
            experimental_supported_tools: Vec::new(),
        };
        // apply overrides
        $(
@@ -105,6 +109,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
            supports_reasoning_summaries: true,
            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            experimental_supported_tools: vec!["read_file".to_string()],
        )
    } else if slug.starts_with("gpt-5") {
        model_family!(
@@ -127,5 +132,6 @@ pub fn derive_default_model_family(model: &str) -> ModelFamily {
        uses_local_shell_tool: false,
        apply_patch_tool_type: None,
        base_instructions: BASE_INSTRUCTIONS.to_string(),
        experimental_supported_tools: Vec::new(),
    }
}
File diff suppressed because it is too large
codex-rs/core/src/path_utils.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
use std::io;
use std::path::PathBuf;

pub(crate) fn expand_tilde(raw: &str) -> io::Result<PathBuf> {
    if raw.starts_with('~') {
        // `shellexpand::tilde` falls back to returning the input when the home directory
        // cannot be resolved; mirror the previous error semantics in that case.
        let expanded = shellexpand::tilde(raw);
        if expanded.starts_with('~') {
            return Err(io::Error::new(
                io::ErrorKind::NotFound,
                "could not resolve home directory while expanding path",
            ));
        }
        return Ok(PathBuf::from(expanded.as_ref()));
    }

    Ok(PathBuf::from(raw))
}
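An illustrative, crate-internal use of `expand_tilde` (the actual call sites are not shown in this diff):

// Hypothetical usage: tilde paths expand, absolute paths pass through unchanged.
let cfg = expand_tilde("~/.codex/config.toml")?;
assert!(!cfg.to_string_lossy().starts_with('~'));
assert_eq!(expand_tilde("/etc/codex.toml")?, PathBuf::from("/etc/codex.toml"));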
@@ -17,6 +17,7 @@ use super::SESSIONS_SUBDIR;
use crate::protocol::EventMsg;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionSource;

/// Returned page of conversation summaries.
#[derive(Debug, Default, PartialEq)]
@@ -52,6 +53,7 @@ struct HeadTailSummary {
    tail: Vec<serde_json::Value>,
    saw_session_meta: bool,
    saw_user_event: bool,
    source: Option<SessionSource>,
    created_at: Option<String>,
    updated_at: Option<String>,
}
@@ -106,6 +108,7 @@ pub(crate) async fn get_conversations(
    codex_home: &Path,
    page_size: usize,
    cursor: Option<&Cursor>,
    allowed_sources: &[SessionSource],
) -> io::Result<ConversationsPage> {
    let mut root = codex_home.to_path_buf();
    root.push(SESSIONS_SUBDIR);
@@ -121,7 +124,8 @@ pub(crate) async fn get_conversations(

    let anchor = cursor.cloned();

    let result = traverse_directories_for_paths(root.clone(), page_size, anchor).await?;
    let result =
        traverse_directories_for_paths(root.clone(), page_size, anchor, allowed_sources).await?;
    Ok(result)
}

@@ -140,6 +144,7 @@ async fn traverse_directories_for_paths(
    root: PathBuf,
    page_size: usize,
    anchor: Option<Cursor>,
    allowed_sources: &[SessionSource],
) -> io::Result<ConversationsPage> {
    let mut items: Vec<ConversationItem> = Vec::with_capacity(page_size);
    let mut scanned_files = 0usize;
@@ -196,6 +201,13 @@ async fn traverse_directories_for_paths(
            let summary = read_head_and_tail(&path, HEAD_RECORD_LIMIT, TAIL_RECORD_LIMIT)
                .await
                .unwrap_or_default();
            if !allowed_sources.is_empty()
                && !summary
                    .source
                    .is_some_and(|source| allowed_sources.iter().any(|s| s == &source))
            {
                continue;
            }
            // Apply filters: must have session meta and at least one user message event
            if summary.saw_session_meta && summary.saw_user_event {
                let HeadTailSummary {
@@ -341,6 +353,7 @@ async fn read_head_and_tail(

            match rollout_line.item {
                RolloutItem::SessionMeta(session_meta_line) => {
                    summary.source = Some(session_meta_line.meta.source);
                    summary.created_at = summary
                        .created_at
                        .clone()
@@ -1,7 +1,11 @@
//! Rollout module: persistence and discovery of session rollout files.

use codex_protocol::protocol::SessionSource;

pub const SESSIONS_SUBDIR: &str = "sessions";
pub const ARCHIVED_SESSIONS_SUBDIR: &str = "archived_sessions";
pub const INTERACTIVE_SESSION_SOURCES: &[SessionSource] =
    &[SessionSource::Cli, SessionSource::VSCode];

pub mod list;
pub(crate) mod policy;
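A sketch of filtering a listing to interactive sessions with the new constant, mirroring the crate-internal `get_conversations` signature shown above:

// Hypothetical call: only CLI and VS Code sessions are returned.
let page = get_conversations(codex_home, 10, None, INTERACTIVE_SESSION_SOURCES)
    .await
    .unwrap();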
@@ -70,6 +70,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool {
        | EventMsg::ListCustomPromptsResponse(_)
        | EventMsg::PlanUpdate(_)
        | EventMsg::ShutdownComplete
        | EventMsg::ViewImageToolCall(_)
        | EventMsg::ConversationPath(_) => false,
    }
}
@@ -32,6 +32,7 @@ use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;

/// Records all [`ResponseItem`]s for a session and flushes them to disk after
/// every update.
@@ -53,6 +54,7 @@ pub enum RolloutRecorderParams {
    Create {
        conversation_id: ConversationId,
        instructions: Option<String>,
        source: SessionSource,
    },
    Resume {
        path: PathBuf,
@@ -71,10 +73,15 @@ enum RolloutCmd {
}

impl RolloutRecorderParams {
    pub fn new(conversation_id: ConversationId, instructions: Option<String>) -> Self {
    pub fn new(
        conversation_id: ConversationId,
        instructions: Option<String>,
        source: SessionSource,
    ) -> Self {
        Self::Create {
            conversation_id,
            instructions,
            source,
        }
    }
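A sketch of constructing params with the added `source` argument:

// Hypothetical: `conversation_id` is assumed to exist in the calling scope.
let params = RolloutRecorderParams::new(conversation_id, None, SessionSource::VSCode);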
@@ -89,8 +96,9 @@ impl RolloutRecorder {
        codex_home: &Path,
        page_size: usize,
        cursor: Option<&Cursor>,
        allowed_sources: &[SessionSource],
    ) -> std::io::Result<ConversationsPage> {
        get_conversations(codex_home, page_size, cursor).await
        get_conversations(codex_home, page_size, cursor, allowed_sources).await
    }

    /// Attempt to create a new [`RolloutRecorder`]. If the sessions directory
@@ -101,6 +109,7 @@ impl RolloutRecorder {
            RolloutRecorderParams::Create {
                conversation_id,
                instructions,
                source,
            } => {
                let LogFileInfo {
                    file,
@@ -127,6 +136,7 @@ impl RolloutRecorder {
                    originator: originator().value.clone(),
                    cli_version: env!("CARGO_PKG_VERSION").to_string(),
                    instructions,
                    source,
                }),
            )
        }
@@ -12,6 +12,7 @@ use time::format_description::FormatItem;
use time::macros::format_description;
use uuid::Uuid;

use crate::rollout::INTERACTIVE_SESSION_SOURCES;
use crate::rollout::list::ConversationItem;
use crate::rollout::list::ConversationsPage;
use crate::rollout::list::Cursor;
@@ -28,13 +29,17 @@ use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::RolloutLine;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::UserMessageEvent;

const NO_SOURCE_FILTER: &[SessionSource] = &[];

fn write_session_file(
    root: &Path,
    ts_str: &str,
    uuid: Uuid,
    num_records: usize,
    source: Option<SessionSource>,
) -> std::io::Result<(OffsetDateTime, Uuid)> {
    let format: &[FormatItem] =
        format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]");
@@ -52,17 +57,23 @@ fn write_session_file(
    let file_path = dir.join(filename);
    let mut file = File::create(file_path)?;

    let mut payload = serde_json::json!({
        "id": uuid,
        "timestamp": ts_str,
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version",
    });

    if let Some(source) = source {
        payload["source"] = serde_json::to_value(source).unwrap();
    }

    let meta = serde_json::json!({
        "timestamp": ts_str,
        "type": "session_meta",
        "payload": {
            "id": uuid,
            "timestamp": ts_str,
            "instructions": null,
            "cwd": ".",
            "originator": "test_originator",
            "cli_version": "test_version"
        }
        "payload": payload,
    });
    writeln!(file, "{meta}")?;

@@ -99,11 +110,34 @@ async fn test_list_conversations_latest_first() {
    let u3 = Uuid::from_u128(3);

    // Create three sessions across three days
    write_session_file(home, "2025-01-01T12-00-00", u1, 3).unwrap();
    write_session_file(home, "2025-01-02T12-00-00", u2, 3).unwrap();
    write_session_file(home, "2025-01-03T12-00-00", u3, 3).unwrap();
    write_session_file(
        home,
        "2025-01-01T12-00-00",
        u1,
        3,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-01-02T12-00-00",
        u2,
        3,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-01-03T12-00-00",
        u3,
        3,
        Some(SessionSource::VSCode),
    )
    .unwrap();

    let page = get_conversations(home, 10, None).await.unwrap();
    let page = get_conversations(home, 10, None, INTERACTIVE_SESSION_SOURCES)
        .await
        .unwrap();

    // Build expected objects
    let p1 = home
@@ -131,7 +165,8 @@ async fn test_list_conversations_latest_first() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let head_2 = vec![serde_json::json!({
        "id": u2,
@@ -139,7 +174,8 @@ async fn test_list_conversations_latest_first() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let head_1 = vec![serde_json::json!({
        "id": u1,
@@ -147,7 +183,8 @@ async fn test_list_conversations_latest_first() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];

    let expected_cursor: Cursor =
@@ -198,13 +235,50 @@ async fn test_pagination_cursor() {
    let u5 = Uuid::from_u128(55);

    // Oldest to newest
    write_session_file(home, "2025-03-01T09-00-00", u1, 1).unwrap();
    write_session_file(home, "2025-03-02T09-00-00", u2, 1).unwrap();
    write_session_file(home, "2025-03-03T09-00-00", u3, 1).unwrap();
    write_session_file(home, "2025-03-04T09-00-00", u4, 1).unwrap();
    write_session_file(home, "2025-03-05T09-00-00", u5, 1).unwrap();
    write_session_file(
        home,
        "2025-03-01T09-00-00",
        u1,
        1,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-03-02T09-00-00",
        u2,
        1,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-03-03T09-00-00",
        u3,
        1,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-03-04T09-00-00",
        u4,
        1,
        Some(SessionSource::VSCode),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-03-05T09-00-00",
        u5,
        1,
        Some(SessionSource::VSCode),
    )
    .unwrap();

    let page1 = get_conversations(home, 2, None).await.unwrap();
    let page1 = get_conversations(home, 2, None, INTERACTIVE_SESSION_SOURCES)
        .await
        .unwrap();
    let p5 = home
        .join("sessions")
        .join("2025")
@@ -223,7 +297,8 @@ async fn test_pagination_cursor() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let head_4 = vec![serde_json::json!({
        "id": u4,
@@ -231,7 +306,8 @@ async fn test_pagination_cursor() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let expected_cursor1: Cursor =
        serde_json::from_str(&format!("\"2025-03-04T09-00-00|{u4}\"")).unwrap();
@@ -258,9 +334,14 @@ async fn test_pagination_cursor() {
    };
    assert_eq!(page1, expected_page1);

    let page2 = get_conversations(home, 2, page1.next_cursor.as_ref())
        .await
        .unwrap();
    let page2 = get_conversations(
        home,
        2,
        page1.next_cursor.as_ref(),
        INTERACTIVE_SESSION_SOURCES,
    )
    .await
    .unwrap();
    let p3 = home
        .join("sessions")
        .join("2025")
@@ -279,7 +360,8 @@ async fn test_pagination_cursor() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let head_2 = vec![serde_json::json!({
        "id": u2,
@@ -287,7 +369,8 @@ async fn test_pagination_cursor() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let expected_cursor2: Cursor =
        serde_json::from_str(&format!("\"2025-03-02T09-00-00|{u2}\"")).unwrap();
@@ -314,9 +397,14 @@ async fn test_pagination_cursor() {
    };
    assert_eq!(page2, expected_page2);

    let page3 = get_conversations(home, 2, page2.next_cursor.as_ref())
        .await
        .unwrap();
    let page3 = get_conversations(
        home,
        2,
        page2.next_cursor.as_ref(),
        INTERACTIVE_SESSION_SOURCES,
    )
    .await
    .unwrap();
    let p1 = home
        .join("sessions")
        .join("2025")
@@ -329,7 +417,8 @@ async fn test_pagination_cursor() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let expected_cursor3: Cursor =
        serde_json::from_str(&format!("\"2025-03-01T09-00-00|{u1}\"")).unwrap();
@@ -355,9 +444,11 @@ async fn test_get_conversation_contents() {

    let uuid = Uuid::new_v4();
    let ts = "2025-04-01T10-30-00";
    write_session_file(home, ts, uuid, 2).unwrap();
    write_session_file(home, ts, uuid, 2, Some(SessionSource::VSCode)).unwrap();

    let page = get_conversations(home, 1, None).await.unwrap();
    let page = get_conversations(home, 1, None, INTERACTIVE_SESSION_SOURCES)
        .await
        .unwrap();
    let path = &page.items[0].path;

    let content = get_conversation(path).await.unwrap();
@@ -375,7 +466,8 @@ async fn test_get_conversation_contents() {
        "instructions": null,
        "cwd": ".",
        "originator": "test_originator",
        "cli_version": "test_version"
        "cli_version": "test_version",
        "source": "vscode",
    })];
    let expected_cursor: Cursor = serde_json::from_str(&format!("\"{ts}|{uuid}\"")).unwrap();
    let expected_page = ConversationsPage {
@@ -393,7 +485,19 @@ async fn test_get_conversation_contents() {
    assert_eq!(page, expected_page);

    // Entire file contents equality
    let meta = serde_json::json!({"timestamp": ts, "type": "session_meta", "payload": {"id": uuid, "timestamp": ts, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version"}});
    let meta = serde_json::json!({
        "timestamp": ts,
        "type": "session_meta",
        "payload": {
            "id": uuid,
            "timestamp": ts,
            "instructions": null,
            "cwd": ".",
            "originator": "test_originator",
            "cli_version": "test_version",
            "source": "vscode",
        }
    });
    let user_event = serde_json::json!({
        "timestamp": ts,
        "type": "event_msg",
@@ -428,6 +532,7 @@ async fn test_tail_includes_last_response_items() -> Result<()> {
            cwd: ".".into(),
            originator: "test_originator".into(),
            cli_version: "test_version".into(),
            source: SessionSource::VSCode,
        },
        git: None,
    }),
@@ -460,7 +565,7 @@ async fn test_tail_includes_last_response_items() -> Result<()> {
    }
    drop(file);

    let page = get_conversations(home, 1, None).await?;
    let page = get_conversations(home, 1, None, INTERACTIVE_SESSION_SOURCES).await?;
    let item = page.items.first().expect("conversation item");
    let tail_len = item.tail.len();
    assert_eq!(tail_len, 10usize.min(total_messages));
@@ -511,6 +616,7 @@ async fn test_tail_handles_short_sessions() -> Result<()> {
            cwd: ".".into(),
            originator: "test_originator".into(),
            cli_version: "test_version".into(),
            source: SessionSource::VSCode,
        },
        git: None,
    }),
@@ -542,7 +648,7 @@ async fn test_tail_handles_short_sessions() -> Result<()> {
    }
    drop(file);

    let page = get_conversations(home, 1, None).await?;
    let page = get_conversations(home, 1, None, INTERACTIVE_SESSION_SOURCES).await?;
    let tail = &page.items.first().expect("conversation item").tail;

    assert_eq!(tail.len(), 3);
@@ -595,6 +701,7 @@ async fn test_tail_skips_trailing_non_responses() -> Result<()> {
            cwd: ".".into(),
            originator: "test_originator".into(),
            cli_version: "test_version".into(),
            source: SessionSource::VSCode,
        },
        git: None,
    }),
@@ -640,7 +747,7 @@ async fn test_tail_skips_trailing_non_responses() -> Result<()> {
    writeln!(file, "{}", serde_json::to_string(&shutdown_event)?)?;
    drop(file);

    let page = get_conversations(home, 1, None).await?;
    let page = get_conversations(home, 1, None, INTERACTIVE_SESSION_SOURCES).await?;
    let tail = &page.items.first().expect("conversation item").tail;

    let expected: Vec<serde_json::Value> = (0..4)
@@ -678,11 +785,13 @@ async fn test_stable_ordering_same_second_pagination() {
    let u2 = Uuid::from_u128(2);
    let u3 = Uuid::from_u128(3);

    write_session_file(home, ts, u1, 0).unwrap();
    write_session_file(home, ts, u2, 0).unwrap();
    write_session_file(home, ts, u3, 0).unwrap();
    write_session_file(home, ts, u1, 0, Some(SessionSource::VSCode)).unwrap();
    write_session_file(home, ts, u2, 0, Some(SessionSource::VSCode)).unwrap();
    write_session_file(home, ts, u3, 0, Some(SessionSource::VSCode)).unwrap();

    let page1 = get_conversations(home, 2, None).await.unwrap();
    let page1 = get_conversations(home, 2, None, INTERACTIVE_SESSION_SOURCES)
        .await
        .unwrap();

    let p3 = home
        .join("sessions")
@@ -703,7 +812,8 @@ async fn test_stable_ordering_same_second_pagination() {
            "instructions": null,
            "cwd": ".",
            "originator": "test_originator",
            "cli_version": "test_version"
            "cli_version": "test_version",
            "source": "vscode",
        })]
    };
    let expected_cursor1: Cursor = serde_json::from_str(&format!("\"{ts}|{u2}\"")).unwrap();
@@ -730,9 +840,14 @@ async fn test_stable_ordering_same_second_pagination() {
    };
    assert_eq!(page1, expected_page1);

    let page2 = get_conversations(home, 2, page1.next_cursor.as_ref())
        .await
        .unwrap();
    let page2 = get_conversations(
        home,
        2,
        page1.next_cursor.as_ref(),
        INTERACTIVE_SESSION_SOURCES,
    )
    .await
    .unwrap();
    let p1 = home
        .join("sessions")
        .join("2025")
@@ -754,3 +869,59 @@ async fn test_stable_ordering_same_second_pagination() {
    };
    assert_eq!(page2, expected_page2);
}

#[tokio::test]
async fn test_source_filter_excludes_non_matching_sessions() {
    let temp = TempDir::new().unwrap();
    let home = temp.path();

    let interactive_id = Uuid::from_u128(42);
    let non_interactive_id = Uuid::from_u128(77);

    write_session_file(
        home,
        "2025-08-02T10-00-00",
        interactive_id,
        2,
        Some(SessionSource::Cli),
    )
    .unwrap();
    write_session_file(
        home,
        "2025-08-01T10-00-00",
        non_interactive_id,
        2,
        Some(SessionSource::Exec),
    )
    .unwrap();

    let interactive_only = get_conversations(home, 10, None, INTERACTIVE_SESSION_SOURCES)
        .await
        .unwrap();
    let paths: Vec<_> = interactive_only
        .items
        .iter()
        .map(|item| item.path.as_path())
        .collect();

    assert_eq!(paths.len(), 1);
    assert!(paths.iter().all(|path| {
        path.ends_with("rollout-2025-08-02T10-00-00-00000000-0000-0000-0000-00000000002a.jsonl")
    }));

    let all_sessions = get_conversations(home, 10, None, NO_SOURCE_FILTER)
        .await
        .unwrap();
    let all_paths: Vec<_> = all_sessions
        .items
        .into_iter()
        .map(|item| item.path)
        .collect();
    assert_eq!(all_paths.len(), 2);
    assert!(all_paths.iter().any(|path| {
        path.ends_with("rollout-2025-08-02T10-00-00-00000000-0000-0000-0000-00000000002a.jsonl")
    }));
    assert!(all_paths.iter().any(|path| {
        path.ends_with("rollout-2025-08-01T10-00-00-00000000-0000-0000-0000-00000000004d.jsonl")
    }));
}
codex-rs/core/src/tools/context.rs (new file, 244 lines)
@@ -0,0 +1,244 @@
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::tools::TELEMETRY_PREVIEW_MAX_BYTES;
use crate::tools::TELEMETRY_PREVIEW_MAX_LINES;
use crate::tools::TELEMETRY_PREVIEW_TRUNCATION_NOTICE;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ShellToolCallParams;
use codex_protocol::protocol::FileChange;
use codex_utils_string::take_bytes_at_char_boundary;
use mcp_types::CallToolResult;
use std::borrow::Cow;
use std::collections::HashMap;
use std::path::PathBuf;

pub struct ToolInvocation<'a> {
    pub session: &'a Session,
    pub turn: &'a TurnContext,
    pub tracker: &'a mut TurnDiffTracker,
    pub sub_id: &'a str,
    pub call_id: String,
    pub tool_name: String,
    pub payload: ToolPayload,
}

#[derive(Clone)]
pub enum ToolPayload {
    Function {
        arguments: String,
    },
    Custom {
        input: String,
    },
    LocalShell {
        params: ShellToolCallParams,
    },
    UnifiedExec {
        arguments: String,
    },
    Mcp {
        server: String,
        tool: String,
        raw_arguments: String,
    },
}

impl ToolPayload {
    pub fn log_payload(&self) -> Cow<'_, str> {
        match self {
            ToolPayload::Function { arguments } => Cow::Borrowed(arguments),
            ToolPayload::Custom { input } => Cow::Borrowed(input),
            ToolPayload::LocalShell { params } => Cow::Owned(params.command.join(" ")),
            ToolPayload::UnifiedExec { arguments } => Cow::Borrowed(arguments),
            ToolPayload::Mcp { raw_arguments, .. } => Cow::Borrowed(raw_arguments),
        }
    }
}

#[derive(Clone)]
pub enum ToolOutput {
    Function {
        content: String,
        success: Option<bool>,
    },
    Mcp {
        result: Result<CallToolResult, String>,
    },
}

impl ToolOutput {
    pub fn log_preview(&self) -> String {
        match self {
            ToolOutput::Function { content, .. } => telemetry_preview(content),
            ToolOutput::Mcp { result } => format!("{result:?}"),
        }
    }

    pub fn success_for_logging(&self) -> bool {
        match self {
            ToolOutput::Function { success, .. } => success.unwrap_or(true),
            ToolOutput::Mcp { result } => result.is_ok(),
        }
    }

    pub fn into_response(self, call_id: &str, payload: &ToolPayload) -> ResponseInputItem {
        match self {
            ToolOutput::Function { content, success } => {
                if matches!(payload, ToolPayload::Custom { .. }) {
                    ResponseInputItem::CustomToolCallOutput {
                        call_id: call_id.to_string(),
                        output: content,
                    }
                } else {
                    ResponseInputItem::FunctionCallOutput {
                        call_id: call_id.to_string(),
                        output: FunctionCallOutputPayload { content, success },
                    }
                }
            }
            ToolOutput::Mcp { result } => ResponseInputItem::McpToolCallOutput {
                call_id: call_id.to_string(),
                result,
            },
        }
    }
}

fn telemetry_preview(content: &str) -> String {
    let truncated_slice = take_bytes_at_char_boundary(content, TELEMETRY_PREVIEW_MAX_BYTES);
    let truncated_by_bytes = truncated_slice.len() < content.len();

    let mut preview = String::new();
    let mut lines_iter = truncated_slice.lines();
    for idx in 0..TELEMETRY_PREVIEW_MAX_LINES {
        match lines_iter.next() {
            Some(line) => {
                if idx > 0 {
                    preview.push('\n');
                }
                preview.push_str(line);
            }
            None => break,
        }
    }
    let truncated_by_lines = lines_iter.next().is_some();

    if !truncated_by_bytes && !truncated_by_lines {
        return content.to_string();
    }

    if preview.len() < truncated_slice.len()
        && truncated_slice
            .as_bytes()
            .get(preview.len())
            .is_some_and(|byte| *byte == b'\n')
    {
        preview.push('\n');
    }

    if !preview.is_empty() && !preview.ends_with('\n') {
        preview.push('\n');
    }
    preview.push_str(TELEMETRY_PREVIEW_TRUNCATION_NOTICE);

    preview
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn custom_tool_calls_should_roundtrip_as_custom_outputs() {
        let payload = ToolPayload::Custom {
            input: "patch".to_string(),
        };
        let response = ToolOutput::Function {
            content: "patched".to_string(),
            success: Some(true),
        }
        .into_response("call-42", &payload);

        match response {
            ResponseInputItem::CustomToolCallOutput { call_id, output } => {
                assert_eq!(call_id, "call-42");
                assert_eq!(output, "patched");
            }
            other => panic!("expected CustomToolCallOutput, got {other:?}"),
        }
    }

    #[test]
    fn function_payloads_remain_function_outputs() {
        let payload = ToolPayload::Function {
            arguments: "{}".to_string(),
        };
        let response = ToolOutput::Function {
            content: "ok".to_string(),
            success: Some(true),
        }
        .into_response("fn-1", &payload);

        match response {
            ResponseInputItem::FunctionCallOutput { call_id, output } => {
                assert_eq!(call_id, "fn-1");
                assert_eq!(output.content, "ok");
                assert_eq!(output.success, Some(true));
            }
            other => panic!("expected FunctionCallOutput, got {other:?}"),
        }
    }

    #[test]
    fn telemetry_preview_returns_original_within_limits() {
        let content = "short output";
        assert_eq!(telemetry_preview(content), content);
    }

    #[test]
    fn telemetry_preview_truncates_by_bytes() {
        let content = "x".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 8);
        let preview = telemetry_preview(&content);

        assert!(preview.contains(TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
        assert!(
            preview.len()
                <= TELEMETRY_PREVIEW_MAX_BYTES + TELEMETRY_PREVIEW_TRUNCATION_NOTICE.len() + 1
        );
    }

    #[test]
    fn telemetry_preview_truncates_by_lines() {
        let content = (0..(TELEMETRY_PREVIEW_MAX_LINES + 5))
            .map(|idx| format!("line {idx}"))
            .collect::<Vec<_>>()
            .join("\n");

        let preview = telemetry_preview(&content);
        let lines: Vec<&str> = preview.lines().collect();

        assert!(lines.len() <= TELEMETRY_PREVIEW_MAX_LINES + 1);
        assert_eq!(lines.last(), Some(&TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
    }
}

#[derive(Clone, Debug)]
pub(crate) struct ExecCommandContext {
    pub(crate) sub_id: String,
    pub(crate) call_id: String,
    pub(crate) command_for_display: Vec<String>,
    pub(crate) cwd: PathBuf,
    pub(crate) apply_patch: Option<ApplyPatchCommandContext>,
    pub(crate) tool_name: String,
    pub(crate) otel_event_manager: OtelEventManager,
}

#[derive(Clone, Debug)]
pub(crate) struct ApplyPatchCommandContext {
    pub(crate) user_explicitly_approved_this_action: bool,
    pub(crate) changes: HashMap<PathBuf, FileChange>,
}
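A small sketch (not in the diff) of how `ToolPayload::log_payload` behaves for the function variant; the arguments string passes through borrowed:

// Hypothetical: Function payloads are logged as their raw argument string.
let payload = ToolPayload::Function {
    arguments: r#"{"cmd":"ls"}"#.to_string(),
};
assert_eq!(payload.log_payload(), r#"{"cmd":"ls"}"#);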
@@ -1,15 +1,99 @@
use std::collections::BTreeMap;
use std::collections::HashMap;

use crate::client_common::tools::FreeformTool;
use crate::client_common::tools::FreeformToolFormat;
use crate::client_common::tools::ResponsesApiTool;
use crate::client_common::tools::ToolSpec;
use crate::exec::ExecParams;
use crate::function_tool::FunctionCallError;
use crate::openai_tools::JsonSchema;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::handle_container_exec_with_params;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use crate::tools::spec::ApplyPatchToolArgs;
use async_trait::async_trait;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;

use crate::openai_tools::FreeformTool;
use crate::openai_tools::FreeformToolFormat;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
pub struct ApplyPatchHandler;

const APPLY_PATCH_LARK_GRAMMAR: &str = include_str!("tool_apply_patch.lark");

#[async_trait]
impl ToolHandler for ApplyPatchHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    fn matches_kind(&self, payload: &ToolPayload) -> bool {
        matches!(
            payload,
            ToolPayload::Function { .. } | ToolPayload::Custom { .. }
        )
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            turn,
            tracker,
            sub_id,
            call_id,
            tool_name,
            payload,
        } = invocation;

        let patch_input = match payload {
            ToolPayload::Function { arguments } => {
                let args: ApplyPatchToolArgs = serde_json::from_str(&arguments).map_err(|e| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse function arguments: {e:?}"
                    ))
                })?;
                args.input
            }
            ToolPayload::Custom { input } => input,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "apply_patch handler received unsupported payload".to_string(),
                ));
            }
        };

        let exec_params = ExecParams {
            command: vec!["apply_patch".to_string(), patch_input.clone()],
            cwd: turn.cwd.clone(),
            timeout_ms: None,
            env: HashMap::new(),
            with_escalated_permissions: None,
            justification: None,
        };

        let content = handle_container_exec_with_params(
            tool_name.as_str(),
            exec_params,
            session,
            turn,
            tracker,
            sub_id.to_string(),
            call_id.clone(),
        )
        .await?;

        Ok(ToolOutput::Function {
            content,
            success: Some(true),
        })
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
@@ -19,8 +103,8 @@ pub enum ApplyPatchToolType {

/// Returns a custom tool that can be used to edit files. Well-suited for GPT-5 models
/// https://platform.openai.com/docs/guides/function-calling#custom-tools
pub(crate) fn create_apply_patch_freeform_tool() -> OpenAiTool {
    OpenAiTool::Freeform(FreeformTool {
pub(crate) fn create_apply_patch_freeform_tool() -> ToolSpec {
    ToolSpec::Freeform(FreeformTool {
        name: "apply_patch".to_string(),
        description: "Use the `apply_patch` tool to edit files".to_string(),
        format: FreeformToolFormat {
@@ -32,7 +116,7 @@ pub(crate) fn create_apply_patch_freeform_tool() -> OpenAiTool {
}

/// Returns a json tool that can be used to edit files. Should only be used with gpt-oss models
pub(crate) fn create_apply_patch_json_tool() -> OpenAiTool {
pub(crate) fn create_apply_patch_json_tool() -> ToolSpec {
    let mut properties = BTreeMap::new();
    properties.insert(
        "input".to_string(),
@@ -41,7 +125,7 @@ pub(crate) fn create_apply_patch_json_tool() -> OpenAiTool {
        },
    );

    OpenAiTool::Function(ResponsesApiTool {
    ToolSpec::Function(ResponsesApiTool {
        name: "apply_patch".to_string(),
        description: r#"Use the `apply_patch` tool to edit files.
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
@@ -111,12 +195,12 @@ It is important to remember:
- You must prefix new lines with `+` even when creating a new file
- File references can only be relative, NEVER ABSOLUTE.
"#
    .to_string(),
        .to_string(),
        strict: false,
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["input".to_string()]),
            additional_properties: Some(false),
            additional_properties: Some(false.into()),
        },
    })
}
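For the JSON variant, the model's arguments are a single `input` field; a sketch of the expected shape (the patch body here is an illustrative placeholder, not a real patch):

// Hypothetical arguments as the handler parses them via ApplyPatchToolArgs.
let args: ApplyPatchToolArgs =
    serde_json::from_str(r#"{"input": "*** Begin Patch\n*** End Patch"}"#).unwrap();
assert_eq!(args.input, "*** Begin Patch\n*** End Patch");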
codex-rs/core/src/tools/handlers/exec_stream.rs (new file, 71 lines)
@@ -0,0 +1,71 @@
use async_trait::async_trait;

use crate::exec_command::EXEC_COMMAND_TOOL_NAME;
use crate::exec_command::ExecCommandParams;
use crate::exec_command::WRITE_STDIN_TOOL_NAME;
use crate::exec_command::WriteStdinParams;
use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct ExecStreamHandler;

#[async_trait]
impl ToolHandler for ExecStreamHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            tool_name,
            payload,
            ..
        } = invocation;

        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "exec_stream handler received unsupported payload".to_string(),
                ));
            }
        };

        let content = match tool_name.as_str() {
            EXEC_COMMAND_TOOL_NAME => {
                let params: ExecCommandParams = serde_json::from_str(&arguments).map_err(|e| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse function arguments: {e:?}"
                    ))
                })?;
                session.handle_exec_command_tool(params).await?
            }
            WRITE_STDIN_TOOL_NAME => {
                let params: WriteStdinParams = serde_json::from_str(&arguments).map_err(|e| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse function arguments: {e:?}"
                    ))
                })?;
                session.handle_write_stdin_tool(params).await?
            }
            _ => {
                return Err(FunctionCallError::RespondToModel(format!(
                    "exec_stream handler does not support tool {tool_name}"
                )));
            }
        };

        Ok(ToolOutput::Function {
            content,
            success: Some(true),
        })
    }
}
codex-rs/core/src/tools/handlers/mcp.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
use async_trait::async_trait;

use crate::function_tool::FunctionCallError;
use crate::mcp_tool_call::handle_mcp_tool_call;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct McpHandler;

#[async_trait]
impl ToolHandler for McpHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Mcp
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            sub_id,
            call_id,
            payload,
            ..
        } = invocation;

        let payload = match payload {
            ToolPayload::Mcp {
                server,
                tool,
                raw_arguments,
            } => (server, tool, raw_arguments),
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "mcp handler received unsupported payload".to_string(),
                ));
            }
        };

        let (server, tool, raw_arguments) = payload;
        let arguments_str = raw_arguments;

        let response = handle_mcp_tool_call(
            session,
            sub_id,
            call_id.clone(),
            server,
            tool,
            arguments_str,
        )
        .await;

        match response {
            codex_protocol::models::ResponseInputItem::McpToolCallOutput { result, .. } => {
                Ok(ToolOutput::Mcp { result })
            }
            codex_protocol::models::ResponseInputItem::FunctionCallOutput { output, .. } => {
                let codex_protocol::models::FunctionCallOutputPayload { content, success } = output;
                Ok(ToolOutput::Function { content, success })
            }
            _ => Err(FunctionCallError::RespondToModel(
                "mcp handler received unexpected response variant".to_string(),
            )),
        }
    }
}
codex-rs/core/src/tools/handlers/mod.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
pub mod apply_patch;
mod exec_stream;
mod mcp;
mod plan;
mod read_file;
mod shell;
mod unified_exec;
mod view_image;

pub use plan::PLAN_TOOL;

pub use apply_patch::ApplyPatchHandler;
pub use exec_stream::ExecStreamHandler;
pub use mcp::McpHandler;
pub use plan::PlanHandler;
pub use read_file::ReadFileHandler;
pub use shell::ShellHandler;
pub use unified_exec::UnifiedExecHandler;
pub use view_image::ViewImageHandler;
@@ -1,23 +1,23 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;

use crate::client_common::tools::ResponsesApiTool;
use crate::client_common::tools::ToolSpec;
use crate::codex::Session;
use crate::function_tool::FunctionCallError;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use async_trait::async_trait;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use std::collections::BTreeMap;
use std::sync::LazyLock;

// Use the canonical plan tool types from the protocol crate to ensure
// type-identity matches events transported via `codex_protocol`.
pub use codex_protocol::plan_tool::PlanItemArg;
pub use codex_protocol::plan_tool::StepStatus;
pub use codex_protocol::plan_tool::UpdatePlanArgs;
pub struct PlanHandler;

// Types for the TODO tool arguments matching codex-vscode/todo-mcp/src/main.rs

-pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
+pub static PLAN_TOOL: LazyLock<ToolSpec> = LazyLock::new(|| {
    let mut plan_item_props = BTreeMap::new();
    plan_item_props.insert("step".to_string(), JsonSchema::String { description: None });
    plan_item_props.insert(
@@ -32,7 +32,7 @@ pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
        items: Box::new(JsonSchema::Object {
            properties: plan_item_props,
            required: Some(vec!["step".to_string(), "status".to_string()]),
-            additional_properties: Some(false),
+            additional_properties: Some(false.into()),
        }),
    };

@@ -43,7 +43,7 @@ pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
    );
    properties.insert("plan".to_string(), plan_items_schema);

-    OpenAiTool::Function(ResponsesApiTool {
+    ToolSpec::Function(ResponsesApiTool {
        name: "update_plan".to_string(),
        description: r#"Updates the task plan.
Provide an optional explanation and a list of plan items, each with a step and status.
@@ -54,11 +54,47 @@ At most one step can be in_progress at a time.
        parameters: JsonSchema::Object {
            properties,
            required: Some(vec!["plan".to_string()]),
-            additional_properties: Some(false),
+            additional_properties: Some(false.into()),
        },
    })
});

#[async_trait]
impl ToolHandler for PlanHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            sub_id,
            call_id,
            payload,
            ..
        } = invocation;

        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "update_plan handler received unsupported payload".to_string(),
                ));
            }
        };

        let content = handle_update_plan(session, arguments, sub_id.to_string(), call_id).await?;

        Ok(ToolOutput::Function {
            content,
            success: Some(true),
        })
    }
}

/// This function doesn't do anything useful on its own. However, it gives the model a structured way to record its plan that clients can read and render.
/// So it's the _inputs_ to this function that are useful to clients, not the outputs, and neither is actually useful for the model itself other
/// than forcing it to come up with and document a plan (TBD how that affects performance).
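A note on the shape of those inputs: below is a minimal sketch of the arguments the model would send to `update_plan`, matching the schema built above. The concrete `status` strings are an assumption inferred from the `StepStatus` naming and the "At most one step can be in_progress" wording, not confirmed by this diff.

// Hypothetical `update_plan` arguments matching the schema above: a required
// `plan` array of {step, status} items plus an optional `explanation`.
// Status strings are assumed from the `StepStatus` naming.
fn example_update_plan_arguments() -> serde_json::Value {
    serde_json::json!({
        "explanation": "Sketch the refactor before editing",
        "plan": [
            { "step": "Map the old handler", "status": "completed" },
            { "step": "Port it to the registry", "status": "in_progress" },
            { "step": "Update the tests", "status": "pending" }
        ]
    })
}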
255 codex-rs/core/src/tools/handlers/read_file.rs Normal file
@@ -0,0 +1,255 @@
use std::path::Path;
use std::path::PathBuf;

use async_trait::async_trait;
use codex_utils_string::take_bytes_at_char_boundary;
use serde::Deserialize;
use tokio::fs::File;
use tokio::io::AsyncBufReadExt;
use tokio::io::BufReader;

use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct ReadFileHandler;

const MAX_LINE_LENGTH: usize = 500;

fn default_offset() -> usize {
    1
}

fn default_limit() -> usize {
    2000
}

#[derive(Deserialize)]
struct ReadFileArgs {
    file_path: String,
    #[serde(default = "default_offset")]
    offset: usize,
    #[serde(default = "default_limit")]
    limit: usize,
}

#[async_trait]
impl ToolHandler for ReadFileHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation { payload, .. } = invocation;

        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "read_file handler received unsupported payload".to_string(),
                ));
            }
        };

        let args: ReadFileArgs = serde_json::from_str(&arguments).map_err(|err| {
            FunctionCallError::RespondToModel(format!(
                "failed to parse function arguments: {err:?}"
            ))
        })?;

        let ReadFileArgs {
            file_path,
            offset,
            limit,
        } = args;

        if offset == 0 {
            return Err(FunctionCallError::RespondToModel(
                "offset must be a 1-indexed line number".to_string(),
            ));
        }

        if limit == 0 {
            return Err(FunctionCallError::RespondToModel(
                "limit must be greater than zero".to_string(),
            ));
        }

        let path = PathBuf::from(&file_path);
        if !path.is_absolute() {
            return Err(FunctionCallError::RespondToModel(
                "file_path must be an absolute path".to_string(),
            ));
        }

        let collected = read_file_slice(&path, offset, limit).await?;
        Ok(ToolOutput::Function {
            content: collected.join("\n"),
            success: Some(true),
        })
    }
}

async fn read_file_slice(
    path: &Path,
    offset: usize,
    limit: usize,
) -> Result<Vec<String>, FunctionCallError> {
    let file = File::open(path)
        .await
        .map_err(|err| FunctionCallError::RespondToModel(format!("failed to read file: {err}")))?;

    let mut reader = BufReader::new(file);
    let mut collected = Vec::new();
    let mut seen = 0usize;
    let mut buffer = Vec::new();

    loop {
        buffer.clear();
        let bytes_read = reader.read_until(b'\n', &mut buffer).await.map_err(|err| {
            FunctionCallError::RespondToModel(format!("failed to read file: {err}"))
        })?;

        if bytes_read == 0 {
            break;
        }

        if buffer.last() == Some(&b'\n') {
            buffer.pop();
            if buffer.last() == Some(&b'\r') {
                buffer.pop();
            }
        }

        seen += 1;

        if seen < offset {
            continue;
        }

        if collected.len() == limit {
            break;
        }

        let formatted = format_line(&buffer);
        collected.push(format!("L{seen}: {formatted}"));

        if collected.len() == limit {
            break;
        }
    }

    if seen < offset {
        return Err(FunctionCallError::RespondToModel(
            "offset exceeds file length".to_string(),
        ));
    }

    Ok(collected)
}

fn format_line(bytes: &[u8]) -> String {
    let decoded = String::from_utf8_lossy(bytes);
    if decoded.len() > MAX_LINE_LENGTH {
        take_bytes_at_char_boundary(&decoded, MAX_LINE_LENGTH).to_string()
    } else {
        decoded.into_owned()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::NamedTempFile;

    #[tokio::test]
    async fn reads_requested_range() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        writeln!(temp, "alpha").unwrap();
        writeln!(temp, "beta").unwrap();
        writeln!(temp, "gamma").unwrap();

        let lines = read_file_slice(temp.path(), 2, 2)
            .await
            .expect("read slice");
        assert_eq!(lines, vec!["L2: beta".to_string(), "L3: gamma".to_string()]);
    }

    #[tokio::test]
    async fn errors_when_offset_exceeds_length() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        writeln!(temp, "only").unwrap();

        let err = read_file_slice(temp.path(), 3, 1)
            .await
            .expect_err("offset exceeds length");
        assert_eq!(
            err,
            FunctionCallError::RespondToModel("offset exceeds file length".to_string())
        );
    }

    #[tokio::test]
    async fn reads_non_utf8_lines() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        temp.as_file_mut().write_all(b"\xff\xfe\nplain\n").unwrap();

        let lines = read_file_slice(temp.path(), 1, 2)
            .await
            .expect("read slice");
        let expected_first = format!("L1: {}{}", '\u{FFFD}', '\u{FFFD}');
        assert_eq!(lines, vec![expected_first, "L2: plain".to_string()]);
    }

    #[tokio::test]
    async fn trims_crlf_endings() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        write!(temp, "one\r\ntwo\r\n").unwrap();

        let lines = read_file_slice(temp.path(), 1, 2)
            .await
            .expect("read slice");
        assert_eq!(lines, vec!["L1: one".to_string(), "L2: two".to_string()]);
    }

    #[tokio::test]
    async fn respects_limit_even_with_more_lines() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        writeln!(temp, "first").unwrap();
        writeln!(temp, "second").unwrap();
        writeln!(temp, "third").unwrap();

        let lines = read_file_slice(temp.path(), 1, 2)
            .await
            .expect("read slice");
        assert_eq!(
            lines,
            vec!["L1: first".to_string(), "L2: second".to_string()]
        );
    }

    #[tokio::test]
    async fn truncates_lines_longer_than_max_length() {
        let mut temp = NamedTempFile::new().expect("create temp file");
        use std::io::Write as _;
        let long_line = "x".repeat(MAX_LINE_LENGTH + 50);
        writeln!(temp, "{long_line}").unwrap();

        let lines = read_file_slice(temp.path(), 1, 1)
            .await
            .expect("read slice");
        let expected = "x".repeat(MAX_LINE_LENGTH);
        assert_eq!(lines, vec![format!("L1: {expected}")]);
    }
}
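To make the handler's contract concrete, here is a small sketch of a `read_file` round trip under the rules above (absolute `file_path`, 1-indexed `offset`, at most `limit` lines). The expected output string matches the suite test later in this diff; the path literal is illustrative only.

// Sketch of the read_file input/output contract. The argument JSON mirrors
// `ReadFileArgs`; the output is the joined, `L{n}: `-prefixed slice.
fn read_file_contract_example() -> (serde_json::Value, &'static str) {
    let arguments = serde_json::json!({
        "file_path": "/tmp/sample.txt", // must be absolute
        "offset": 2,                    // 1-indexed first line to return
        "limit": 2,                     // maximum number of lines returned
    });
    // For a file containing "first\nsecond\nthird\nfourth\n":
    (arguments, "L2: second\nL3: third")
}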
103 codex-rs/core/src/tools/handlers/shell.rs Normal file
@@ -0,0 +1,103 @@
use async_trait::async_trait;
use codex_protocol::models::ShellToolCallParams;

use crate::codex::TurnContext;
use crate::exec::ExecParams;
use crate::exec_env::create_env;
use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::handle_container_exec_with_params;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct ShellHandler;

impl ShellHandler {
    fn to_exec_params(params: ShellToolCallParams, turn_context: &TurnContext) -> ExecParams {
        ExecParams {
            command: params.command,
            cwd: turn_context.resolve_path(params.workdir.clone()),
            timeout_ms: params.timeout_ms,
            env: create_env(&turn_context.shell_environment_policy),
            with_escalated_permissions: params.with_escalated_permissions,
            justification: params.justification,
        }
    }
}

#[async_trait]
impl ToolHandler for ShellHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    fn matches_kind(&self, payload: &ToolPayload) -> bool {
        matches!(
            payload,
            ToolPayload::Function { .. } | ToolPayload::LocalShell { .. }
        )
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            turn,
            tracker,
            sub_id,
            call_id,
            tool_name,
            payload,
        } = invocation;

        match payload {
            ToolPayload::Function { arguments } => {
                let params: ShellToolCallParams =
                    serde_json::from_str(&arguments).map_err(|e| {
                        FunctionCallError::RespondToModel(format!(
                            "failed to parse function arguments: {e:?}"
                        ))
                    })?;
                let exec_params = Self::to_exec_params(params, turn);
                let content = handle_container_exec_with_params(
                    tool_name.as_str(),
                    exec_params,
                    session,
                    turn,
                    tracker,
                    sub_id.to_string(),
                    call_id.clone(),
                )
                .await?;
                Ok(ToolOutput::Function {
                    content,
                    success: Some(true),
                })
            }
            ToolPayload::LocalShell { params } => {
                let exec_params = Self::to_exec_params(params, turn);
                let content = handle_container_exec_with_params(
                    tool_name.as_str(),
                    exec_params,
                    session,
                    turn,
                    tracker,
                    sub_id.to_string(),
                    call_id.clone(),
                )
                .await?;
                Ok(ToolOutput::Function {
                    content,
                    success: Some(true),
                })
            }
            _ => Err(FunctionCallError::RespondToModel(format!(
                "unsupported payload for shell handler: {tool_name}"
            ))),
        }
    }
}
112 codex-rs/core/src/tools/handlers/unified_exec.rs Normal file
@@ -0,0 +1,112 @@
use async_trait::async_trait;
use serde::Deserialize;

use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use crate::unified_exec::UnifiedExecRequest;

pub struct UnifiedExecHandler;

#[derive(Deserialize)]
struct UnifiedExecArgs {
    input: Vec<String>,
    #[serde(default)]
    session_id: Option<String>,
    #[serde(default)]
    timeout_ms: Option<u64>,
}

#[async_trait]
impl ToolHandler for UnifiedExecHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::UnifiedExec
    }

    fn matches_kind(&self, payload: &ToolPayload) -> bool {
        matches!(
            payload,
            ToolPayload::UnifiedExec { .. } | ToolPayload::Function { .. }
        )
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session, payload, ..
        } = invocation;

        let args = match payload {
            ToolPayload::UnifiedExec { arguments } | ToolPayload::Function { arguments } => {
                serde_json::from_str::<UnifiedExecArgs>(&arguments).map_err(|err| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse function arguments: {err:?}"
                    ))
                })?
            }
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "unified_exec handler received unsupported payload".to_string(),
                ));
            }
        };

        let UnifiedExecArgs {
            input,
            session_id,
            timeout_ms,
        } = args;

        let parsed_session_id = if let Some(session_id) = session_id {
            match session_id.parse::<i32>() {
                Ok(parsed) => Some(parsed),
                Err(output) => {
                    return Err(FunctionCallError::RespondToModel(format!(
                        "invalid session_id: {session_id} due to error {output:?}"
                    )));
                }
            }
        } else {
            None
        };

        let request = UnifiedExecRequest {
            session_id: parsed_session_id,
            input_chunks: &input,
            timeout_ms,
        };

        let value = session
            .run_unified_exec_request(request)
            .await
            .map_err(|err| {
                FunctionCallError::RespondToModel(format!("unified exec failed: {err:?}"))
            })?;

        #[derive(serde::Serialize)]
        struct SerializedUnifiedExecResult {
            session_id: Option<String>,
            output: String,
        }

        let content = serde_json::to_string(&SerializedUnifiedExecResult {
            session_id: value.session_id.map(|id| id.to_string()),
            output: value.output,
        })
        .map_err(|err| {
            FunctionCallError::RespondToModel(format!(
                "failed to serialize unified exec output: {err:?}"
            ))
        })?;

        Ok(ToolOutput::Function {
            content,
            success: Some(true),
        })
    }
}
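For reference, the wire shapes implied by the handler above: `session_id` arrives as a string, is parsed to an `i32` for `UnifiedExecRequest`, and is rendered back to a string in the serialized result. A minimal sketch (the literal values are illustrative only):

// Hypothetical unified_exec payloads, inferred from the handler above.
// Request arguments:  {"input": ["ls", "-la"], "session_id": "7", "timeout_ms": 1000}
// Response content:   {"session_id": "7", "output": "..."}
fn parse_unified_exec_session_id(raw: &str) -> Result<i32, String> {
    raw.parse::<i32>()
        .map_err(|err| format!("invalid session_id: {raw} due to error {err:?}"))
}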
96 codex-rs/core/src/tools/handlers/view_image.rs Normal file
@@ -0,0 +1,96 @@
use async_trait::async_trait;
use serde::Deserialize;
use tokio::fs;

use crate::function_tool::FunctionCallError;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::InputItem;
use crate::protocol::ViewImageToolCallEvent;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct ViewImageHandler;

#[derive(Deserialize)]
struct ViewImageArgs {
    path: String,
}

#[async_trait]
impl ToolHandler for ViewImageHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    async fn handle(
        &self,
        invocation: ToolInvocation<'_>,
    ) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            turn,
            payload,
            sub_id,
            call_id,
            ..
        } = invocation;

        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "view_image handler received unsupported payload".to_string(),
                ));
            }
        };

        let args: ViewImageArgs = serde_json::from_str(&arguments).map_err(|e| {
            FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}"))
        })?;

        let abs_path = turn.resolve_path(Some(args.path));

        let metadata = fs::metadata(&abs_path).await.map_err(|error| {
            FunctionCallError::RespondToModel(format!(
                "unable to locate image at `{}`: {error}",
                abs_path.display()
            ))
        })?;

        if !metadata.is_file() {
            return Err(FunctionCallError::RespondToModel(format!(
                "image path `{}` is not a file",
                abs_path.display()
            )));
        }
        let event_path = abs_path.clone();

        session
            .inject_input(vec![InputItem::LocalImage { path: abs_path }])
            .await
            .map_err(|_| {
                FunctionCallError::RespondToModel(
                    "unable to attach image (no active task)".to_string(),
                )
            })?;

        session
            .send_event(Event {
                id: sub_id.to_string(),
                msg: EventMsg::ViewImageToolCall(ViewImageToolCallEvent {
                    call_id,
                    path: event_path,
                }),
            })
            .await;

        Ok(ToolOutput::Function {
            content: "attached local image path".to_string(),
            success: Some(true),
        })
    }
}
280 codex-rs/core/src/tools/mod.rs Normal file
@@ -0,0 +1,280 @@
pub mod context;
pub(crate) mod handlers;
pub mod registry;
pub mod router;
pub mod spec;

use crate::apply_patch;
use crate::apply_patch::ApplyPatchExec;
use crate::apply_patch::InternalApplyPatchInvocation;
use crate::apply_patch::convert_apply_patch_to_protocol;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecParams;
use crate::exec::ExecToolCallOutput;
use crate::exec::StdoutStream;
use crate::executor::ExecutionMode;
use crate::executor::errors::ExecError;
use crate::executor::linkers::PreparedExec;
use crate::function_tool::FunctionCallError;
use crate::tools::context::ApplyPatchCommandContext;
use crate::tools::context::ExecCommandContext;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_apply_patch::MaybeApplyPatchVerified;
use codex_apply_patch::maybe_parse_apply_patch_verified;
use codex_protocol::protocol::AskForApproval;
use codex_utils_string::take_bytes_at_char_boundary;
use codex_utils_string::take_last_bytes_at_char_boundary;
pub use router::ToolRouter;
use serde::Serialize;
use tracing::trace;

// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;

// Telemetry preview limits: keep log events smaller than model budgets.
pub(crate) const TELEMETRY_PREVIEW_MAX_BYTES: usize = 2 * 1024; // 2 KiB
pub(crate) const TELEMETRY_PREVIEW_MAX_LINES: usize = 64; // lines
pub(crate) const TELEMETRY_PREVIEW_TRUNCATION_NOTICE: &str =
    "[... telemetry preview truncated ...]";

// TODO(jif) break this down
pub(crate) async fn handle_container_exec_with_params(
    tool_name: &str,
    params: ExecParams,
    sess: &Session,
    turn_context: &TurnContext,
    turn_diff_tracker: &mut TurnDiffTracker,
    sub_id: String,
    call_id: String,
) -> Result<String, FunctionCallError> {
    let otel_event_manager = turn_context.client.get_otel_event_manager();

    if params.with_escalated_permissions.unwrap_or(false)
        && !matches!(turn_context.approval_policy, AskForApproval::OnRequest)
    {
        return Err(FunctionCallError::RespondToModel(format!(
            "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}",
            policy = turn_context.approval_policy
        )));
    }

    // check if this was a patch, and apply it if so
    let apply_patch_exec = match maybe_parse_apply_patch_verified(&params.command, &params.cwd) {
        MaybeApplyPatchVerified::Body(changes) => {
            match apply_patch::apply_patch(sess, turn_context, &sub_id, &call_id, changes).await {
                InternalApplyPatchInvocation::Output(item) => return item,
                InternalApplyPatchInvocation::DelegateToExec(apply_patch_exec) => {
                    Some(apply_patch_exec)
                }
            }
        }
        MaybeApplyPatchVerified::CorrectnessError(parse_error) => {
            // It looks like an invocation of `apply_patch`, but we
            // could not resolve it into a patch that would apply
            // cleanly. Return to model for resample.
            return Err(FunctionCallError::RespondToModel(format!(
                "apply_patch verification failed: {parse_error}"
            )));
        }
        MaybeApplyPatchVerified::ShellParseError(error) => {
            trace!("Failed to parse shell command, {error:?}");
            None
        }
        MaybeApplyPatchVerified::NotApplyPatch => None,
    };

    let command_for_display = if let Some(exec) = apply_patch_exec.as_ref() {
        vec!["apply_patch".to_string(), exec.action.patch.clone()]
    } else {
        params.command.clone()
    };

    let exec_command_context = ExecCommandContext {
        sub_id: sub_id.clone(),
        call_id: call_id.clone(),
        command_for_display: command_for_display.clone(),
        cwd: params.cwd.clone(),
        apply_patch: apply_patch_exec.as_ref().map(
            |ApplyPatchExec {
                 action,
                 user_explicitly_approved_this_action,
             }| ApplyPatchCommandContext {
                user_explicitly_approved_this_action: *user_explicitly_approved_this_action,
                changes: convert_apply_patch_to_protocol(action),
            },
        ),
        tool_name: tool_name.to_string(),
        otel_event_manager,
    };

    let mode = match apply_patch_exec {
        Some(exec) => ExecutionMode::ApplyPatch(exec),
        None => ExecutionMode::Shell,
    };

    sess.services.executor.update_environment(
        turn_context.sandbox_policy.clone(),
        turn_context.cwd.clone(),
    );

    let prepared_exec = PreparedExec::new(
        exec_command_context,
        params,
        command_for_display,
        mode,
        Some(StdoutStream {
            sub_id: sub_id.clone(),
            call_id: call_id.clone(),
            tx_event: sess.get_tx_event(),
        }),
        turn_context.shell_environment_policy.use_profile,
    );

    let output_result = sess
        .run_exec_with_events(
            turn_diff_tracker,
            prepared_exec,
            turn_context.approval_policy,
        )
        .await;

    match output_result {
        Ok(output) => {
            let ExecToolCallOutput { exit_code, .. } = &output;
            let content = format_exec_output_apply_patch(&output);
            if *exit_code == 0 {
                Ok(content)
            } else {
                Err(FunctionCallError::RespondToModel(content))
            }
        }
        Err(ExecError::Function(err)) => Err(err),
        Err(ExecError::Codex(CodexErr::Sandbox(SandboxErr::Timeout { output }))) => Err(
            FunctionCallError::RespondToModel(format_exec_output_apply_patch(&output)),
        ),
        Err(ExecError::Codex(err)) => Err(FunctionCallError::RespondToModel(format!(
            "execution error: {err:?}"
        ))),
    }
}

pub fn format_exec_output_apply_patch(exec_output: &ExecToolCallOutput) -> String {
    let ExecToolCallOutput {
        exit_code,
        duration,
        ..
    } = exec_output;

    #[derive(Serialize)]
    struct ExecMetadata {
        exit_code: i32,
        duration_seconds: f32,
    }

    #[derive(Serialize)]
    struct ExecOutput<'a> {
        output: &'a str,
        metadata: ExecMetadata,
    }

    // round to 1 decimal place
    let duration_seconds = ((duration.as_secs_f32()) * 10.0).round() / 10.0;

    let formatted_output = format_exec_output_str(exec_output);

    let payload = ExecOutput {
        output: &formatted_output,
        metadata: ExecMetadata {
            exit_code: *exit_code,
            duration_seconds,
        },
    };

    #[expect(clippy::expect_used)]
    serde_json::to_string(&payload).expect("serialize ExecOutput")
}

pub fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
    let ExecToolCallOutput {
        aggregated_output, ..
    } = exec_output;

    // Head+tail truncation for the model: show the beginning and end with an elision.
    // Clients still receive full streams; only this formatted summary is capped.

    let mut s = &aggregated_output.text;
    let prefixed_str: String;

    if exec_output.timed_out {
        prefixed_str = format!(
            "command timed out after {} milliseconds\n",
            exec_output.duration.as_millis()
        ) + s;
        s = &prefixed_str;
    }

    let total_lines = s.lines().count();
    if s.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
        return s.to_string();
    }

    let segments: Vec<&str> = s.split_inclusive('\n').collect();
    let head_take = MODEL_FORMAT_HEAD_LINES.min(segments.len());
    let tail_take = MODEL_FORMAT_TAIL_LINES.min(segments.len().saturating_sub(head_take));
    let omitted = segments.len().saturating_sub(head_take + tail_take);

    let head_slice_end: usize = segments
        .iter()
        .take(head_take)
        .map(|segment| segment.len())
        .sum();
    let tail_slice_start: usize = if tail_take == 0 {
        s.len()
    } else {
        s.len()
            - segments
                .iter()
                .rev()
                .take(tail_take)
                .map(|segment| segment.len())
                .sum::<usize>()
    };
    let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n");

    // Byte budgets for head/tail around the marker
    let mut head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
    let tail_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(head_budget + marker.len());
    if tail_budget == 0 && marker.len() >= MODEL_FORMAT_MAX_BYTES {
        // Degenerate case: marker alone exceeds budget; return a clipped marker
        return take_bytes_at_char_boundary(&marker, MODEL_FORMAT_MAX_BYTES).to_string();
    }
    if tail_budget == 0 {
        // Make room for the marker by shrinking head
        head_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(marker.len());
    }

    let head_slice = &s[..head_slice_end];
    let head_part = take_bytes_at_char_boundary(head_slice, head_budget);
    let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(s.len()));

    result.push_str(head_part);
    result.push_str(&marker);

    let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
    if remaining == 0 {
        return result;
    }

    let tail_slice = &s[tail_slice_start..];
    let tail_part = take_last_bytes_at_char_boundary(tail_slice, remaining);
    result.push_str(tail_part);

    result
}
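To make the truncation budgets above concrete, here is a worked example under the constants as defined (the 1,000-line output and the constant aliases are illustrative only):

// Worked example of the head+tail truncation budgets in tools/mod.rs.
const MAX_BYTES: usize = 10 * 1024; // MODEL_FORMAT_MAX_BYTES
const MAX_LINES: usize = 256; // MODEL_FORMAT_MAX_LINES
const HEAD_LINES: usize = MAX_LINES / 2; // 128
const TAIL_LINES: usize = MAX_LINES - HEAD_LINES; // 128
const HEAD_BYTES: usize = MAX_BYTES / 2; // 5 KiB

fn main() {
    // A 1_000-line output keeps the first 128 and last 128 lines and elides
    // the middle 744 behind a marker line.
    let total_lines = 1_000usize;
    let omitted = total_lines - (HEAD_LINES + TAIL_LINES);
    assert_eq!(omitted, 744);
    // The head gets up to 5 KiB; the tail gets whatever remains of the 10 KiB
    // budget once the head and the marker are accounted for.
    let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n");
    let tail_budget = MAX_BYTES - (HEAD_BYTES + marker.len());
    assert!(tail_budget > 4 * 1024);
}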
197 codex-rs/core/src/tools/registry.rs Normal file
@@ -0,0 +1,197 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use codex_protocol::models::ResponseInputItem;
use tracing::warn;

use crate::client_common::tools::ToolSpec;
use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum ToolKind {
    Function,
    UnifiedExec,
    Mcp,
}

#[async_trait]
pub trait ToolHandler: Send + Sync {
    fn kind(&self) -> ToolKind;

    fn matches_kind(&self, payload: &ToolPayload) -> bool {
        matches!(
            (self.kind(), payload),
            (ToolKind::Function, ToolPayload::Function { .. })
                | (ToolKind::UnifiedExec, ToolPayload::UnifiedExec { .. })
                | (ToolKind::Mcp, ToolPayload::Mcp { .. })
        )
    }

    async fn handle(&self, invocation: ToolInvocation<'_>)
    -> Result<ToolOutput, FunctionCallError>;
}

pub struct ToolRegistry {
    handlers: HashMap<String, Arc<dyn ToolHandler>>,
}

impl ToolRegistry {
    pub fn new(handlers: HashMap<String, Arc<dyn ToolHandler>>) -> Self {
        Self { handlers }
    }

    pub fn handler(&self, name: &str) -> Option<Arc<dyn ToolHandler>> {
        self.handlers.get(name).map(Arc::clone)
    }

    // TODO(jif) for dynamic tools.
    // pub fn register(&mut self, name: impl Into<String>, handler: Arc<dyn ToolHandler>) {
    //     let name = name.into();
    //     if self.handlers.insert(name.clone(), handler).is_some() {
    //         warn!("overwriting handler for tool {name}");
    //     }
    // }

    pub async fn dispatch<'a>(
        &self,
        invocation: ToolInvocation<'a>,
    ) -> Result<ResponseInputItem, FunctionCallError> {
        let tool_name = invocation.tool_name.clone();
        let call_id_owned = invocation.call_id.clone();
        let otel = invocation.turn.client.get_otel_event_manager();
        let payload_for_response = invocation.payload.clone();
        let log_payload = payload_for_response.log_payload();

        let handler = match self.handler(tool_name.as_ref()) {
            Some(handler) => handler,
            None => {
                let message =
                    unsupported_tool_call_message(&invocation.payload, tool_name.as_ref());
                otel.tool_result(
                    tool_name.as_ref(),
                    &call_id_owned,
                    log_payload.as_ref(),
                    Duration::ZERO,
                    false,
                    &message,
                );
                return Err(FunctionCallError::RespondToModel(message));
            }
        };

        if !handler.matches_kind(&invocation.payload) {
            let message = format!("tool {tool_name} invoked with incompatible payload");
            otel.tool_result(
                tool_name.as_ref(),
                &call_id_owned,
                log_payload.as_ref(),
                Duration::ZERO,
                false,
                &message,
            );
            return Err(FunctionCallError::Fatal(message));
        }

        let output_cell = tokio::sync::Mutex::new(None);

        let result = otel
            .log_tool_result(
                tool_name.as_ref(),
                &call_id_owned,
                log_payload.as_ref(),
                || {
                    let handler = handler.clone();
                    let output_cell = &output_cell;
                    let invocation = invocation;
                    async move {
                        match handler.handle(invocation).await {
                            Ok(output) => {
                                let preview = output.log_preview();
                                let success = output.success_for_logging();
                                let mut guard = output_cell.lock().await;
                                *guard = Some(output);
                                Ok((preview, success))
                            }
                            Err(err) => Err(err),
                        }
                    }
                },
            )
            .await;

        match result {
            Ok(_) => {
                let mut guard = output_cell.lock().await;
                let output = guard.take().ok_or_else(|| {
                    FunctionCallError::Fatal("tool produced no output".to_string())
                })?;
                Ok(output.into_response(&call_id_owned, &payload_for_response))
            }
            Err(err) => Err(err),
        }
    }
}

pub struct ToolRegistryBuilder {
    handlers: HashMap<String, Arc<dyn ToolHandler>>,
    specs: Vec<ToolSpec>,
}

impl ToolRegistryBuilder {
    pub fn new() -> Self {
        Self {
            handlers: HashMap::new(),
            specs: Vec::new(),
        }
    }

    pub fn push_spec(&mut self, spec: ToolSpec) {
        self.specs.push(spec);
    }

    pub fn register_handler(&mut self, name: impl Into<String>, handler: Arc<dyn ToolHandler>) {
        let name = name.into();
        if self
            .handlers
            .insert(name.clone(), handler.clone())
            .is_some()
        {
            warn!("overwriting handler for tool {name}");
        }
    }

    // TODO(jif) for dynamic tools.
    // pub fn register_many<I>(&mut self, names: I, handler: Arc<dyn ToolHandler>)
    // where
    //     I: IntoIterator,
    //     I::Item: Into<String>,
    // {
    //     for name in names {
    //         let name = name.into();
    //         if self
    //             .handlers
    //             .insert(name.clone(), handler.clone())
    //             .is_some()
    //         {
    //             warn!("overwriting handler for tool {name}");
    //         }
    //     }
    // }

    pub fn build(self) -> (Vec<ToolSpec>, ToolRegistry) {
        let registry = ToolRegistry::new(self.handlers);
        (self.specs, registry)
    }
}

fn unsupported_tool_call_message(payload: &ToolPayload, tool_name: &str) -> String {
    match payload {
        ToolPayload::Custom { .. } => format!("unsupported custom tool call: {tool_name}"),
        _ => format!("unsupported call: {tool_name}"),
    }
}
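In short, the default `matches_kind` encodes a one-to-one kind/payload table (handlers such as `ShellHandler` and `UnifiedExecHandler` widen it by overriding the method), and `dispatch` distinguishes model-visible failures from wiring bugs. A minimal sketch of that error contract, with the return strings as descriptions rather than real types:

// Sketch of the dispatch error contract above: an unknown tool name is
// reported back to the model, while a known tool invoked with a payload
// that fails `matches_kind` is fatal, since it indicates a routing bug.
fn classify_dispatch_outcome(known_tool: bool, kind_matches: bool) -> &'static str {
    match (known_tool, kind_matches) {
        (false, _) => "Err(RespondToModel(\"unsupported call: ...\"))",
        (true, false) => "Err(Fatal(\"... invoked with incompatible payload\"))",
        (true, true) => "handler.handle(invocation) runs",
    }
}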
177 codex-rs/core/src/tools/router.rs Normal file
@@ -0,0 +1,177 @@
use std::collections::HashMap;

use crate::client_common::tools::ToolSpec;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolRegistry;
use crate::tools::spec::ToolsConfig;
use crate::tools::spec::build_specs;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_protocol::models::LocalShellAction;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::models::ShellToolCallParams;

#[derive(Clone)]
pub struct ToolCall {
    pub tool_name: String,
    pub call_id: String,
    pub payload: ToolPayload,
}

pub struct ToolRouter {
    registry: ToolRegistry,
    specs: Vec<ToolSpec>,
}

impl ToolRouter {
    pub fn from_config(
        config: &ToolsConfig,
        mcp_tools: Option<HashMap<String, mcp_types::Tool>>,
    ) -> Self {
        let builder = build_specs(config, mcp_tools);
        let (specs, registry) = builder.build();
        Self { registry, specs }
    }

    pub fn specs(&self) -> &[ToolSpec] {
        &self.specs
    }

    pub fn build_tool_call(
        session: &Session,
        item: ResponseItem,
    ) -> Result<Option<ToolCall>, FunctionCallError> {
        match item {
            ResponseItem::FunctionCall {
                name,
                arguments,
                call_id,
                ..
            } => {
                if let Some((server, tool)) = session.parse_mcp_tool_name(&name) {
                    Ok(Some(ToolCall {
                        tool_name: name,
                        call_id,
                        payload: ToolPayload::Mcp {
                            server,
                            tool,
                            raw_arguments: arguments,
                        },
                    }))
                } else {
                    let payload = if name == "unified_exec" {
                        ToolPayload::UnifiedExec { arguments }
                    } else {
                        ToolPayload::Function { arguments }
                    };
                    Ok(Some(ToolCall {
                        tool_name: name,
                        call_id,
                        payload,
                    }))
                }
            }
            ResponseItem::CustomToolCall {
                name,
                input,
                call_id,
                ..
            } => Ok(Some(ToolCall {
                tool_name: name,
                call_id,
                payload: ToolPayload::Custom { input },
            })),
            ResponseItem::LocalShellCall {
                id,
                call_id,
                action,
                ..
            } => {
                let call_id = call_id
                    .or(id)
                    .ok_or(FunctionCallError::MissingLocalShellCallId)?;

                match action {
                    LocalShellAction::Exec(exec) => {
                        let params = ShellToolCallParams {
                            command: exec.command,
                            workdir: exec.working_directory,
                            timeout_ms: exec.timeout_ms,
                            with_escalated_permissions: None,
                            justification: None,
                        };
                        Ok(Some(ToolCall {
                            tool_name: "local_shell".to_string(),
                            call_id,
                            payload: ToolPayload::LocalShell { params },
                        }))
                    }
                }
            }
            _ => Ok(None),
        }
    }

    pub async fn dispatch_tool_call(
        &self,
        session: &Session,
        turn: &TurnContext,
        tracker: &mut TurnDiffTracker,
        sub_id: &str,
        call: ToolCall,
    ) -> Result<ResponseInputItem, FunctionCallError> {
        let ToolCall {
            tool_name,
            call_id,
            payload,
        } = call;
        let payload_outputs_custom = matches!(payload, ToolPayload::Custom { .. });
        let failure_call_id = call_id.clone();

        let invocation = ToolInvocation {
            session,
            turn,
            tracker,
            sub_id,
            call_id,
            tool_name,
            payload,
        };

        match self.registry.dispatch(invocation).await {
            Ok(response) => Ok(response),
            Err(FunctionCallError::Fatal(message)) => Err(FunctionCallError::Fatal(message)),
            Err(err) => Ok(Self::failure_response(
                failure_call_id,
                payload_outputs_custom,
                err,
            )),
        }
    }

    fn failure_response(
        call_id: String,
        payload_outputs_custom: bool,
        err: FunctionCallError,
    ) -> ResponseInputItem {
        let message = err.to_string();
        if payload_outputs_custom {
            ResponseInputItem::CustomToolCallOutput {
                call_id,
                output: message,
            }
        } else {
            ResponseInputItem::FunctionCallOutput {
                call_id,
                output: codex_protocol::models::FunctionCallOutputPayload {
                    content: message,
                    success: Some(false),
                },
            }
        }
    }
}
1286 codex-rs/core/src/tools/spec.rs Normal file
File diff suppressed because it is too large
@@ -7,6 +7,9 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;

#[cfg(target_os = "linux")]
use assert_cmd::cargo::cargo_bin;

pub mod responses;
pub mod test_codex;
pub mod test_codex_exec;
@@ -17,12 +20,25 @@ pub mod test_codex_exec;
pub fn load_default_config_for_test(codex_home: &TempDir) -> Config {
    Config::load_from_base_config_with_overrides(
        ConfigToml::default(),
-        ConfigOverrides::default(),
+        default_test_overrides(),
        codex_home.path().to_path_buf(),
    )
    .expect("defaults for test should always succeed")
}

#[cfg(target_os = "linux")]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides {
        codex_linux_sandbox_exe: Some(cargo_bin("codex-linux-sandbox")),
        ..ConfigOverrides::default()
    }
}

#[cfg(not(target_os = "linux"))]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides::default()
}

/// Builds an SSE stream body from a JSON fixture.
///
/// The fixture must contain an array of objects where each object represents a

@@ -13,7 +13,7 @@ use tempfile::TempDir;

use crate::load_default_config_for_test;

-type ConfigMutator = dyn FnOnce(&mut Config);
+type ConfigMutator = dyn FnOnce(&mut Config) + Send;

pub struct TestCodexBuilder {
    config_mutators: Vec<Box<ConfigMutator>>,
@@ -22,7 +22,7 @@ pub struct TestCodexBuilder {
impl TestCodexBuilder {
    pub fn with_config<T>(mut self, mutator: T) -> Self
    where
-        T: FnOnce(&mut Config) + 'static,
+        T: FnOnce(&mut Config) + Send + 'static,
    {
        self.config_mutators.push(Box::new(mutator));
        self

@@ -1,4 +1,5 @@
#![allow(clippy::expect_used)]
use codex_core::auth::CODEX_API_KEY_ENV_VAR;
use std::path::Path;
use tempfile::TempDir;
use wiremock::MockServer;
@@ -14,7 +15,7 @@ impl TestCodexExecBuilder {
            .expect("should find binary for codex-exec");
        cmd.current_dir(self.cwd.path())
            .env("CODEX_HOME", self.home.path())
-            .env("OPENAI_API_KEY", "dummy");
+            .env(CODEX_API_KEY_ENV_VAR, "dummy");
        cmd
    }
    pub fn cmd_with_server(&self, server: &MockServer) -> assert_cmd::Command {

@@ -76,7 +76,7 @@ async fn chat_mode_stream_cli() {
    server.verify().await;

    // Verify a new session rollout was created and is discoverable via list_conversations
-    let page = RolloutRecorder::list_conversations(home.path(), 10, None)
+    let page = RolloutRecorder::list_conversations(home.path(), 10, None, &[])
        .await
        .expect("list conversations");
    assert!(

@@ -17,6 +17,7 @@ use codex_core::built_in_model_providers;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SessionSource;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::models::ReasoningItemReasoningSummary;
@@ -538,7 +539,7 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
        Ok(None) => panic!("No CodexAuth found in codex_home"),
        Err(e) => panic!("Failed to load CodexAuth: {e}"),
    };
-    let conversation_manager = ConversationManager::new(auth_manager);
+    let conversation_manager = ConversationManager::new(auth_manager, SessionSource::Exec);
    let NewConversation {
        conversation: codex,
        ..

@@ -12,12 +12,18 @@ mod fork_conversation;
mod json_result;
mod live_cli;
mod model_overrides;
mod model_tools;
mod otel;
mod prompt_caching;
mod read_file;
mod review;
mod rmcp_client;
mod rollout_list_find;
mod seatbelt;
mod stream_error_allows_next_turn;
mod stream_no_completed;
mod tool_harness;
mod tools;
mod unified_exec;
mod user_notification;
mod view_image;

131 codex-rs/core/tests/suite/model_tools.rs Normal file
@@ -0,0 +1,131 @@
#![allow(clippy::unwrap_used)]

use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::skip_if_no_network;
use core_test_support::wait_for_event;
use tempfile::TempDir;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

fn sse_completed(id: &str) -> String {
    load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}

#[allow(clippy::expect_used)]
fn tool_identifiers(body: &serde_json::Value) -> Vec<String> {
    body["tools"]
        .as_array()
        .unwrap()
        .iter()
        .map(|tool| {
            tool.get("name")
                .and_then(|v| v.as_str())
                .or_else(|| tool.get("type").and_then(|v| v.as_str()))
                .map(std::string::ToString::to_string)
                .expect("tool should have either name or type")
        })
        .collect()
}

#[allow(clippy::expect_used)]
async fn collect_tool_identifiers_for_model(model: &str) -> Vec<String> {
    let server = MockServer::start().await;

    let sse = sse_completed(model);
    let template = ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(sse, "text/event-stream");

    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(template)
        .expect(1)
        .mount(&server)
        .await;

    let model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };

    let cwd = TempDir::new().unwrap();
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home);
    config.cwd = cwd.path().to_path_buf();
    config.model_provider = model_provider;
    config.model = model.to_string();
    config.model_family =
        find_family_for_model(model).unwrap_or_else(|| panic!("unknown model family for {model}"));
    config.include_plan_tool = false;
    config.include_apply_patch_tool = false;
    config.include_view_image_tool = false;
    config.tools_web_search_request = false;
    config.use_experimental_streamable_shell_tool = false;
    config.use_experimental_unified_exec_tool = false;

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config)
        .await
        .expect("create new conversation")
        .conversation;

    codex
        .submit(Op::UserInput {
            items: vec![InputItem::Text {
                text: "hello tools".into(),
            }],
        })
        .await
        .unwrap();
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    let requests = server.received_requests().await.unwrap();
    assert_eq!(
        requests.len(),
        1,
        "expected a single request for model {model}"
    );
    let body = requests[0].body_json::<serde_json::Value>().unwrap();
    tool_identifiers(&body)
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_selects_expected_tools() {
    skip_if_no_network!();
    use pretty_assertions::assert_eq;

    let codex_tools = collect_tool_identifiers_for_model("codex-mini-latest").await;
    assert_eq!(
        codex_tools,
        vec!["local_shell".to_string()],
        "codex-mini-latest should expose the local shell tool",
    );

    let o3_tools = collect_tool_identifiers_for_model("o3").await;
    assert_eq!(
        o3_tools,
        vec!["shell".to_string()],
        "o3 should expose the generic shell tool",
    );

    let gpt5_codex_tools = collect_tool_identifiers_for_model("gpt-5-codex").await;
    assert_eq!(
        gpt5_codex_tools,
        vec!["shell".to_string(), "read_file".to_string()],
        "gpt-5-codex should expose the beta read_file tool",
    );
}
@@ -219,7 +219,13 @@ async fn prompt_tools_are_consistent_across_requests() {

    // our internal implementation is responsible for keeping tools in sync
    // with the OpenAI schema, so we just verify the tool presence here
-    let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
+    let expected_tools_names: &[&str] = &[
+        "shell",
+        "update_plan",
+        "apply_patch",
+        "read_file",
+        "view_image",
+    ];
    let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
    assert_eq!(
        body0["instructions"],

124 codex-rs/core/tests/suite/read_file.rs Normal file
@@ -0,0 +1,124 @@
#![cfg(not(target_os = "windows"))]

use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use serde_json::Value;
use wiremock::matchers::any;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn read_file_tool_returns_requested_lines() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = test_codex().build(&server).await?;

    let file_path = cwd.path().join("sample.txt");
    std::fs::write(&file_path, "first\nsecond\nthird\nfourth\n")?;
    let file_path = file_path.to_string_lossy().to_string();

    let call_id = "read-file-call";
    let arguments = serde_json::json!({
        "file_path": file_path,
        "offset": 2,
        "limit": 2,
    })
    .to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "read_file", &arguments),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please inspect sample.txt".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    let requests = server.received_requests().await.expect("recorded requests");
    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().unwrap())
        .collect::<Vec<_>>();
    assert!(
        !request_bodies.is_empty(),
        "expected at least one request body"
    );

    let tool_output_item = request_bodies
        .iter()
        .find_map(|body| {
            body.get("input")
                .and_then(Value::as_array)
                .and_then(|items| {
                    items.iter().find(|item| {
                        item.get("type").and_then(Value::as_str) == Some("function_call_output")
                    })
                })
        })
        .unwrap_or_else(|| {
            panic!("function_call_output item not found in requests: {request_bodies:#?}")
        });

    assert_eq!(
        tool_output_item.get("call_id").and_then(Value::as_str),
        Some(call_id)
    );

    let output_text = tool_output_item
        .get("output")
        .and_then(|value| match value {
            Value::String(text) => Some(text.as_str()),
            Value::Object(obj) => obj.get("content").and_then(Value::as_str),
            _ => None,
        })
        .expect("output text present");
    assert_eq!(output_text, "L2: second\nL3: third");

    Ok(())
}
@@ -1,6 +1,11 @@
use std::collections::HashMap;
use std::ffi::OsString;
use std::fs;
use std::net::TcpListener;
use std::path::Path;
use std::time::Duration;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;

use codex_core::config_types::McpServerConfig;
use codex_core::config_types::McpServerTransportConfig;
@@ -19,6 +24,8 @@ use core_test_support::wait_for_event;
use core_test_support::wait_for_event_with_timeout;
use escargot::CargoBuild;
use serde_json::Value;
use serial_test::serial;
use tempfile::tempdir;
use tokio::net::TcpStream;
use tokio::process::Child;
use tokio::process::Command;
@@ -328,6 +335,189 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
    Ok(())
}

/// This test writes to a fallback credentials file in CODEX_HOME.
/// Ideally, we wouldn't need to serialize the test, but it's much more cumbersome to wire CODEX_HOME through the code.
#[serial(codex_home)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;

    let call_id = "call-789";
    let server_name = "rmcp_http_oauth";
    let tool_name = format!("{server_name}__echo");

    mount_sse_once_match(
        &server,
        any(),
        responses::sse(vec![
            serde_json::json!({
                "type": "response.created",
                "response": {"id": "resp-1"}
            }),
            responses::ev_function_call(call_id, &tool_name, "{\"message\":\"ping\"}"),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    mount_sse_once_match(
        &server,
        any(),
        responses::sse(vec![
            responses::ev_assistant_message(
                "msg-1",
                "rmcp streamable http oauth echo tool completed successfully.",
            ),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;

    let expected_env_value = "propagated-env-http-oauth";
    let expected_token = "initial-access-token";
    let client_id = "test-client-id";
    let refresh_token = "initial-refresh-token";
    let rmcp_http_server_bin = CargoBuild::new()
        .package("codex-rmcp-client")
        .bin("test_streamable_http_server")
        .run()?
        .path()
        .to_string_lossy()
        .into_owned();

    let listener = TcpListener::bind("127.0.0.1:0")?;
    let port = listener.local_addr()?.port();
    drop(listener);
    let bind_addr = format!("127.0.0.1:{port}");
    let server_url = format!("http://{bind_addr}/mcp");

    let mut http_server_child = Command::new(&rmcp_http_server_bin)
        .kill_on_drop(true)
        .env("MCP_STREAMABLE_HTTP_BIND_ADDR", &bind_addr)
        .env("MCP_EXPECT_BEARER", expected_token)
        .env("MCP_TEST_VALUE", expected_env_value)
        .spawn()?;

    wait_for_streamable_http_server(&mut http_server_child, &bind_addr, Duration::from_secs(5))
        .await?;

    let temp_home = tempdir()?;
    let _guard = EnvVarGuard::set("CODEX_HOME", temp_home.path().as_os_str());
    write_fallback_oauth_tokens(
        temp_home.path(),
        server_name,
        &server_url,
        client_id,
        expected_token,
        refresh_token,
    )?;

    let fixture = test_codex()
        .with_config(move |config| {
            config.use_experimental_use_rmcp_client = true;
            config.mcp_servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::StreamableHttp {
                        url: server_url,
                        bearer_token: None,
                    },
                    startup_timeout_sec: Some(Duration::from_secs(10)),
                    tool_timeout_sec: None,
                },
            );
        })
        .build(&server)
        .await?;
    let session_model = fixture.session_configured.model.clone();

    fixture
        .codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "call the rmcp streamable http oauth echo tool".into(),
            }],
            final_output_json_schema: None,
            cwd: fixture.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let begin_event = wait_for_event_with_timeout(
        &fixture.codex,
        |ev| matches!(ev, EventMsg::McpToolCallBegin(_)),
        Duration::from_secs(10),
    )
    .await;

    let EventMsg::McpToolCallBegin(begin) = begin_event else {
        unreachable!("event guard guarantees McpToolCallBegin");
    };
    assert_eq!(begin.invocation.server, server_name);
    assert_eq!(begin.invocation.tool, "echo");

    let end_event = wait_for_event(&fixture.codex, |ev| {
        matches!(ev, EventMsg::McpToolCallEnd(_))
    })
    .await;
    let EventMsg::McpToolCallEnd(end) = end_event else {
        unreachable!("event guard guarantees McpToolCallEnd");
    };

    let result = end
        .result
        .as_ref()
        .expect("rmcp echo tool should return success");
    assert_eq!(result.is_error, Some(false));
    assert!(
        result.content.is_empty(),
        "content should default to an empty array"
    );

    let structured = result
        .structured_content
        .as_ref()
        .expect("structured content");
    let Value::Object(map) = structured else {
        panic!("structured content should be an object: {structured:?}");
    };
    let echo_value = map
        .get("echo")
        .and_then(Value::as_str)
        .expect("echo payload present");
    assert_eq!(echo_value, "ECHOING: ping");
    let env_value = map
        .get("env")
        .and_then(Value::as_str)
        .expect("env snapshot inserted");
    assert_eq!(env_value, expected_env_value);

    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    server.verify().await;

    match http_server_child.try_wait() {
        Ok(Some(_)) => {}
        Ok(None) => {
            let _ = http_server_child.kill().await;
        }
        Err(error) => {
            eprintln!("failed to check streamable http oauth server status: {error}");
            let _ = http_server_child.kill().await;
        }
    }
    if let Err(error) = http_server_child.wait().await {
        eprintln!("failed to await streamable http oauth server shutdown: {error}");
    }

    Ok(())
}

async fn wait_for_streamable_http_server(
    server_child: &mut Child,
    address: &str,
@@ -369,3 +559,60 @@ async fn wait_for_streamable_http_server(
        sleep(Duration::from_millis(50)).await;
    }
}

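/// Writes a stub OAuth token store to `.credentials.json` under the given
/// CODEX_HOME so the client can load credentials without a real login flow.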
fn write_fallback_oauth_tokens(
    home: &Path,
    server_name: &str,
    server_url: &str,
    client_id: &str,
    access_token: &str,
    refresh_token: &str,
) -> anyhow::Result<()> {
    let expires_at = SystemTime::now()
        .checked_add(Duration::from_secs(3600))
        .ok_or_else(|| anyhow::anyhow!("failed to compute expiry time"))?
        .duration_since(UNIX_EPOCH)?
        .as_millis() as u64;

    let store = serde_json::json!({
        "stub": {
            "server_name": server_name,
            "server_url": server_url,
            "client_id": client_id,
            "access_token": access_token,
            "expires_at": expires_at,
            "refresh_token": refresh_token,
            "scopes": ["profile"],
        }
    });

    let file_path = home.join(".credentials.json");
    fs::write(&file_path, serde_json::to_vec(&store)?)?;
    Ok(())
}

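/// RAII guard that sets an environment variable for the duration of a test and
/// restores the previous value (or removes the variable) on drop.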
struct EnvVarGuard {
    key: &'static str,
    original: Option<OsString>,
}

impl EnvVarGuard {
    fn set(key: &'static str, value: &std::ffi::OsStr) -> Self {
        let original = std::env::var_os(key);
        unsafe {
            std::env::set_var(key, value);
        }
        Self { key, original }
    }
}

impl Drop for EnvVarGuard {
    fn drop(&mut self) {
        unsafe {
            match &self.original {
                Some(value) => std::env::set_var(self.key, value),
                None => std::env::remove_var(self.key),
            }
        }
    }
}
568
codex-rs/core/tests/suite/tool_harness.rs
Normal file
@@ -0,0 +1,568 @@
#![cfg(not(target_os = "windows"))]

use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::plan_tool::StepStatus;
use core_test_support::responses;
use core_test_support::responses::ev_apply_patch_function_call;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_local_shell_call;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use serde_json::Value;
use serde_json::json;
use wiremock::matchers::any;

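// Helpers for pulling the function_call_output item, and its text payload, out
// of the JSON request bodies recorded by the mock server.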
fn function_call_output(body: &Value) -> Option<&Value> {
    body.get("input")
        .and_then(Value::as_array)
        .and_then(|items| {
            items.iter().find(|item| {
                item.get("type").and_then(Value::as_str) == Some("function_call_output")
            })
        })
}

fn extract_output_text(item: &Value) -> Option<&str> {
    item.get("output").and_then(|value| match value {
        Value::String(text) => Some(text.as_str()),
        Value::Object(obj) => obj.get("content").and_then(Value::as_str),
        _ => None,
    })
}

fn find_request_with_function_call_output(requests: &[Value]) -> Option<&Value> {
    requests
        .iter()
        .find(|body| function_call_output(body).is_some())
}

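// Verifies that a local_shell call is executed and that its captured stdout is
// forwarded back to the model as a function_call_output item.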
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.include_apply_patch_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let call_id = "shell-tool-call";
    let command = vec!["/bin/echo", "tool harness"];
    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_local_shell_call(call_id, "completed", command),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "all done"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please run the shell command".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    let output_text = extract_output_text(output_item).expect("output text present");
    let exec_output: Value = serde_json::from_str(output_text)?;
    assert_eq!(exec_output["metadata"]["exit_code"], 0);
    let stdout = exec_output["output"].as_str().expect("stdout field");
    assert!(
        stdout.contains("tool harness"),
        "expected stdout to contain command output, got {stdout:?}"
    );

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.include_plan_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let call_id = "plan-tool-call";
    let plan_args = json!({
        "explanation": "Tool harness check",
        "plan": [
            {"step": "Inspect workspace", "status": "in_progress"},
            {"step": "Report results", "status": "pending"},
        ],
    })
    .to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "update_plan", &plan_args),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "plan acknowledged"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please update the plan".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let mut saw_plan_update = false;

    loop {
        let event = codex.next_event().await.expect("event");
        match event.msg {
            EventMsg::PlanUpdate(update) => {
                saw_plan_update = true;
                assert_eq!(update.explanation.as_deref(), Some("Tool harness check"));
                assert_eq!(update.plan.len(), 2);
                assert_eq!(update.plan[0].step, "Inspect workspace");
                assert!(matches!(update.plan[0].status, StepStatus::InProgress));
                assert_eq!(update.plan[1].step, "Report results");
                assert!(matches!(update.plan[1].status, StepStatus::Pending));
            }
            EventMsg::TaskComplete(_) => break,
            _ => {}
        }
    }

    assert!(saw_plan_update, "expected PlanUpdate event");

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    assert_eq!(
        output_item.get("call_id").and_then(Value::as_str),
        Some(call_id)
    );
    let output_text = extract_output_text(output_item).expect("output text present");
    assert_eq!(output_text, "Plan updated");

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.include_plan_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let call_id = "plan-tool-invalid";
    let invalid_args = json!({
        "explanation": "Missing plan data"
    })
    .to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "update_plan", &invalid_args),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "malformed plan payload"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please update the plan".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let mut saw_plan_update = false;

    loop {
        let event = codex.next_event().await.expect("event");
        match event.msg {
            EventMsg::PlanUpdate(_) => saw_plan_update = true,
            EventMsg::TaskComplete(_) => break,
            _ => {}
        }
    }

    assert!(
        !saw_plan_update,
        "did not expect PlanUpdate event for malformed payload"
    );

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    assert_eq!(
        output_item.get("call_id").and_then(Value::as_str),
        Some(call_id)
    );
    let output_text = extract_output_text(output_item).expect("output text present");
    assert!(
        output_text.contains("failed to parse function arguments"),
        "expected parse error message in output text, got {output_text:?}"
    );
    if let Some(success_flag) = output_item
        .get("output")
        .and_then(|value| value.as_object())
        .and_then(|obj| obj.get("success"))
        .and_then(serde_json::Value::as_bool)
    {
        assert!(
            !success_flag,
            "expected tool output to mark success=false for malformed payload"
        );
    }

    Ok(())
}

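// Verifies that the apply_patch tool writes the patched file on disk and emits
// the PatchApplyBegin/PatchApplyEnd event pair with a matching call id.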
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.include_apply_patch_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let call_id = "apply-patch-call";
    let patch_content = r#"*** Begin Patch
*** Add File: notes.txt
+Tool harness apply patch
*** End Patch"#;

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_apply_patch_function_call(call_id, patch_content),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "patch complete"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please apply a patch".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let mut saw_patch_begin = false;
    let mut patch_end_success = None;

    loop {
        let event = codex.next_event().await.expect("event");
        match event.msg {
            EventMsg::PatchApplyBegin(begin) => {
                saw_patch_begin = true;
                assert_eq!(begin.call_id, call_id);
            }
            EventMsg::PatchApplyEnd(end) => {
                assert_eq!(end.call_id, call_id);
                patch_end_success = Some(end.success);
            }
            EventMsg::TaskComplete(_) => break,
            _ => {}
        }
    }

    assert!(saw_patch_begin, "expected PatchApplyBegin event");
    let patch_end_success =
        patch_end_success.expect("expected PatchApplyEnd event to capture success flag");

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    assert_eq!(
        output_item.get("call_id").and_then(Value::as_str),
        Some(call_id)
    );
    let output_text = extract_output_text(output_item).expect("output text present");

    if let Ok(exec_output) = serde_json::from_str::<Value>(output_text) {
        let exit_code = exec_output["metadata"]["exit_code"]
            .as_i64()
            .expect("exit_code present");
        let summary = exec_output["output"].as_str().expect("output field");
        assert_eq!(
            exit_code, 0,
            "expected apply_patch exit_code=0, got {exit_code}, summary: {summary:?}"
        );
        assert!(
            patch_end_success,
            "expected PatchApplyEnd success flag, summary: {summary:?}"
        );
        assert!(
            summary.contains("Success."),
            "expected apply_patch summary to note success, got {summary:?}"
        );

        let patched_path = cwd.path().join("notes.txt");
        let contents = std::fs::read_to_string(&patched_path)
            .unwrap_or_else(|e| panic!("failed reading {}: {e}", patched_path.display()));
        assert_eq!(contents, "Tool harness apply patch\n");
    } else {
        assert!(
            output_text.contains("codex-run-as-apply-patch"),
            "expected apply_patch failure message to mention codex-run-as-apply-patch, got {output_text:?}"
        );
        assert!(
            !patch_end_success,
            "expected PatchApplyEnd to report success=false when apply_patch invocation fails"
        );
    }

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.include_apply_patch_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let call_id = "apply-patch-parse-error";
    let patch_content = r"*** Begin Patch
*** Update File: broken.txt
*** End Patch";

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_apply_patch_function_call(call_id, patch_content),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "failed"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please apply a patch".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    assert_eq!(
        output_item.get("call_id").and_then(Value::as_str),
        Some(call_id)
    );
    let output_text = extract_output_text(output_item).expect("output text present");

    assert!(
        output_text.contains("apply_patch verification failed"),
        "expected apply_patch verification failure message, got {output_text:?}"
    );
    assert!(
        output_text.contains("invalid hunk"),
        "expected parse diagnostics in output text, got {output_text:?}"
    );

    if let Some(success_flag) = output_item
        .get("output")
        .and_then(|value| value.as_object())
        .and_then(|obj| obj.get("success"))
        .and_then(serde_json::Value::as_bool)
    {
        assert!(
            !success_flag,
            "expected tool output to mark success=false for parse failures"
        );
    }

    Ok(())
}
460
codex-rs/core/tests/suite/tools.rs
Normal file
@@ -0,0 +1,460 @@
#![cfg(not(target_os = "windows"))]
#![allow(clippy::unwrap_used, clippy::expect_used)]

use anyhow::Result;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_custom_tool_call;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use serde_json::Value;
use serde_json::json;
use wiremock::Request;

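// Submits a single user turn with the given policies and waits for the task to
// complete, so each test can focus on asserting the recorded requests.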
async fn submit_turn(
    test: &TestCodex,
    prompt: &str,
    approval_policy: AskForApproval,
    sandbox_policy: SandboxPolicy,
) -> Result<()> {
    let session_model = test.session_configured.model.clone();

    test.codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: prompt.into(),
            }],
            final_output_json_schema: None,
            cwd: test.cwd.path().to_path_buf(),
            approval_policy,
            sandbox_policy,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = test.codex.next_event().await?;
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    Ok(())
}

fn request_bodies(requests: &[Request]) -> Result<Vec<Value>> {
    requests
        .iter()
        .map(|req| Ok(serde_json::from_slice::<Value>(&req.body)?))
        .collect()
}

fn collect_output_items<'a>(bodies: &'a [Value], ty: &str) -> Vec<&'a Value> {
    let mut out = Vec::new();
    for body in bodies {
        if let Some(items) = body.get("input").and_then(Value::as_array) {
            for item in items {
                if item.get("type").and_then(Value::as_str) == Some(ty) {
                    out.push(item);
                }
            }
        }
    }
    out
}

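// Extracts the advertised tool names (falling back to the `type` field for
// built-in tools) from a request body.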
fn tool_names(body: &Value) -> Vec<String> {
    body.get("tools")
        .and_then(Value::as_array)
        .map(|tools| {
            tools
                .iter()
                .filter_map(|tool| {
                    tool.get("name")
                        .or_else(|| tool.get("type"))
                        .and_then(Value::as_str)
                        .map(str::to_string)
                })
                .collect()
        })
        .unwrap_or_default()
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn custom_tool_unknown_returns_custom_output_error() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;

    let call_id = "custom-unsupported";
    let tool_name = "unsupported_tool";

    let responses = vec![
        sse(vec![
            json!({"type": "response.created", "response": {"id": "resp-1"}}),
            ev_custom_tool_call(call_id, tool_name, "\"payload\""),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    submit_turn(
        &test,
        "invoke custom tool",
        AskForApproval::Never,
        SandboxPolicy::DangerFullAccess,
    )
    .await?;

    let requests = server.received_requests().await.expect("recorded requests");
    let bodies = request_bodies(&requests)?;
    let custom_items = collect_output_items(&bodies, "custom_tool_call_output");
    assert_eq!(custom_items.len(), 1, "expected single custom tool output");
    let item = custom_items[0];
    assert_eq!(item.get("call_id").and_then(Value::as_str), Some(call_id));

    let output = item
        .get("output")
        .and_then(Value::as_str)
        .unwrap_or_default();
    let expected = format!("unsupported custom tool call: {tool_name}");
    assert_eq!(output, expected);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_escalated_permissions_rejected_then_ok() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;

    let command = ["/bin/echo", "shell ok"];
    let call_id_blocked = "shell-blocked";
    let call_id_success = "shell-success";

    let first_args = json!({
        "command": command,
        "timeout_ms": 1_000,
        "with_escalated_permissions": true,
    });
    let second_args = json!({
        "command": command,
        "timeout_ms": 1_000,
    });

    let responses = vec![
        sse(vec![
            json!({"type": "response.created", "response": {"id": "resp-1"}}),
            ev_function_call(
                call_id_blocked,
                "shell",
                &serde_json::to_string(&first_args)?,
            ),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            json!({"type": "response.created", "response": {"id": "resp-2"}}),
            ev_function_call(
                call_id_success,
                "shell",
                &serde_json::to_string(&second_args)?,
            ),
            ev_completed("resp-2"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-3"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    submit_turn(
        &test,
        "run the shell command",
        AskForApproval::Never,
        SandboxPolicy::DangerFullAccess,
    )
    .await?;

    let requests = server.received_requests().await.expect("recorded requests");
    let bodies = request_bodies(&requests)?;
    let function_outputs = collect_output_items(&bodies, "function_call_output");
    for item in &function_outputs {
        let call_id = item
            .get("call_id")
            .and_then(Value::as_str)
            .unwrap_or_default();
        assert!(
            call_id == call_id_blocked || call_id == call_id_success,
            "unexpected call id {call_id}"
        );
    }

    let policy = AskForApproval::Never;
    let expected_message = format!(
        "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}"
    );

    let blocked_outputs: Vec<&Value> = function_outputs
        .iter()
        .filter(|item| item.get("call_id").and_then(Value::as_str) == Some(call_id_blocked))
        .copied()
        .collect();
    assert!(
        !blocked_outputs.is_empty(),
        "expected at least one rejection output for {call_id_blocked}"
    );
    for item in blocked_outputs {
        assert_eq!(
            item.get("output").and_then(Value::as_str),
            Some(expected_message.as_str()),
            "unexpected rejection message"
        );
    }

    let success_item = function_outputs
        .iter()
        .find(|item| item.get("call_id").and_then(Value::as_str) == Some(call_id_success))
        .expect("success output present");
    let output_json: Value = serde_json::from_str(
        success_item
            .get("output")
            .and_then(Value::as_str)
            .expect("success output string"),
    )?;
    assert_eq!(
        output_json["metadata"]["exit_code"].as_i64(),
        Some(0),
        "expected exit code 0 after rerunning without escalation",
    );
    let stdout = output_json["output"].as_str().unwrap_or_default();
    assert!(
        stdout.contains("shell ok"),
        "expected stdout to include command output, got {stdout:?}"
    );

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn local_shell_missing_ids_maps_to_function_output_error() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;

    let local_shell_event = json!({
        "type": "response.output_item.done",
        "item": {
            "type": "local_shell_call",
            "status": "completed",
            "action": {
                "type": "exec",
                "command": ["/bin/echo", "hi"],
            }
        }
    });

    let responses = vec![
        sse(vec![
            json!({"type": "response.created", "response": {"id": "resp-1"}}),
            local_shell_event,
            ev_completed("resp-1"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    submit_turn(
        &test,
        "check shell output",
        AskForApproval::Never,
        SandboxPolicy::DangerFullAccess,
    )
    .await?;

    let requests = server.received_requests().await.expect("recorded requests");
    let bodies = request_bodies(&requests)?;
    let function_outputs = collect_output_items(&bodies, "function_call_output");
    assert_eq!(
        function_outputs.len(),
        1,
        "expected a single function output"
    );
    let item = function_outputs[0];
    assert_eq!(item.get("call_id").and_then(Value::as_str), Some(""));
    assert_eq!(
        item.get("output").and_then(Value::as_str),
        Some("LocalShellCall without call_id or id"),
    );

    Ok(())
}

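// Drives a minimal turn with the unified exec flag toggled and returns the
// tool names offered in the single resulting request.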
async fn collect_tools(use_unified_exec: bool) -> Result<Vec<String>> {
    let server = start_mock_server().await;

    let responses = vec![sse(vec![
        json!({"type": "response.created", "response": {"id": "resp-1"}}),
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-1"),
    ])];
    mount_sse_sequence(&server, responses).await;

    let mut builder = test_codex().with_config(move |config| {
        config.use_experimental_unified_exec_tool = use_unified_exec;
    });
    let test = builder.build(&server).await?;

    submit_turn(
        &test,
        "list tools",
        AskForApproval::Never,
        SandboxPolicy::DangerFullAccess,
    )
    .await?;

    let requests = server.received_requests().await.expect("recorded requests");
    assert_eq!(
        requests.len(),
        1,
        "expected a single request for tools collection"
    );
    let bodies = request_bodies(&requests)?;
    let first_body = bodies.first().expect("request body present");
    Ok(tool_names(first_body))
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn unified_exec_spec_toggle_end_to_end() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let tools_disabled = collect_tools(false).await?;
    assert!(
        !tools_disabled.iter().any(|name| name == "unified_exec"),
        "tools list should not include unified_exec when disabled: {tools_disabled:?}"
    );

    let tools_enabled = collect_tools(true).await?;
    assert!(
        tools_enabled.iter().any(|name| name == "unified_exec"),
        "tools list should include unified_exec when enabled: {tools_enabled:?}"
    );

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn shell_timeout_includes_timeout_prefix_and_metadata() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let mut builder = test_codex();
    let test = builder.build(&server).await?;

    let call_id = "shell-timeout";
    let timeout_ms = 50u64;
    let args = json!({
        "command": ["/bin/sh", "-c", "yes line | head -n 400; sleep 1"],
        "timeout_ms": timeout_ms,
    });

    let responses = vec![
        sse(vec![
            json!({"type": "response.created", "response": {"id": "resp-1"}}),
            ev_function_call(call_id, "shell", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    submit_turn(
        &test,
        "run a long command",
        AskForApproval::Never,
        SandboxPolicy::DangerFullAccess,
    )
    .await?;

    let requests = server.received_requests().await.expect("recorded requests");
    let bodies = request_bodies(&requests)?;
    let function_outputs = collect_output_items(&bodies, "function_call_output");
    let timeout_item = function_outputs
        .iter()
        .find(|item| item.get("call_id").and_then(Value::as_str) == Some(call_id))
        .expect("timeout output present");

    let output_str = timeout_item
        .get("output")
        .and_then(Value::as_str)
        .expect("timeout output string");

    // The exec path can report a timeout in two ways depending on timing:
    // 1) Structured JSON with exit_code 124 and a timeout prefix (preferred), or
    // 2) A plain error string if the child is observed as killed by a signal first.
    if let Ok(output_json) = serde_json::from_str::<Value>(output_str) {
        assert_eq!(
            output_json["metadata"]["exit_code"].as_i64(),
            Some(124),
            "expected timeout exit code 124",
        );

        let stdout = output_json["output"].as_str().unwrap_or_default();
        assert!(
            stdout.starts_with("command timed out after "),
            "expected timeout prefix, got {stdout:?}"
        );
        let first_line = stdout.lines().next().unwrap_or_default();
        let duration_ms = first_line
            .strip_prefix("command timed out after ")
            .and_then(|line| line.strip_suffix(" milliseconds"))
            .and_then(|value| value.parse::<u64>().ok())
            .unwrap_or_default();
        assert!(
            duration_ms >= timeout_ms,
            "expected duration >= configured timeout, got {duration_ms} (timeout {timeout_ms})"
        );
    } else {
        // Fallback: accept the signal classification path to deflake the test.
        assert!(
            output_str.contains("execution error"),
            "unexpected non-JSON output: {output_str:?}"
        );
        assert!(
            output_str.contains("Signal(") || output_str.to_lowercase().contains("signal"),
            "expected signal classification in error output, got {output_str:?}"
        );
    }

    Ok(())
}
280
codex-rs/core/tests/suite/unified_exec.rs
Normal file
@@ -0,0 +1,280 @@
#![cfg(not(target_os = "windows"))]

use std::collections::HashMap;

use anyhow::Result;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::skip_if_sandbox;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use serde_json::Value;

fn extract_output_text(item: &Value) -> Option<&str> {
    item.get("output").and_then(|value| match value {
        Value::String(text) => Some(text.as_str()),
        Value::Object(obj) => obj.get("content").and_then(Value::as_str),
        _ => None,
    })
}

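// Collects every function_call_output across the recorded bodies, keyed by
// call_id, with the JSON payload already parsed for assertions.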
fn collect_tool_outputs(bodies: &[Value]) -> Result<HashMap<String, Value>> {
    let mut outputs = HashMap::new();
    for body in bodies {
        if let Some(items) = body.get("input").and_then(Value::as_array) {
            for item in items {
                if item.get("type").and_then(Value::as_str) != Some("function_call_output") {
                    continue;
                }
                if let Some(call_id) = item.get("call_id").and_then(Value::as_str) {
                    let content = extract_output_text(item)
                        .ok_or_else(|| anyhow::anyhow!("missing tool output content"))?;
                    let parsed: Value = serde_json::from_str(content)?;
                    outputs.insert(call_id.to_string(), parsed);
                }
            }
        }
    }
    Ok(outputs)
}

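// Starts `/bin/cat` through unified_exec, then feeds it stdin via the returned
// session id and asserts that the follow-up call sees the echoed output.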
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
    skip_if_no_network!(Ok(()));
    skip_if_sandbox!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.use_experimental_unified_exec_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let first_call_id = "uexec-start";
    let first_args = serde_json::json!({
        "input": ["/bin/cat"],
        "timeout_ms": 200,
    });

    let second_call_id = "uexec-stdin";
    let second_args = serde_json::json!({
        "input": ["hello unified exec\n"],
        "session_id": "0",
        "timeout_ms": 500,
    });

    let responses = vec![
        sse(vec![
            serde_json::json!({"type": "response.created", "response": {"id": "resp-1"}}),
            ev_function_call(
                first_call_id,
                "unified_exec",
                &serde_json::to_string(&first_args)?,
            ),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            serde_json::json!({"type": "response.created", "response": {"id": "resp-2"}}),
            ev_function_call(
                second_call_id,
                "unified_exec",
                &serde_json::to_string(&second_args)?,
            ),
            ev_completed("resp-2"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "all done"),
            ev_completed("resp-3"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "run unified exec".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let outputs = collect_tool_outputs(&bodies)?;

    let start_output = outputs
        .get(first_call_id)
        .expect("missing first unified_exec output");
    let session_id = start_output["session_id"].as_str().unwrap_or_default();
    assert!(
        !session_id.is_empty(),
        "expected session id in first unified_exec response"
    );
    assert!(
        start_output["output"]
            .as_str()
            .unwrap_or_default()
            .is_empty()
    );

    let reuse_output = outputs
        .get(second_call_id)
        .expect("missing reused unified_exec output");
    assert_eq!(
        reuse_output["session_id"].as_str().unwrap_or_default(),
        session_id
    );
    let echoed = reuse_output["output"].as_str().unwrap_or_default();
    assert!(
        echoed.contains("hello unified exec"),
        "expected echoed output, got {echoed:?}"
    );

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
    skip_if_no_network!(Ok(()));
    skip_if_sandbox!(Ok(()));

    let server = start_mock_server().await;

    let mut builder = test_codex().with_config(|config| {
        config.use_experimental_unified_exec_tool = true;
    });
    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = builder.build(&server).await?;

    let first_call_id = "uexec-timeout";
    let first_args = serde_json::json!({
        "input": ["/bin/sh", "-c", "sleep 0.1; echo ready"],
        "timeout_ms": 10,
    });

    let second_call_id = "uexec-poll";
    let second_args = serde_json::json!({
        "input": Vec::<String>::new(),
        "session_id": "0",
        "timeout_ms": 800,
    });

    let responses = vec![
        sse(vec![
            serde_json::json!({"type": "response.created", "response": {"id": "resp-1"}}),
            ev_function_call(
                first_call_id,
                "unified_exec",
                &serde_json::to_string(&first_args)?,
            ),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            serde_json::json!({"type": "response.created", "response": {"id": "resp-2"}}),
            ev_function_call(
                second_call_id,
                "unified_exec",
                &serde_json::to_string(&second_args)?,
            ),
            ev_completed("resp-2"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-3"),
        ]),
    ];
    mount_sse_sequence(&server, responses).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "check timeout".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(!requests.is_empty(), "expected at least one POST request");

    let bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let outputs = collect_tool_outputs(&bodies)?;

    let first_output = outputs.get(first_call_id).expect("missing timeout output");
    assert_eq!(first_output["session_id"], "0");
    assert!(
        first_output["output"]
            .as_str()
            .unwrap_or_default()
            .is_empty()
    );

    let poll_output = outputs.get(second_call_id).expect("missing poll output");
    let output_text = poll_output["output"].as_str().unwrap_or_default();
    assert!(
        output_text.contains("ready"),
        "expected ready output, got {output_text:?}"
    );

    Ok(())
}
351
codex-rs/core/tests/suite/view_image.rs
Normal file
@@ -0,0 +1,351 @@
#![cfg(not(target_os = "windows"))]

use base64::Engine;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use serde_json::Value;
use wiremock::matchers::any;

fn function_call_output(body: &Value) -> Option<&Value> {
    body.get("input")
        .and_then(Value::as_array)
        .and_then(|items| {
            items.iter().find(|item| {
                item.get("type").and_then(Value::as_str) == Some("function_call_output")
            })
        })
}

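// Finds the injected message whose content contains an input_image span, i.e.
// the image the view_image tool attached as pending input.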
fn find_image_message(body: &Value) -> Option<&Value> {
|
||||
body.get("input")
|
||||
.and_then(Value::as_array)
|
||||
.and_then(|items| {
|
||||
items.iter().find(|item| {
|
||||
item.get("type").and_then(Value::as_str) == Some("message")
|
||||
&& item
|
||||
.get("content")
|
||||
.and_then(Value::as_array)
|
||||
.map(|content| {
|
||||
content.iter().any(|span| {
|
||||
span.get("type").and_then(Value::as_str) == Some("input_image")
|
||||
})
|
||||
})
|
||||
.unwrap_or(false)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn extract_output_text(item: &Value) -> Option<&str> {
|
||||
item.get("output").and_then(|value| match value {
|
||||
Value::String(text) => Some(text.as_str()),
|
||||
Value::Object(obj) => obj.get("content").and_then(Value::as_str),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
|
||||
fn find_request_with_function_call_output(requests: &[Value]) -> Option<&Value> {
|
||||
requests
|
||||
.iter()
|
||||
.find(|body| function_call_output(body).is_some())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = test_codex().build(&server).await?;

    let rel_path = "assets/example.png";
    let abs_path = cwd.path().join(rel_path);
    if let Some(parent) = abs_path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    let image_bytes = b"fake_png_bytes".to_vec();
    std::fs::write(&abs_path, &image_bytes)?;

    let call_id = "view-image-call";
    let arguments = serde_json::json!({ "path": rel_path }).to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "view_image", &arguments),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please add the screenshot".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    let mut tool_event = None;
    loop {
        let event = codex.next_event().await.expect("event");
        match event.msg {
            EventMsg::ViewImageToolCall(ev) => tool_event = Some(ev),
            EventMsg::TaskComplete(_) => break,
            _ => {}
        }
    }

    let tool_event = tool_event.expect("view image tool event emitted");
    assert_eq!(tool_event.call_id, call_id);
    assert_eq!(tool_event.path, abs_path);

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(
        requests.len() >= 2,
        "expected at least two POST requests, got {}",
        requests.len()
    );
    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    let output_text = extract_output_text(output_item).expect("output text present");
    assert_eq!(output_text, "attached local image path");

    let image_message = find_image_message(body_with_tool_output)
        .expect("pending input image message not included in request");
    let image_url = image_message
        .get("content")
        .and_then(Value::as_array)
        .and_then(|content| {
            content.iter().find_map(|span| {
                if span.get("type").and_then(Value::as_str) == Some("input_image") {
                    span.get("image_url").and_then(Value::as_str)
                } else {
                    None
                }
            })
        })
        .expect("image_url present");

    let expected_image_url = format!(
        "data:image/png;base64,{}",
        BASE64_STANDARD.encode(&image_bytes)
    );
    assert_eq!(image_url, expected_image_url);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = test_codex().build(&server).await?;

    let rel_path = "assets";
    let abs_path = cwd.path().join(rel_path);
    std::fs::create_dir_all(&abs_path)?;

    let call_id = "view-image-directory";
    let arguments = serde_json::json!({ "path": rel_path }).to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "view_image", &arguments),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please attach the folder".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(
        requests.len() >= 2,
        "expected at least two POST requests, got {}",
        requests.len()
    );
    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    let output_text = extract_output_text(output_item).expect("output text present");
    let expected_message = format!("image path `{}` is not a file", abs_path.display());
    assert_eq!(output_text, expected_message);

    assert!(
        find_image_message(body_with_tool_output).is_none(),
        "directory path should not produce an input_image message"
    );

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;

    let TestCodex {
        codex,
        cwd,
        session_configured,
        ..
    } = test_codex().build(&server).await?;

    let rel_path = "missing/example.png";
    let abs_path = cwd.path().join(rel_path);

    let call_id = "view-image-missing";
    let arguments = serde_json::json!({ "path": rel_path }).to_string();

    let first_response = sse(vec![
        serde_json::json!({
            "type": "response.created",
            "response": {"id": "resp-1"}
        }),
        ev_function_call(call_id, "view_image", &arguments),
        ev_completed("resp-1"),
    ]);
    responses::mount_sse_once_match(&server, any(), first_response).await;

    let second_response = sse(vec![
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-2"),
    ]);
    responses::mount_sse_once_match(&server, any(), second_response).await;

    let session_model = session_configured.model.clone();

    codex
        .submit(Op::UserTurn {
            items: vec![InputItem::Text {
                text: "please attach the missing image".into(),
            }],
            final_output_json_schema: None,
            cwd: cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;

    loop {
        let event = codex.next_event().await.expect("event");
        if matches!(event.msg, EventMsg::TaskComplete(_)) {
            break;
        }
    }

    let requests = server.received_requests().await.expect("recorded requests");
    assert!(
        requests.len() >= 2,
        "expected at least two POST requests, got {}",
        requests.len()
    );
    let request_bodies = requests
        .iter()
        .map(|req| req.body_json::<Value>().expect("request json"))
        .collect::<Vec<_>>();

    let body_with_tool_output = find_request_with_function_call_output(&request_bodies)
        .expect("function_call_output item not found in requests");
    let output_item = function_call_output(body_with_tool_output).expect("tool output item");
    let output_text = extract_output_text(output_item).expect("output text present");
    let expected_prefix = format!("unable to locate image at `{}`:", abs_path.display());
    assert!(
        output_text.starts_with(&expected_prefix),
        "expected error to start with `{expected_prefix}` but got `{output_text}`"
    );

    assert!(
        find_image_message(body_with_tool_output).is_none(),
        "missing file should not produce an input_image message"
    );

    Ok(())
}

@@ -16,7 +16,6 @@ workspace = true

[dependencies]
anyhow = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-arg0 = { workspace = true }
codex-common = { workspace = true, features = [
@@ -27,10 +26,12 @@ codex-common = { workspace = true, features = [
codex-core = { workspace = true }
codex-ollama = { workspace = true }
codex-protocol = { workspace = true }
opentelemetry-appender-tracing = { workspace = true }
owo-colors = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
shlex = { workspace = true }
supports-color = { workspace = true }
tokio = { workspace = true, features = [
    "io-std",
    "macros",
@@ -40,7 +41,6 @@ tokio = { workspace = true, features = [
] }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
opentelemetry-appender-tracing = { workspace = true }
ts-rs = { workspace = true, features = [
    "uuid-impl",
    "serde-json-impl",
@@ -52,10 +52,10 @@ ts-rs = { workspace = true, features = [
assert_cmd = { workspace = true }
core_test_support = { workspace = true }
libc = { workspace = true }
mcp-types = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
uuid = { workspace = true }
walkdir = { workspace = true }
wiremock = { workspace = true }
mcp-types = { workspace = true }

@@ -72,7 +72,7 @@ pub struct Cli {
    pub include_plan_tool: bool,

    /// Specifies file where the last message from the agent should be written.
    #[arg(long = "output-last-message")]
    #[arg(long = "output-last-message", short = 'o', value_name = "FILE")]
    pub last_message_file: Option<PathBuf>,

    /// Initial instructions for the agent. If not provided as an argument (or

@@ -21,6 +21,8 @@ pub(crate) trait EventProcessor {

    /// Handle a single event emitted by the agent.
    fn process_event(&mut self, event: Event) -> CodexStatus;

    fn print_final_output(&mut self) {}
}

pub(crate) fn handle_last_message(last_agent_message: Option<&str>, output_file: &Path) {

@@ -1,11 +1,7 @@
use codex_common::elapsed::format_duration;
use codex_common::elapsed::format_elapsed;
use codex_core::config::Config;
use codex_core::plan_tool::UpdatePlanArgs;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningDeltaEvent;
use codex_core::protocol::AgentReasoningRawContentDeltaEvent;
use codex_core::protocol::AgentReasoningRawContentEvent;
use codex_core::protocol::BackgroundEventEvent;
use codex_core::protocol::ErrorEvent;
@@ -31,7 +27,6 @@ use owo_colors::OwoColorize;
use owo_colors::Style;
use shlex::try_join;
use std::collections::HashMap;
use std::io::Write;
use std::path::PathBuf;
use std::time::Instant;

@@ -39,12 +34,13 @@ use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use crate::event_processor::handle_last_message;
use codex_common::create_config_summary_entries;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;

/// This should be configurable. When used in CI, users may not want to impose
/// a limit so they can see the full transcript.
const MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL: usize = 20;
pub(crate) struct EventProcessorWithHumanOutput {
    call_id_to_command: HashMap<String, ExecCommandBegin>,
    call_id_to_patch: HashMap<String, PatchApplyBegin>,

    // To ensure that --color=never is respected, ANSI escapes _must_ be added
@@ -62,10 +58,9 @@ pub(crate) struct EventProcessorWithHumanOutput {
    /// Whether to include `AgentReasoning` events in the output.
    show_agent_reasoning: bool,
    show_raw_agent_reasoning: bool,
    answer_started: bool,
    reasoning_started: bool,
    raw_reasoning_started: bool,
    last_message_path: Option<PathBuf>,
    last_total_token_usage: Option<codex_core::protocol::TokenUsageInfo>,
    final_message: Option<String>,
}

impl EventProcessorWithHumanOutput {
@@ -74,12 +69,10 @@ impl EventProcessorWithHumanOutput {
        config: &Config,
        last_message_path: Option<PathBuf>,
    ) -> Self {
        let call_id_to_command = HashMap::new();
        let call_id_to_patch = HashMap::new();

        if with_ansi {
            Self {
                call_id_to_command,
                call_id_to_patch,
                bold: Style::new().bold(),
                italic: Style::new().italic(),
@@ -90,14 +83,12 @@ impl EventProcessorWithHumanOutput {
                cyan: Style::new().cyan(),
                show_agent_reasoning: !config.hide_agent_reasoning,
                show_raw_agent_reasoning: config.show_raw_agent_reasoning,
                answer_started: false,
                reasoning_started: false,
                raw_reasoning_started: false,
                last_message_path,
                last_total_token_usage: None,
                final_message: None,
            }
        } else {
            Self {
                call_id_to_command,
                call_id_to_patch,
                bold: Style::new(),
                italic: Style::new(),
@@ -108,32 +99,23 @@ impl EventProcessorWithHumanOutput {
                cyan: Style::new(),
                show_agent_reasoning: !config.hide_agent_reasoning,
                show_raw_agent_reasoning: config.show_raw_agent_reasoning,
                answer_started: false,
                reasoning_started: false,
                raw_reasoning_started: false,
                last_message_path,
                last_total_token_usage: None,
                final_message: None,
            }
        }
    }
}

struct ExecCommandBegin {
    command: Vec<String>,
}

struct PatchApplyBegin {
    start_time: Instant,
    auto_approved: bool,
}

// Timestamped println helper. The timestamp is styled with self.dimmed.
#[macro_export]
macro_rules! ts_println {
/// Timestamped helper. The timestamp is styled with self.dimmed.
macro_rules! ts_msg {
    ($self:ident, $($arg:tt)*) => {{
        let now = chrono::Utc::now();
        let formatted = now.format("[%Y-%m-%dT%H:%M:%S]");
        print!("{} ", formatted.style($self.dimmed));
        println!($($arg)*);
        eprintln!($($arg)*);
    }};
}

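// Editor's note (not part of the diff): a sketch of how the renamed macro is
// used. Each call writes one timestamped line, e.g.
//
//     ts_msg!(self, "{} {}", "tool".style(self.magenta), invocation);
//     // [2025-01-01T12:00:00] tool <invocation>
//
// routed to stderr so stdout stays reserved for the final message.
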
@@ -141,31 +123,35 @@ impl EventProcessor for EventProcessorWithHumanOutput {
    /// Print a concise summary of the effective configuration that will be used
    /// for the session. This mirrors the information shown in the TUI welcome
    /// screen.
    fn print_config_summary(&mut self, config: &Config, prompt: &str, _: &SessionConfiguredEvent) {
    fn print_config_summary(
        &mut self,
        config: &Config,
        prompt: &str,
        session_configured_event: &SessionConfiguredEvent,
    ) {
        const VERSION: &str = env!("CARGO_PKG_VERSION");
        ts_println!(
        ts_msg!(
            self,
            "OpenAI Codex v{} (research preview)\n--------",
            VERSION
        );

        let entries = create_config_summary_entries(config);
        let mut entries = create_config_summary_entries(config);
        entries.push((
            "session id",
            session_configured_event.session_id.to_string(),
        ));

        for (key, value) in entries {
            println!("{} {}", format!("{key}:").style(self.bold), value);
            eprintln!("{} {}", format!("{key}:").style(self.bold), value);
        }

        println!("--------");
        eprintln!("--------");

        // Echo the prompt that will be sent to the agent so it is visible in the
        // transcript/logs before any events come in. Note the prompt may have been
        // read from stdin, so it may not be visible in the terminal otherwise.
        ts_println!(
            self,
            "{}\n{}",
            "User instructions:".style(self.bold).style(self.cyan),
            prompt
        );
        ts_msg!(self, "{}\n{}", "user".style(self.cyan), prompt);
    }

    fn process_event(&mut self, event: Event) -> CodexStatus {

@@ -173,144 +159,71 @@ impl EventProcessor for EventProcessorWithHumanOutput {
        match msg {
            EventMsg::Error(ErrorEvent { message }) => {
                let prefix = "ERROR:".style(self.red);
                ts_println!(self, "{prefix} {message}");
                ts_msg!(self, "{prefix} {message}");
            }
            EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
                ts_println!(self, "{}", message.style(self.dimmed));
                ts_msg!(self, "{}", message.style(self.dimmed));
            }
            EventMsg::StreamError(StreamErrorEvent { message }) => {
                ts_println!(self, "{}", message.style(self.dimmed));
                ts_msg!(self, "{}", message.style(self.dimmed));
            }
            EventMsg::TaskStarted(_) => {
                // Ignore.
            }
            EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => {
                let last_message = last_agent_message.as_deref();
                if let Some(output_file) = self.last_message_path.as_deref() {
                    handle_last_message(last_agent_message.as_deref(), output_file);
                    handle_last_message(last_message, output_file);
                }

                self.final_message = last_agent_message;

                return CodexStatus::InitiateShutdown;
            }
            EventMsg::TokenCount(ev) => {
                if let Some(usage_info) = ev.info {
                    ts_println!(
                        self,
                        "tokens used: {}",
                        format_with_separators(usage_info.total_token_usage.blended_total())
                    );
                }
            }
            EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }) => {
                if !self.answer_started {
                    ts_println!(self, "{}\n", "codex".style(self.italic).style(self.magenta));
                    self.answer_started = true;
                }
                print!("{delta}");
                #[expect(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
            }
            EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }) => {
                if !self.show_agent_reasoning {
                    return CodexStatus::Running;
                }
                if !self.reasoning_started {
                    ts_println!(
                        self,
                        "{}\n",
                        "thinking".style(self.italic).style(self.magenta),
                    );
                    self.reasoning_started = true;
                }
                print!("{delta}");
                #[expect(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
                self.last_total_token_usage = ev.info;
            }

            EventMsg::AgentReasoningSectionBreak(_) => {
                if !self.show_agent_reasoning {
                    return CodexStatus::Running;
                }
                println!();
                #[expect(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
                eprintln!();
            }
            EventMsg::AgentReasoningRawContent(AgentReasoningRawContentEvent { text }) => {
                if !self.show_raw_agent_reasoning {
                    return CodexStatus::Running;
                }
                if !self.raw_reasoning_started {
                    print!("{text}");
                    #[expect(clippy::expect_used)]
                    std::io::stdout().flush().expect("could not flush stdout");
                } else {
                    println!();
                    self.raw_reasoning_started = false;
                }
            }
            EventMsg::AgentReasoningRawContentDelta(AgentReasoningRawContentDeltaEvent {
                delta,
            }) => {
                if !self.show_raw_agent_reasoning {
                    return CodexStatus::Running;
                }
                if !self.raw_reasoning_started {
                    self.raw_reasoning_started = true;
                }
                print!("{delta}");
                #[expect(clippy::expect_used)]
                std::io::stdout().flush().expect("could not flush stdout");
            }
            EventMsg::AgentMessage(AgentMessageEvent { message }) => {
                // if answer_started is false, this means we haven't received any
                // delta. Thus, we need to print the message as a new answer.
                if !self.answer_started {
                    ts_println!(
                if self.show_raw_agent_reasoning {
                    ts_msg!(
                        self,
                        "{}\n{}",
                        "codex".style(self.italic).style(self.magenta),
                        message,
                        "thinking".style(self.italic).style(self.magenta),
                        text,
                    );
                } else {
                    println!();
                    self.answer_started = false;
                }
            }
            EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
                call_id,
                command,
                cwd,
                parsed_cmd: _,
            }) => {
                self.call_id_to_command.insert(
                    call_id,
                    ExecCommandBegin {
                        command: command.clone(),
                    },
                );
                ts_println!(
            EventMsg::AgentMessage(AgentMessageEvent { message }) => {
                ts_msg!(
                    self,
                    "{} {} in {}",
                    "exec".style(self.magenta),
                    "{}\n{}",
                    "codex".style(self.italic).style(self.magenta),
                    message,
                );
            }
            EventMsg::ExecCommandBegin(ExecCommandBeginEvent { command, cwd, .. }) => {
                eprint!(
                    "{}\n{} in {}",
                    "exec".style(self.italic).style(self.magenta),
                    escape_command(&command).style(self.bold),
                    cwd.to_string_lossy(),
                );
            }
            EventMsg::ExecCommandOutputDelta(_) => {}
            EventMsg::ExecCommandEnd(ExecCommandEndEvent {
                call_id,
                aggregated_output,
                duration,
                exit_code,
                ..
            }) => {
                let exec_command = self.call_id_to_command.remove(&call_id);
                let (duration, call) = if let Some(ExecCommandBegin { command, .. }) = exec_command
                {
                    (
                        format!(" in {}", format_duration(duration)),
                        format!("{}", escape_command(&command).style(self.bold)),
                    )
                } else {
                    ("".to_string(), format!("exec('{call_id}')"))
                };
                let duration = format!(" in {}", format_duration(duration));

                let truncated_output = aggregated_output
                    .lines()
@@ -319,21 +232,21 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                    .join("\n");
                match exit_code {
                    0 => {
                        let title = format!("{call} succeeded{duration}:");
                        ts_println!(self, "{}", title.style(self.green));
                        let title = format!(" succeeded{duration}:");
                        ts_msg!(self, "{}", title.style(self.green));
                    }
                    _ => {
                        let title = format!("{call} exited {exit_code}{duration}:");
                        ts_println!(self, "{}", title.style(self.red));
                        let title = format!(" exited {exit_code}{duration}:");
                        ts_msg!(self, "{}", title.style(self.red));
                    }
                }
                println!("{}", truncated_output.style(self.dimmed));
                eprintln!("{}", truncated_output.style(self.dimmed));
            }
            EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
                call_id: _,
                invocation,
            }) => {
                ts_println!(
                ts_msg!(
                    self,
                    "{} {}",
                    "tool".style(self.magenta),
@@ -358,7 +271,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                    format_mcp_invocation(&invocation)
                );

                ts_println!(self, "{}", title.style(title_style));
                ts_msg!(self, "{}", title.style(title_style));

                if let Ok(res) = result {
                    let val: serde_json::Value = res.into();
@@ -366,13 +279,13 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                        serde_json::to_string_pretty(&val).unwrap_or_else(|_| val.to_string());

                    for line in pretty.lines().take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL) {
                        println!("{}", line.style(self.dimmed));
                        eprintln!("{}", line.style(self.dimmed));
                    }
                }
            }
            EventMsg::WebSearchBegin(WebSearchBeginEvent { call_id: _ }) => {}
            EventMsg::WebSearchEnd(WebSearchEndEvent { call_id: _, query }) => {
                ts_println!(self, "🌐 Searched: {query}");
                ts_msg!(self, "🌐 Searched: {query}");
            }
            EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
                call_id,
@@ -389,11 +302,10 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                    },
                );

                ts_println!(
                ts_msg!(
                    self,
                    "{} auto_approved={}:",
                    "apply_patch".style(self.magenta),
                    auto_approved,
                    "{}",
                    "file update".style(self.magenta).style(self.italic),
                );

                // Pretty-print the patch summary with colored diff markers so
@@ -406,9 +318,9 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                            format_file_change(change),
                            path.to_string_lossy()
                        );
                        println!("{}", header.style(self.magenta));
                        eprintln!("{}", header.style(self.magenta));
                        for line in content.lines() {
                            println!("{}", line.style(self.green));
                            eprintln!("{}", line.style(self.green));
                        }
                    }
                    FileChange::Delete { content } => {
@@ -417,9 +329,9 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                            format_file_change(change),
                            path.to_string_lossy()
                        );
                        println!("{}", header.style(self.magenta));
                        eprintln!("{}", header.style(self.magenta));
                        for line in content.lines() {
                            println!("{}", line.style(self.red));
                            eprintln!("{}", line.style(self.red));
                        }
                    }
                    FileChange::Update {
@@ -436,20 +348,20 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                        } else {
                            format!("{} {}", format_file_change(change), path.to_string_lossy())
                        };
                        println!("{}", header.style(self.magenta));
                        eprintln!("{}", header.style(self.magenta));

                        // Colorize diff lines. We keep file header lines
                        // (--- / +++) without extra coloring so they are
                        // still readable.
                        for diff_line in unified_diff.lines() {
                            if diff_line.starts_with('+') && !diff_line.starts_with("+++") {
                                println!("{}", diff_line.style(self.green));
                                eprintln!("{}", diff_line.style(self.green));
                            } else if diff_line.starts_with('-')
                                && !diff_line.starts_with("---")
                            {
                                println!("{}", diff_line.style(self.red));
                                eprintln!("{}", diff_line.style(self.red));
                            } else {
                                println!("{diff_line}");
                                eprintln!("{diff_line}");
                            }
                        }
                    }
@@ -486,14 +398,18 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                };

                let title = format!("{label} exited {exit_code}{duration}:");
                ts_println!(self, "{}", title.style(title_style));
                ts_msg!(self, "{}", title.style(title_style));
                for line in output.lines() {
                    println!("{}", line.style(self.dimmed));
                    eprintln!("{}", line.style(self.dimmed));
                }
            }
            EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => {
                ts_println!(self, "{}", "turn diff:".style(self.magenta));
                println!("{unified_diff}");
                ts_msg!(
                    self,
                    "{}",
                    "file update:".style(self.magenta).style(self.italic)
                );
                eprintln!("{unified_diff}");
            }
            EventMsg::ExecApprovalRequest(_) => {
                // Should we exit?
@@ -503,17 +419,12 @@ impl EventProcessor for EventProcessorWithHumanOutput {
            }
            EventMsg::AgentReasoning(agent_reasoning_event) => {
                if self.show_agent_reasoning {
                    if !self.reasoning_started {
                        ts_println!(
                            self,
                            "{}\n{}",
                            "codex".style(self.italic).style(self.magenta),
                            agent_reasoning_event.text,
                        );
                    } else {
                        println!();
                        self.reasoning_started = false;
                    }
                    ts_msg!(
                        self,
                        "{}\n{}",
                        "thinking".style(self.italic).style(self.magenta),
                        agent_reasoning_event.text,
                    );
                }
            }
            EventMsg::SessionConfigured(session_configured_event) => {
@@ -527,41 +438,40 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                    rollout_path: _,
                } = session_configured_event;

                ts_println!(
                ts_msg!(
                    self,
                    "{} {}",
                    "codex session".style(self.magenta).style(self.bold),
                    conversation_id.to_string().style(self.dimmed)
                );

                ts_println!(self, "model: {}", model);
                println!();
                ts_msg!(self, "model: {}", model);
                eprintln!();
            }
            EventMsg::PlanUpdate(plan_update_event) => {
                let UpdatePlanArgs { explanation, plan } = plan_update_event;

                // Header
                ts_println!(self, "{}", "Plan update".style(self.magenta));
                ts_msg!(self, "{}", "Plan update".style(self.magenta));

                // Optional explanation
                if let Some(explanation) = explanation
                    && !explanation.trim().is_empty()
                {
                    ts_println!(self, "{}", explanation.style(self.italic));
                    ts_msg!(self, "{}", explanation.style(self.italic));
                }

                // Pretty-print the plan items with simple status markers.
                for item in plan {
                    use codex_core::plan_tool::StepStatus;
                    match item.status {
                        StepStatus::Completed => {
                            ts_println!(self, " {} {}", "✓".style(self.green), item.step);
                            ts_msg!(self, " {} {}", "✓".style(self.green), item.step);
                        }
                        StepStatus::InProgress => {
                            ts_println!(self, " {} {}", "→".style(self.cyan), item.step);
                            ts_msg!(self, " {} {}", "→".style(self.cyan), item.step);
                        }
                        StepStatus::Pending => {
                            ts_println!(
                            ts_msg!(
                                self,
                                " {} {}",
                                "•".style(self.dimmed),
@@ -580,15 +490,23 @@ impl EventProcessor for EventProcessorWithHumanOutput {
            EventMsg::ListCustomPromptsResponse(_) => {
                // Currently ignored in exec output.
            }
            EventMsg::ViewImageToolCall(view) => {
                ts_msg!(
                    self,
                    "{} {}",
                    "viewed image".style(self.magenta),
                    view.path.display()
                );
            }
            EventMsg::TurnAborted(abort_reason) => match abort_reason.reason {
                TurnAbortReason::Interrupted => {
                    ts_println!(self, "task interrupted");
                    ts_msg!(self, "task interrupted");
                }
                TurnAbortReason::Replaced => {
                    ts_println!(self, "task aborted: replaced by a new task");
                    ts_msg!(self, "task aborted: replaced by a new task");
                }
                TurnAbortReason::ReviewEnded => {
                    ts_println!(self, "task aborted: review ended");
                    ts_msg!(self, "task aborted: review ended");
                }
            },
            EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
@@ -596,9 +514,35 @@ impl EventProcessor for EventProcessorWithHumanOutput {
            EventMsg::UserMessage(_) => {}
            EventMsg::EnteredReviewMode(_) => {}
            EventMsg::ExitedReviewMode(_) => {}
            EventMsg::AgentMessageDelta(_) => {}
            EventMsg::AgentReasoningDelta(_) => {}
            EventMsg::AgentReasoningRawContentDelta(_) => {}
        }
        CodexStatus::Running
    }

    fn print_final_output(&mut self) {
        if let Some(usage_info) = &self.last_total_token_usage {
            eprintln!(
                "{}\n{}",
                "tokens used".style(self.magenta).style(self.italic),
                format_with_separators(usage_info.total_token_usage.blended_total())
            );
        }

        // If the user has not piped the final message to a file, they will see
        // it twice: once written to stderr as part of the normal event
        // processing, and once here on stdout. We print the token summary above
        // to help break up the output visually in that case.
        #[allow(clippy::print_stdout)]
        if let Some(message) = &self.final_message {
            if message.ends_with('\n') {
                print!("{message}");
            } else {
                println!("{message}");
            }
        }
    }
}

fn escape_command(command: &[String]) -> String {

@@ -5,7 +5,7 @@ use std::sync::atomic::AtomicU64;
use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use crate::event_processor::handle_last_message;
use crate::exec_events::AssistantMessageItem;
use crate::exec_events::AgentMessageItem;
use crate::exec_events::CommandExecutionItem;
use crate::exec_events::CommandExecutionStatus;
use crate::exec_events::FileChangeItem;
@@ -31,8 +31,6 @@ use crate::exec_events::TurnStartedEvent;
use crate::exec_events::Usage;
use crate::exec_events::WebSearchItem;
use codex_core::config::Config;
use codex_core::plan_tool::StepStatus;
use codex_core::plan_tool::UpdatePlanArgs;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningEvent;
use codex_core::protocol::Event;
@@ -48,6 +46,8 @@ use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TaskStartedEvent;
use codex_core::protocol::WebSearchEndEvent;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;
use tracing::error;
use tracing::warn;

@@ -162,7 +162,7 @@ impl EventProcessorWithJsonOutput {
        let item = ThreadItem {
            id: self.get_next_item_id(),

            details: ThreadItemDetails::AssistantMessage(AssistantMessageItem {
            details: ThreadItemDetails::AgentMessage(AgentMessageItem {
                text: payload.message.clone(),
            }),
        };
@@ -428,6 +428,7 @@ impl EventProcessor for EventProcessorWithJsonOutput {
        });
    }

    #[allow(clippy::print_stdout)]
    fn process_event(&mut self, event: Event) -> CodexStatus {
        let aggregated = self.collect_thread_events(&event);
        for conv_event in aggregated {

@@ -95,11 +95,11 @@ pub struct ThreadItem {

/// Typed payloads for each supported thread item type.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, TS)]
#[serde(tag = "item_type", rename_all = "snake_case")]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ThreadItemDetails {
    /// Response from the agent.
    /// Either a natural-language response or a JSON string when structured output is requested.
    AssistantMessage(AssistantMessageItem),
    AgentMessage(AgentMessageItem),
    /// Agent's reasoning summary.
    Reasoning(ReasoningItem),
    /// Tracks a command executed by the agent. The item starts when the command is
@@ -124,7 +124,7 @@ pub enum ThreadItemDetails {
/// Response from the agent.
/// Either a natural-language response or a JSON string when structured output is requested.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, TS)]
pub struct AssistantMessageItem {
pub struct AgentMessageItem {
    pub text: String,
}

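// Editor's sketch (not part of the diff): how the retagged enum serializes.
// With `#[serde(tag = "type", rename_all = "snake_case")]`, the agent message
// variant flattens its payload beside the tag:
//
//     let item = ThreadItemDetails::AgentMessage(AgentMessageItem { text: "hello".into() });
//     assert_eq!(
//         serde_json::to_value(&item)?,
//         serde_json::json!({ "type": "agent_message", "text": "hello" })
//     );
//
// Consumers of the JSONL stream now match on the `"type"` key rather than the
// old `"item_type"` key.
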
@@ -1,14 +1,25 @@
// - In the default output mode, it is paramount that the only thing written to
//   stdout is the final message (if any).
// - In --json mode, stdout must be valid JSONL, one event per line.
//   For both modes, any other output must be written to stderr.
#![deny(clippy::print_stdout)]

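// Editor's sketch (not part of the diff): what the contract above buys a
// consumer. The binary name and flags mirror the tests in this PR but are
// illustrative only.
//
//     use std::process::Command;
//
//     fn final_message_only() -> std::io::Result<String> {
//         let out = Command::new("codex-exec")
//             .args(["--skip-git-repo-check", "echo hello"])
//             .output()?;
//         // stdout carries only the final agent message; progress, config
//         // summaries, and diffs all arrive on stderr.
//         Ok(String::from_utf8_lossy(&out.stdout).into_owned())
//     }
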
mod cli;
mod event_processor;
mod event_processor_with_human_output;
pub mod event_processor_with_jsonl_output;
pub mod exec_events;

use anyhow::bail;
pub use cli::Cli;
use codex_core::AuthManager;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::admin_controls::DangerAuditAction;
use codex_core::admin_controls::PendingAdminAction;
use codex_core::admin_controls::build_danger_audit_payload;
use codex_core::admin_controls::log_admin_event;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::git_info::get_git_repo_root;
@@ -17,6 +28,7 @@ use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SessionSource;
use codex_core::protocol::TaskCompleteEvent;
use codex_ollama::DEFAULT_OSS_MODEL;
use codex_protocol::config_types::SandboxMode;
@@ -27,6 +39,7 @@ use serde_json::Value;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use supports_color::Stream;
use tracing::debug;
use tracing::error;
use tracing::info;
@@ -112,8 +125,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        cli::Color::Always => (true, true),
        cli::Color::Never => (false, false),
        cli::Color::Auto => (
            std::io::stdout().is_terminal(),
            std::io::stderr().is_terminal(),
            supports_color::on_cached(Stream::Stdout).is_some(),
            supports_color::on_cached(Stream::Stderr).is_some(),
        ),
    };

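// Editor's note (not part of the diff): `is_terminal()` only answers "is this
// a TTY?", while `supports_color::on_cached` also honors color-override
// conventions such as NO_COLOR and forced-color environment variables
// (assumption based on the supports-color crate's documented behavior), so
// `--color=auto` can now respect user overrides:
//
//     let stdout_color = supports_color::on_cached(Stream::Stdout).is_some();
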
@@ -169,7 +182,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        codex_linux_sandbox_exe,
        base_instructions: None,
        include_plan_tool: Some(include_plan_tool),
        include_apply_patch_tool: None,
        include_apply_patch_tool: Some(true),
        include_view_image_tool: None,
        show_raw_agent_reasoning: oss.then_some(true),
        tools_web_search_request: None,
@@ -183,7 +196,25 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        }
    };

    let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;
    let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides).await?;

    if config.admin.has_pending_danger() {
        if let Some(audit) = config.admin.audit.as_ref()
            && let Some(PendingAdminAction::Danger(pending)) = config
                .admin
                .pending
                .iter()
                .find(|action| matches!(action, PendingAdminAction::Danger(_)))
        {
            log_admin_event(
                audit,
                build_danger_audit_payload(pending, DangerAuditAction::Denied, None),
            );
        }
        bail!(
            "danger-full-access requires interactive justification; rerun in the interactive TUI"
        );
    }

    let otel = codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"));

@@ -236,8 +267,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        std::process::exit(1);
    }

    let conversation_manager =
        ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
    let auth_manager = AuthManager::shared(config.codex_home.clone(), true);
    let conversation_manager = ConversationManager::new(auth_manager.clone(), SessionSource::Exec);

    // Handle resume subcommand by resolving a rollout path and using explicit resume API.
    let NewConversation {
@@ -249,11 +280,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any

    if let Some(path) = resume_path {
        conversation_manager
            .resume_conversation_from_rollout(
                config.clone(),
                path,
                AuthManager::shared(config.codex_home.clone()),
            )
            .resume_conversation_from_rollout(config.clone(), path, auth_manager.clone())
            .await?
    } else {
        conversation_manager
@@ -367,6 +394,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
            }
        }
    }
    event_processor.print_final_output();
    if error_seen {
        std::process::exit(1);
    }
@@ -379,7 +407,9 @@ async fn resolve_resume_path(
    args: &crate::cli::ResumeArgs,
) -> anyhow::Result<Option<PathBuf>> {
    if args.last {
        match codex_core::RolloutRecorder::list_conversations(&config.codex_home, 1, None).await {
        match codex_core::RolloutRecorder::list_conversations(&config.codex_home, 1, None, &[])
            .await
        {
            Ok(page) => Ok(page.items.first().map(|it| it.path.clone())),
            Err(e) => {
                error!("Error listing conversations: {e}");

@@ -14,7 +14,7 @@ use codex_core::protocol::PatchApplyEndEvent;
use codex_core::protocol::SessionConfiguredEvent;
use codex_core::protocol::WebSearchEndEvent;
use codex_exec::event_processor_with_jsonl_output::EventProcessorWithJsonOutput;
use codex_exec::exec_events::AssistantMessageItem;
use codex_exec::exec_events::AgentMessageItem;
use codex_exec::exec_events::CommandExecutionItem;
use codex_exec::exec_events::CommandExecutionStatus;
use codex_exec::exec_events::ItemCompletedEvent;
@@ -37,6 +37,9 @@ use codex_exec::exec_events::TurnFailedEvent;
use codex_exec::exec_events::TurnStartedEvent;
use codex_exec::exec_events::Usage;
use codex_exec::exec_events::WebSearchItem;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;
use mcp_types::CallToolResult;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
@@ -115,10 +118,6 @@ fn web_search_end_emits_item_completed() {

#[test]
fn plan_update_emits_todo_list_started_updated_and_completed() {
    use codex_core::plan_tool::PlanItemArg;
    use codex_core::plan_tool::StepStatus;
    use codex_core::plan_tool::UpdatePlanArgs;

    let mut ep = EventProcessorWithJsonOutput::new(None);

    // First plan update => item.started (todo_list)
@@ -339,10 +338,6 @@ fn mcp_tool_call_failure_sets_failed_status() {

#[test]
fn plan_update_after_complete_starts_new_todo_list_with_new_id() {
    use codex_core::plan_tool::PlanItemArg;
    use codex_core::plan_tool::StepStatus;
    use codex_core::plan_tool::UpdatePlanArgs;

    let mut ep = EventProcessorWithJsonOutput::new(None);

    // First turn: start + complete
@@ -410,7 +405,7 @@ fn agent_reasoning_produces_item_completed_reasoning() {
}

#[test]
fn agent_message_produces_item_completed_assistant_message() {
fn agent_message_produces_item_completed_agent_message() {
    let mut ep = EventProcessorWithJsonOutput::new(None);
    let ev = event(
        "e1",
@@ -424,7 +419,7 @@ fn agent_message_produces_item_completed_assistant_message() {
    vec![ThreadEvent::ItemCompleted(ItemCompletedEvent {
        item: ThreadItem {
            id: "item_0".to_string(),
            details: ThreadItemDetails::AssistantMessage(AssistantMessageItem {
            details: ThreadItemDetails::AgentMessage(AgentMessageItem {
                text: "hello".to_string(),
            }),
        },

codex-rs/exec/tests/suite/auth_env.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
use core_test_support::responses::ev_completed;
use core_test_support::responses::sse;
use core_test_support::responses::sse_response;
use core_test_support::responses::start_mock_server;
use core_test_support::test_codex_exec::test_codex_exec;
use wiremock::Mock;
use wiremock::matchers::header;
use wiremock::matchers::method;
use wiremock::matchers::path;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_uses_codex_api_key_env_var() -> anyhow::Result<()> {
    let test = test_codex_exec();
    let server = start_mock_server().await;

    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .and(header("Authorization", "Bearer dummy"))
        .respond_with(sse_response(sse(vec![ev_completed("request_0")])))
        .expect(1)
        .mount(&server)
        .await;

    test.cmd_with_server(&server)
        .arg("--skip-git-repo-check")
        .arg("-C")
        .arg(env!("CARGO_MANIFEST_DIR"))
        .arg("echo testing codex api key")
        .assert()
        .success();

    Ok(())
}
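// Editor's note (not part of the diff): the harness presumably injects the
// API key that the mock asserts on -- something along the lines of
//
//     cmd.env(CODEX_API_KEY_ENV_VAR, "dummy");
//
// (exact mechanism hypothetical here) -- so the request reaches the mock
// server with `Authorization: Bearer dummy`.
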
@@ -1,5 +1,6 @@
// Aggregates all former standalone integration tests as modules.
mod apply_patch;
mod auth_env;
mod output_schema;
mod resume;
mod sandbox;

@@ -1,10 +1,9 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
use anyhow::Context;
use assert_cmd::prelude::*;
use core_test_support::test_codex_exec::test_codex_exec;
use serde_json::Value;
use std::process::Command;
use std::path::Path;
use std::string::ToString;
use tempfile::TempDir;
use uuid::Uuid;
use walkdir::WalkDir;

@@ -72,18 +71,15 @@ fn extract_conversation_id(path: &std::path::Path) -> String {

#[test]
fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {
    let home = TempDir::new()?;
    let fixture = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/cli_responses_fixture.sse");
    let test = test_codex_exec();
    let fixture =
        Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/cli_responses_fixture.sse");

    // 1) First run: create a session with a unique marker in the content.
    let marker = format!("resume-last-{}", Uuid::new_v4());
    let prompt = format!("echo {marker}");

    Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    test.cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -94,7 +90,7 @@ fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {
        .success();

    // Find the created session file containing the marker.
    let sessions_dir = home.path().join("sessions");
    let sessions_dir = test.home_path().join("sessions");
    let path = find_session_file_containing_marker(&sessions_dir, &marker)
        .expect("no session file found after first run");

@@ -102,11 +98,7 @@ fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {
    let marker2 = format!("resume-last-2-{}", Uuid::new_v4());
    let prompt2 = format!("echo {marker2}");

    let mut binding = assert_cmd::Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?;
    let cmd = binding
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    test.cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -114,8 +106,9 @@ fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {
        .arg(env!("CARGO_MANIFEST_DIR"))
        .arg(&prompt2)
        .arg("resume")
        .arg("--last");
    cmd.assert().success();
        .arg("--last")
        .assert()
        .success();

    // Ensure the same file was updated and contains both markers.
    let resumed_path = find_session_file_containing_marker(&sessions_dir, &marker2)
@@ -132,18 +125,15 @@ fn exec_resume_last_appends_to_existing_file() -> anyhow::Result<()> {

#[test]
fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {
    let home = TempDir::new()?;
    let fixture = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/cli_responses_fixture.sse");
    let test = test_codex_exec();
    let fixture =
        Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/cli_responses_fixture.sse");

    // 1) First run: create a session
    let marker = format!("resume-by-id-{}", Uuid::new_v4());
    let prompt = format!("echo {marker}");

    Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    test.cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -153,7 +143,7 @@ fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {
        .assert()
        .success();

    let sessions_dir = home.path().join("sessions");
    let sessions_dir = test.home_path().join("sessions");
    let path = find_session_file_containing_marker(&sessions_dir, &marker)
        .expect("no session file found after first run");
    let session_id = extract_conversation_id(&path);
@@ -166,11 +156,7 @@ fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {
    let marker2 = format!("resume-by-id-2-{}", Uuid::new_v4());
    let prompt2 = format!("echo {marker2}");

    let mut binding = assert_cmd::Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?;
    let cmd = binding
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    test.cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -178,8 +164,9 @@ fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {
        .arg(env!("CARGO_MANIFEST_DIR"))
        .arg(&prompt2)
        .arg("resume")
        .arg(&session_id);
    cmd.assert().success();
        .arg(&session_id)
        .assert()
        .success();

    let resumed_path = find_session_file_containing_marker(&sessions_dir, &marker2)
        .expect("no resumed session file containing marker2");
@@ -195,17 +182,14 @@ fn exec_resume_by_id_appends_to_existing_file() -> anyhow::Result<()> {

#[test]
fn exec_resume_preserves_cli_configuration_overrides() -> anyhow::Result<()> {
    let home = TempDir::new()?;
    let fixture = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/cli_responses_fixture.sse");
    let test = test_codex_exec();
    let fixture =
        Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/cli_responses_fixture.sse");

    let marker = format!("resume-config-{}", Uuid::new_v4());
    let prompt = format!("echo {marker}");

    Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    test.cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -219,17 +203,15 @@ fn exec_resume_preserves_cli_configuration_overrides() -> anyhow::Result<()> {
        .assert()
        .success();

    let sessions_dir = home.path().join("sessions");
    let sessions_dir = test.home_path().join("sessions");
    let path = find_session_file_containing_marker(&sessions_dir, &marker)
        .expect("no session file found after first run");

    let marker2 = format!("resume-config-2-{}", Uuid::new_v4());
    let prompt2 = format!("echo {marker2}");

    let output = Command::cargo_bin("codex-exec")
        .context("should find binary for codex-exec")?
        .env("CODEX_HOME", home.path())
        .env("OPENAI_API_KEY", "dummy")
    let output = test
        .cmd()
        .env("CODEX_RS_SSE_FIXTURE", &fixture)
        .env("OPENAI_BASE_URL", "http://unused.local")
        .arg("--skip-git-repo-check")
@@ -247,14 +229,14 @@ fn exec_resume_preserves_cli_configuration_overrides() -> anyhow::Result<()> {

    assert!(output.status.success(), "resume run failed: {output:?}");

    let stdout = String::from_utf8(output.stdout)?;
    let stderr = String::from_utf8(output.stderr)?;
    assert!(
        stdout.contains("model: gpt-5-high"),
        "stdout missing model override: {stdout}"
        stderr.contains("model: gpt-5-high"),
        "stderr missing model override: {stderr}"
    );
    assert!(
        stdout.contains("sandbox: workspace-write"),
        "stdout missing sandbox override: {stdout}"
        stderr.contains("sandbox: workspace-write"),
        "stderr missing sandbox override: {stderr}"
    );

    let resumed_path = find_session_file_containing_marker(&sessions_dir, &marker2)

@@ -14,6 +14,7 @@ pub use codex_core::AuthManager;
pub use codex_core::CodexAuth;
pub use codex_core::auth::AuthDotJson;
pub use codex_core::auth::CLIENT_ID;
pub use codex_core::auth::CODEX_API_KEY_ENV_VAR;
pub use codex_core::auth::OPENAI_API_KEY_ENV_VAR;
pub use codex_core::auth::get_auth_file;
pub use codex_core::auth::login_with_api_key;

@@ -132,7 +132,7 @@ pub(crate) fn create_tool_for_codex_tool_call_param() -> Tool {
impl CodexToolCallParam {
    /// Returns the initial user prompt to start the Codex conversation and the
    /// effective Config object generated from the supplied parameters.
    pub fn into_config(
    pub async fn into_config(
        self,
        codex_linux_sandbox_exe: Option<PathBuf>,
    ) -> std::io::Result<(String, codex_core::config::Config)> {
@@ -172,7 +172,8 @@ impl CodexToolCallParam {
            .map(|(k, v)| (k, json_to_toml(v)))
            .collect();

        let cfg = codex_core::config::Config::load_with_cli_overrides(cli_overrides, overrides)?;
        let cfg =
            codex_core::config::Config::load_with_cli_overrides(cli_overrides, overrides).await?;

        Ok((prompt, cfg))
    }

@@ -280,6 +280,7 @@ async fn run_codex_tool_session_inner(
            | EventMsg::ConversationPath(_)
            | EventMsg::UserMessage(_)
            | EventMsg::ShutdownComplete
            | EventMsg::ViewImageToolCall(_)
            | EventMsg::EnteredReviewMode(_)
            | EventMsg::ExitedReviewMode(_) => {
                // For now, we do not do anything extra for these

@@ -91,6 +91,7 @@ pub async fn run_main(
        )
    })?;
    let config = Config::load_with_cli_overrides(cli_kv_overrides, ConfigOverrides::default())
        .await
        .map_err(|e| {
            std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
        })?;

@@ -8,6 +8,7 @@ use crate::codex_tool_config::create_tool_for_codex_tool_call_reply_param;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SessionSource;

use codex_core::AuthManager;
use codex_core::ConversationManager;
@@ -52,8 +53,9 @@ impl MessageProcessor {
        config: Arc<Config>,
    ) -> Self {
        let outgoing = Arc::new(outgoing);
        let auth_manager = AuthManager::shared(config.codex_home.clone());
        let conversation_manager = Arc::new(ConversationManager::new(auth_manager));
        let auth_manager = AuthManager::shared(config.codex_home.clone(), false);
        let conversation_manager =
            Arc::new(ConversationManager::new(auth_manager, SessionSource::Mcp));
        Self {
            outgoing,
            initialized: false,
@@ -340,7 +342,10 @@ impl MessageProcessor {
    async fn handle_tool_call_codex(&self, id: RequestId, arguments: Option<serde_json::Value>) {
        let (initial_prompt, config): (String, Config) = match arguments {
            Some(json_val) => match serde_json::from_value::<CodexToolCallParam>(json_val) {
                Ok(tool_cfg) => match tool_cfg.into_config(self.codex_linux_sandbox_exe.clone()) {
                Ok(tool_cfg) => match tool_cfg
                    .into_config(self.codex_linux_sandbox_exe.clone())
                    .await
                {
                    Ok(cfg) => cfg,
                    Err(e) => {
                        let result = CallToolResult {

@@ -14,6 +14,7 @@ use eventsource_stream::EventStreamError as StreamError;
use reqwest::Error;
use reqwest::Response;
use serde::Serialize;
use std::borrow::Cow;
use std::fmt::Display;
use std::time::Duration;
use std::time::Instant;
@@ -366,10 +367,10 @@ impl OtelEventManager {
        call_id: &str,
        arguments: &str,
        f: F,
    ) -> Result<String, E>
    ) -> Result<(String, bool), E>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<String, E>>,
        Fut: Future<Output = Result<(String, bool), E>>,
        E: Display,
    {
        let start = Instant::now();
@@ -377,10 +378,12 @@ impl OtelEventManager {
        let duration = start.elapsed();

        let (output, success) = match &result {
            Ok(content) => (content, true),
            Err(error) => (&error.to_string(), false),
            Ok((preview, success)) => (Cow::Borrowed(preview.as_str()), *success),
            Err(error) => (Cow::Owned(error.to_string()), false),
        };

        let success_str = if success { "true" } else { "false" };

        tracing::event!(
            tracing::Level::INFO,
            event.name = "codex.tool_result",
@@ -396,7 +399,8 @@ impl OtelEventManager {
            call_id = %call_id,
            arguments = %arguments,
            duration_ms = %duration.as_millis(),
            success = %success,
            success = %success_str,
            // `output` is truncated by the tool layer before reaching telemetry.
            output = %output,
        );

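// Editor's sketch (not part of the diff): the shape of the new wrapper
// contract. The instrumented closure now returns (preview, success), so a
// tool call can be marked failed even when it produced output. Names below
// are stand-ins, not the real API.
use std::fmt::Display;
use std::future::Future;

async fn instrument_tool_call<F, Fut, E>(f: F) -> Result<(String, bool), E>
where
    F: FnOnce() -> Fut,
    Fut: Future<Output = Result<(String, bool), E>>,
    E: Display,
{
    let result = f().await;
    // Borrow the preview on success, own the message on failure -- mirroring
    // the Cow-based match in the diff above.
    let (output, success) = match &result {
        Ok((preview, success)) => (preview.clone(), *success),
        Err(error) => (error.to_string(), false),
    };
    // `output` and `success` would feed the `codex.tool_result` tracing event.
    let _ = (output, success);
    result
}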