Mirror of https://github.com/openai/codex.git, synced 2026-02-06 17:03:42 +00:00
Compare commits
122 Commits
2aba7b2913 a57fefa8fc ed7723607d 7b3263e9a6 112bc1c39f b1d46c2320 b9e443f0c4 f2e64e9fa5
187924d761 66450f0445 e8421c761c fe460e0f9a 1253d19641 4c9b4b684f 018de994b0 c31960b13a
9179c9deac a1e81180f8 fedcb8f63c 116059c3a0 0d788e6263 4cef89a122 124a09e577 a59052341d
8372d61be7 230a045ac9 3389465c8d 8b4d27dfcd dc1a568dc7 54ded1a3c0 11d4f3f45e 8b7ec31ba7
188f79afee a0b2d03302 4ce9d0aa7b 1dd1355df3 915352b10c 740bf0e755 d1c6329c32 cab7136fb3
32db8ea5ca 06e21c7a65 7ecd0dc9b3 8858012fd1 6346e4f560 4c3d2a5bbe c92dbea7c1 771f1ca6ab
b1c93e135b 5f8776d34d 58a91a0b50 c29afc0cf3 cafb07fe6e 07f077dfb3 7cf6f1c723 57f8158608
95580f229e 720fa67816 fabb797097 807f8a43c2 1d8e2b4da8 bba5e5e0d4 8f10d3bf05 468ee8a75c
0b53aed2d0 649badd102 a8e0fe8bb9 e139ef3e67 db1423ae8b 1d678c8187 181ff89cbd 5678213058
279283fe02 0c1658d0ec 19525efb22 90f37e8549 ee9d441777 1b5095b5d1 c673e7adb6 6846bc1115
efd2d76484 82fcc087b5 3cfa4bc8be ab753387cc 2de731490e 7078a0b676 79ce79a62e 66b7c673e9
13c42a077c a48904de72 4313e0a710 ce3ff29932 810ebe0d2b bf732600ea 38de0a1de4 e61bae12e3
96a65ff0ed 40de81e7af 972b5853a0 fb24c47bea f2b740c95d 0130a2fa40 53eb2e9f27 2828549323
cbc5fb9acf 310f2114ae e27d9bd88f 414fbe0da9 277babba79 14dbd0610a f6275a5142 7d0c5c7bd5
4673090f73 8e900c210c 6b2ef216f1 d65fe38b2c 7809e36a92 0237459f71 314937fb11 8ff16a7714
96fdbdd434 33e1d0844a
BIN .github/codex-cli-login.png (vendored): binary file not shown. Before: 2.9 MiB
BIN .github/codex-cli-permissions.png (vendored): binary file not shown. Before: 408 KiB
BIN .github/codex-cli-splash.png (vendored): binary file not shown. Before: 3.1 MiB, After: 818 KiB
BIN .github/demo.gif (vendored): binary file not shown. Before: 19 MiB
@@ -12,6 +12,8 @@ permissions:
jobs:
  close-stale-contributor-prs:
    # Prevent scheduled runs on forks
    if: github.repository == 'openai/codex'
    runs-on: ubuntu-latest
    steps:
      - name: Close inactive PRs from contributors
3 .github/workflows/issue-deduplicator.yml (vendored)
@@ -9,7 +9,8 @@ on:
jobs:
  gather-duplicates:
    name: Identify potential duplicates
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
    runs-on: ubuntu-latest
    permissions:
      contents: read
3 .github/workflows/issue-labeler.yml (vendored)
@@ -9,7 +9,8 @@ on:
jobs:
  gather-labels:
    name: Generate label suggestions
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
    runs-on: ubuntu-latest
    permissions:
      contents: read
2 .github/workflows/rust-release-prepare.yml (vendored)
@@ -14,6 +14,8 @@ permissions:
jobs:
  prepare:
    # Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
    if: github.repository == 'openai/codex'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
21 .github/workflows/rust-release.yml (vendored)
@@ -323,6 +323,26 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@v6

      - name: Generate release notes from tag commit message
        id: release_notes
        shell: bash
        run: |
          set -euo pipefail

          # On tag pushes, GITHUB_SHA may be a tag object for annotated tags;
          # peel it to the underlying commit.
          commit="$(git rev-parse "${GITHUB_SHA}^{commit}")"
          notes_path="${RUNNER_TEMP}/release-notes.md"

          # Use the commit message for the commit the tag points at (not the
          # annotated tag message).
          git log -1 --format=%B "${commit}" > "${notes_path}"
          # Ensure trailing newline so GitHub's markdown renderer doesn't
          # occasionally run the last line into subsequent content.
          echo >> "${notes_path}"

          echo "path=${notes_path}" >> "${GITHUB_OUTPUT}"

      - uses: actions/download-artifact@v7
        with:
          path: dist
@@ -395,6 +415,7 @@ jobs:
        with:
          name: ${{ steps.release_name.outputs.name }}
          tag_name: ${{ github.ref_name }}
          body_path: ${{ steps.release_notes.outputs.path }}
          files: dist/**
          # Mark as prerelease only when the version has a suffix after x.y.z
          # (e.g. -alpha, -beta). Otherwise publish a normal release.
@@ -77,6 +77,12 @@ If you don’t have the tool:
- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
- Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.

### Spawning workspace binaries in tests (Cargo vs Buck2)

- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
- Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first.
- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed.
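
To make the `codex_utils_cargo_bin` guidance concrete, here is a minimal sketch of a test that spawns a first-party binary. The binary name and the exact return type of `cargo_bin` are assumptions; only the helper's name and the reason for preferring it (resolve an absolute path before any directory change) come from the notes above.

```rust
use std::process::Command;

#[test]
fn spawns_first_party_binary_from_a_different_cwd() {
    // Resolve to an absolute path up front so changing the working directory
    // below cannot break a project-relative CARGO_BIN_EXE_* path under Buck2.
    // Assumption: `cargo_bin` returns a path usable by `Command::new`.
    let bin = codex_utils_cargo_bin::cargo_bin("codex");

    let output = Command::new(bin)
        .arg("--version")
        .current_dir(std::env::temp_dir())
        .output()
        .expect("failed to spawn workspace binary");

    assert!(output.status.success());
}
```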

### Integration tests (core)

- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
77 README.md
@@ -1,13 +1,11 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>

<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
</br>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>

<p align="center">
  <img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
</p>
</br>
If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>

---

@@ -15,25 +13,19 @@

### Installing and running Codex CLI

Install globally with your preferred package manager. If you use npm:
Install globally with your preferred package manager:

```shell
# Install using npm
npm install -g @openai/codex
```

Alternatively, if you use Homebrew:

```shell
# Install using Homebrew
brew install --cask codex
```

Then simply run `codex` to get started:

```shell
codex
```

If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
Then simply run `codex` to get started.

<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>

@@ -53,60 +45,15 @@ Each archive contains a single entry with the platform baked into the name (e.g.

### Using Codex with your ChatGPT plan

<p align="center">
  <img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
</p>

Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).

You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).

### Model Context Protocol (MCP)
## Docs

Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).

### Configuration

Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).

### Execpolicy

See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.

### Docs & FAQ

- [**Getting started**](./docs/getting-started.md)
  - [CLI usage](./docs/getting-started.md#cli-usage)
  - [Slash Commands](./docs/slash_commands.md)
  - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
  - [Example prompts](./docs/getting-started.md#example-prompts)
  - [Custom prompts](./docs/prompts.md)
  - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
  - [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Execpolicy quickstart**](./docs/execpolicy.md)
- [**Authentication**](./docs/authentication.md)
  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- **Automating Codex**
  - [GitHub Action](https://github.com/openai/codex-action)
  - [TypeScript SDK](./sdk/typescript/README.md)
  - [Non-interactive mode (`codex exec`)](./docs/exec.md)
- [**Advanced**](./docs/advanced.md)
  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
  - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
- [**Zero data retention (ZDR)**](./docs/zdr.md)
- [**Codex Documentation**](https://developers.openai.com/codex)
- [**Contributing**](./docs/contributing.md)
- [**Install & build**](./docs/install.md)
  - [System Requirements](./docs/install.md#system-requirements)
  - [DotSlash](./docs/install.md#dotslash)
  - [Build from source](./docs/install.md#build-from-source)
- [**FAQ**](./docs/faq.md)
- [**Installing & building**](./docs/install.md)
- [**Open source fund**](./docs/open-source-fund.md)

---

## License

This repository is licensed under the [Apache-2.0 License](LICENSE).
16 announcement_tip.toml (Normal file)
@@ -0,0 +1,16 @@
# Example announcement tips for Codex TUI.
# Each [[announcements]] entry is evaluated in order; the last matching one is shown.
# Dates are UTC, formatted as YYYY-MM-DD. The from_date is inclusive and the to_date is exclusive.
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specifies which app should display the announcement (cli, vsce, ...).

[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"

[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2026-01-10"
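
A minimal sketch of how a client might pick which announcement to show, using the field names from the file above. The `Announcement` struct, the `chrono` and `regex` crates, and the selection helper are assumptions for illustration, not the actual Codex TUI implementation.

```rust
use chrono::NaiveDate;
use regex::Regex;

// Hypothetical shape of one [[announcements]] entry; field names mirror the TOML above.
struct Announcement {
    content: String,
    from_date: Option<NaiveDate>, // inclusive
    to_date: Option<NaiveDate>,   // exclusive
    version_regex: Option<String>,
    target_app: Option<String>,
}

// Entries are evaluated in order and the last match wins; omitted fields match everything.
fn pick<'a>(
    entries: &'a [Announcement],
    today: NaiveDate,
    version: &str,
    app: &str,
) -> Option<&'a str> {
    entries
        .iter()
        .filter(|a| a.from_date.map_or(true, |d| today >= d))
        .filter(|a| a.to_date.map_or(true, |d| today < d))
        .filter(|a| {
            a.version_regex
                .as_deref()
                .map_or(true, |re| Regex::new(re).is_ok_and(|re| re.is_match(version)))
        })
        .filter(|a| a.target_app.as_deref().map_or(true, |t| t == app))
        .last()
        .map(|a| a.content.as_str())
}
```

An entry with no from_date, to_date, version_regex, or target_app matches unconditionally, which is consistent with the "omit to apply to all versions" comment above.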
@@ -2,6 +2,7 @@
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

import argparse
from contextlib import contextmanager
import json
import os
import shutil
@@ -12,6 +13,7 @@ import zipfile
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import sys
from typing import Iterable, Sequence
from urllib.parse import urlparse
from urllib.request import urlopen
@@ -77,6 +79,45 @@ RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]

# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI.
DOWNLOAD_TIMEOUT_SECS = 60


def _gha_enabled() -> bool:
    # GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs
    # much easier to scan: groups collapse noisy sections and error annotations surface the
    # failure in the UI without changing the actual exception/traceback output.
    return os.environ.get("GITHUB_ACTIONS") == "true"


def _gha_escape(value: str) -> str:
    # Workflow commands require percent/newline escaping.
    return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")


def _gha_error(*, title: str, message: str) -> None:
    # Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just
    # adds a prominent summary line to the job UI so the root cause is easier to spot.
    if not _gha_enabled():
        return
    print(
        f"::error title={_gha_escape(title)}::{_gha_escape(message)}",
        flush=True,
    )


@contextmanager
def _gha_group(title: str):
    # Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op
    # so local output remains unchanged.
    if _gha_enabled():
        print(f"::group::{_gha_escape(title)}", flush=True)
    try:
        yield
    finally:
        if _gha_enabled():
            print("::endgroup::", flush=True)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Install native Codex binaries.")
@@ -131,18 +172,20 @@ def main() -> int:
    workflow_id = workflow_url.rstrip("/").split("/")[-1]
    print(f"Downloading native artifacts from workflow {workflow_id}...")

    with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
        artifacts_dir = Path(artifacts_dir_str)
        _download_artifacts(workflow_id, artifacts_dir)
        install_binary_components(
            artifacts_dir,
            vendor_dir,
            [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
        )
    with _gha_group(f"Download native artifacts from workflow {workflow_id}"):
        with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
            artifacts_dir = Path(artifacts_dir_str)
            _download_artifacts(workflow_id, artifacts_dir)
            install_binary_components(
                artifacts_dir,
                vendor_dir,
                [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
            )

    if "rg" in components:
        print("Fetching ripgrep binaries...")
        fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
        with _gha_group("Fetch ripgrep binaries"):
            print("Fetching ripgrep binaries...")
            fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)

    print(f"Installed native dependencies into {vendor_dir}")
    return 0
@@ -203,7 +246,14 @@ def fetch_rg(

    for future in as_completed(future_map):
        target = future_map[future]
        results[target] = future.result()
        try:
            results[target] = future.result()
        except Exception as exc:
            _gha_error(
                title="ripgrep install failed",
                message=f"target={target} error={exc!r}",
            )
            raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc
        print(f" installed ripgrep for {target}")

    return [results[target] for target in targets]
@@ -301,6 +351,8 @@ def _fetch_single_rg(
    url = providers[0]["url"]
    archive_format = platform_info.get("format", "zst")
    archive_member = platform_info.get("path")
    digest = platform_info.get("digest")
    expected_size = platform_info.get("size")

    dest_dir = vendor_dir / target / "path"
    dest_dir.mkdir(parents=True, exist_ok=True)
@@ -313,10 +365,32 @@
        tmp_dir = Path(tmp_dir_str)
        archive_filename = os.path.basename(urlparse(url).path)
        download_path = tmp_dir / archive_filename
        _download_file(url, download_path)
        print(
            f" downloading ripgrep for {target} ({platform_key}) from {url}",
            flush=True,
        )
        try:
            _download_file(url, download_path)
        except Exception as exc:
            _gha_error(
                title="ripgrep download failed",
                message=f"target={target} platform={platform_key} url={url} error={exc!r}",
            )
            raise RuntimeError(
                "Failed to download ripgrep "
                f"(target={target}, platform={platform_key}, format={archive_format}, "
                f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})."
            ) from exc

        dest.unlink(missing_ok=True)
        extract_archive(download_path, archive_format, archive_member, dest)
        try:
            extract_archive(download_path, archive_format, archive_member, dest)
        except Exception as exc:
            raise RuntimeError(
                "Failed to extract ripgrep "
                f"(target={target}, platform={platform_key}, format={archive_format}, "
                f"member={archive_member!r}, url={url}, archive={download_path})."
            ) from exc

    if not is_windows:
        dest.chmod(0o755)
@@ -326,7 +400,9 @@

def _download_file(url: str, dest: Path) -> None:
    dest.parent.mkdir(parents=True, exist_ok=True)
    with urlopen(url) as response, open(dest, "wb") as out:
    dest.unlink(missing_ok=True)

    with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out:
        shutil.copyfileobj(response, out)
372 codex-rs/Cargo.lock (generated)
@@ -42,7 +42,7 @@ dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"bytes",
|
||||
"bytestring",
|
||||
"derive_more 2.0.1",
|
||||
"derive_more 2.1.1",
|
||||
"encoding_rs",
|
||||
"foldhash 0.1.5",
|
||||
"futures-core",
|
||||
@@ -137,7 +137,7 @@ dependencies = [
|
||||
"bytes",
|
||||
"bytestring",
|
||||
"cfg-if",
|
||||
"derive_more 2.0.1",
|
||||
"derive_more 2.1.1",
|
||||
"encoding_rs",
|
||||
"foldhash 0.1.5",
|
||||
"futures-core",
|
||||
@@ -329,12 +329,12 @@ name = "app_test_support"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"base64",
|
||||
"chrono",
|
||||
"codex-app-server-protocol",
|
||||
"codex-core",
|
||||
"codex-protocol",
|
||||
"codex-utils-cargo-bin",
|
||||
"core_test_support",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -360,11 +360,17 @@ dependencies = [
|
||||
"objc2-foundation",
|
||||
"parking_lot",
|
||||
"percent-encoding",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.60.2",
|
||||
"wl-clipboard-rs",
|
||||
"x11rb",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "1.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
|
||||
|
||||
[[package]]
|
||||
name = "arrayvec"
|
||||
version = "0.7.6"
|
||||
@@ -906,9 +912,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap_complete"
|
||||
version = "4.5.57"
|
||||
version = "4.5.64"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad"
|
||||
checksum = "4c0da80818b2d95eca9aa614a30783e42f62bf5fdfee24e68cfb960b071ba8d1"
|
||||
dependencies = [
|
||||
"clap",
|
||||
]
|
||||
@@ -987,7 +993,6 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"app_test_support",
|
||||
"assert_cmd",
|
||||
"base64",
|
||||
"chrono",
|
||||
"codex-app-server-protocol",
|
||||
@@ -1058,6 +1063,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"assert_matches",
|
||||
"codex-utils-cargo-bin",
|
||||
"pretty_assertions",
|
||||
"similar",
|
||||
"tempfile",
|
||||
@@ -1155,6 +1161,7 @@ dependencies = [
|
||||
"codex-tui",
|
||||
"codex-tui2",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-windows-sandbox",
|
||||
"ctor 0.5.0",
|
||||
"libc",
|
||||
@@ -1256,6 +1263,7 @@ name = "codex-core"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
"assert_cmd",
|
||||
"assert_matches",
|
||||
"async-channel",
|
||||
@@ -1278,6 +1286,7 @@ dependencies = [
|
||||
"codex-protocol",
|
||||
"codex-rmcp-client",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-pty",
|
||||
"codex-utils-readiness",
|
||||
"codex-utils-string",
|
||||
@@ -1327,7 +1336,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"toml 0.9.5",
|
||||
"toml_edit",
|
||||
"toml_edit 0.24.0+spec-1.1.0",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"tracing-test",
|
||||
@@ -1353,6 +1362,7 @@ dependencies = [
|
||||
"codex-core",
|
||||
"codex-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"core_test_support",
|
||||
"libc",
|
||||
"mcp-types",
|
||||
@@ -1378,11 +1388,11 @@ name = "codex-exec-server"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"async-trait",
|
||||
"clap",
|
||||
"codex-core",
|
||||
"codex-execpolicy",
|
||||
"codex-utils-cargo-bin",
|
||||
"exec_server_test_support",
|
||||
"libc",
|
||||
"maplit",
|
||||
@@ -1423,7 +1433,7 @@ dependencies = [
|
||||
"allocative",
|
||||
"anyhow",
|
||||
"clap",
|
||||
"derive_more 2.0.1",
|
||||
"derive_more 2.1.1",
|
||||
"env_logger",
|
||||
"log",
|
||||
"multimap",
|
||||
@@ -1444,6 +1454,7 @@ dependencies = [
|
||||
"codex-protocol",
|
||||
"pretty_assertions",
|
||||
"sentry",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
@@ -1541,7 +1552,6 @@ name = "codex-mcp-server"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"codex-arg0",
|
||||
"codex-common",
|
||||
"codex-core",
|
||||
@@ -1600,7 +1610,6 @@ dependencies = [
|
||||
"serde_json",
|
||||
"strum_macros 0.27.2",
|
||||
"tokio",
|
||||
"tonic",
|
||||
"tracing",
|
||||
"tracing-opentelemetry",
|
||||
"tracing-subscriber",
|
||||
@@ -1665,8 +1674,8 @@ dependencies = [
|
||||
"axum",
|
||||
"codex-keyring-store",
|
||||
"codex-protocol",
|
||||
"codex-utils-cargo-bin",
|
||||
"dirs",
|
||||
"escargot",
|
||||
"futures",
|
||||
"keyring",
|
||||
"mcp-types",
|
||||
@@ -1693,6 +1702,7 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"codex-utils-cargo-bin",
|
||||
"pretty_assertions",
|
||||
"tempfile",
|
||||
"uds_windows",
|
||||
@@ -1722,7 +1732,7 @@ dependencies = [
|
||||
"codex-windows-sandbox",
|
||||
"color-eyre",
|
||||
"crossterm",
|
||||
"derive_more 2.0.1",
|
||||
"derive_more 2.1.1",
|
||||
"diffy",
|
||||
"dirs",
|
||||
"dunce",
|
||||
@@ -1749,6 +1759,7 @@ dependencies = [
|
||||
"supports-color 3.0.2",
|
||||
"tempfile",
|
||||
"textwrap 0.16.2",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
@@ -1763,6 +1774,9 @@ dependencies = [
|
||||
"url",
|
||||
"uuid",
|
||||
"vt100",
|
||||
"which",
|
||||
"windows-sys 0.52.0",
|
||||
"winsplit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1791,7 +1805,7 @@ dependencies = [
|
||||
"codex-windows-sandbox",
|
||||
"color-eyre",
|
||||
"crossterm",
|
||||
"derive_more 2.0.1",
|
||||
"derive_more 2.1.1",
|
||||
"diffy",
|
||||
"dirs",
|
||||
"dunce",
|
||||
@@ -1806,6 +1820,7 @@ dependencies = [
|
||||
"pulldown-cmark",
|
||||
"rand 0.9.2",
|
||||
"ratatui",
|
||||
"ratatui-core",
|
||||
"ratatui-macros",
|
||||
"regex-lite",
|
||||
"reqwest",
|
||||
@@ -1827,6 +1842,7 @@ dependencies = [
|
||||
"tracing-subscriber",
|
||||
"tree-sitter-bash",
|
||||
"tree-sitter-highlight",
|
||||
"tui-scrollbar",
|
||||
"unicode-segmentation",
|
||||
"unicode-width 0.2.1",
|
||||
"url",
|
||||
@@ -1850,11 +1866,19 @@ dependencies = [
|
||||
name = "codex-utils-cache"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"lru 0.16.2",
|
||||
"lru 0.16.3",
|
||||
"sha1",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-utils-cargo-bin"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"assert_cmd",
|
||||
"thiserror 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-utils-image"
|
||||
version = "0.0.0"
|
||||
@@ -1983,6 +2007,20 @@ dependencies = [
|
||||
"static_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "compact_str"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
|
||||
dependencies = [
|
||||
"castaway",
|
||||
"cfg-if",
|
||||
"itoa",
|
||||
"rustversion",
|
||||
"ryu",
|
||||
"static_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concurrent-queue"
|
||||
version = "2.5.0"
|
||||
@@ -2004,6 +2042,18 @@ dependencies = [
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "const-hex"
|
||||
version = "1.17.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"proptest",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "convert_case"
|
||||
version = "0.6.0"
|
||||
@@ -2015,9 +2065,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "convert_case"
|
||||
version = "0.7.1"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7"
|
||||
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
|
||||
dependencies = [
|
||||
"unicode-segmentation",
|
||||
]
|
||||
@@ -2058,6 +2108,7 @@ dependencies = [
|
||||
"codex-core",
|
||||
"codex-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"notify",
|
||||
"pretty_assertions",
|
||||
"regex-lite",
|
||||
@@ -2403,11 +2454,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "derive_more"
|
||||
version = "2.0.1"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
|
||||
checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
|
||||
dependencies = [
|
||||
"derive_more-impl 2.0.1",
|
||||
"derive_more-impl 2.1.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2425,13 +2476,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "derive_more-impl"
|
||||
version = "2.0.1"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
|
||||
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
|
||||
dependencies = [
|
||||
"convert_case 0.7.1",
|
||||
"convert_case 0.10.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"rustc_version",
|
||||
"syn 2.0.104",
|
||||
"unicode-xid",
|
||||
]
|
||||
@@ -2547,6 +2599,15 @@ version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
|
||||
|
||||
[[package]]
|
||||
name = "document-features"
|
||||
version = "0.2.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61"
|
||||
dependencies = [
|
||||
"litrs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dotenvy"
|
||||
version = "0.15.7"
|
||||
@@ -2720,7 +2781,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2777,8 +2838,8 @@ name = "exec_server_test_support"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"codex-core",
|
||||
"codex-utils-cargo-bin",
|
||||
"rmcp",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
@@ -2828,7 +2889,7 @@ checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"rustix 1.0.8",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3713,13 +3774,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "insta"
|
||||
version = "1.44.3"
|
||||
version = "1.46.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5c943d4415edd8153251b6f197de5eb1640e56d84e8d9159bea190421c73698"
|
||||
checksum = "1b66886d14d18d420ab5052cbff544fc5d34d0b2cdd35eb5976aaa10a4a472e5"
|
||||
dependencies = [
|
||||
"console",
|
||||
"once_cell",
|
||||
"similar",
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3744,17 +3806,6 @@ dependencies = [
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipnet"
|
||||
version = "2.11.0"
|
||||
@@ -3779,7 +3830,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3883,6 +3934,16 @@ dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kasuari"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8fe90c1150662e858c7d5f945089b7517b0a80d8bf7ba4b1b5ffc984e7230a5b"
|
||||
dependencies = [
|
||||
"hashbrown 0.16.0",
|
||||
"thiserror 2.0.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "keyring"
|
||||
version = "3.6.3"
|
||||
@@ -4028,6 +4089,12 @@ version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
|
||||
|
||||
[[package]]
|
||||
name = "litrs"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092"
|
||||
|
||||
[[package]]
|
||||
name = "local-waker"
|
||||
version = "0.1.4"
|
||||
@@ -4084,9 +4151,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.16.2"
|
||||
version = "0.16.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96051b46fc183dc9cd4a223960ef37b9af631b55191852a8274bfef064cda20f"
|
||||
checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593"
|
||||
dependencies = [
|
||||
"hashbrown 0.16.0",
|
||||
]
|
||||
@@ -4146,9 +4213,9 @@ name = "mcp_test_support"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"codex-core",
|
||||
"codex-mcp-server",
|
||||
"codex-utils-cargo-bin",
|
||||
"core_test_support",
|
||||
"mcp-types",
|
||||
"os_info",
|
||||
@@ -4649,9 +4716,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6"
|
||||
checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
@@ -4663,9 +4730,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-appender-tracing"
|
||||
version = "0.30.1"
|
||||
version = "0.31.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e68f63eca5fad47e570e00e893094fc17be959c80c79a7d6ec1abdd5ae6ffc16"
|
||||
checksum = "ef6a1ac5ca3accf562b8c306fa8483c85f4390f768185ab775f242f7fe8fdcc2"
|
||||
dependencies = [
|
||||
"opentelemetry",
|
||||
"tracing",
|
||||
@@ -4675,9 +4742,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-http"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d"
|
||||
checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -4688,9 +4755,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-otlp"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b"
|
||||
checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf"
|
||||
dependencies = [
|
||||
"http 1.3.1",
|
||||
"opentelemetry",
|
||||
@@ -4708,30 +4775,32 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-proto"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc"
|
||||
checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"hex",
|
||||
"const-hex",
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
"prost",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tonic",
|
||||
"tonic-prost",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-semantic-conventions"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2"
|
||||
checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846"
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry_sdk"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b"
|
||||
checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd"
|
||||
dependencies = [
|
||||
"futures-channel",
|
||||
"futures-executor",
|
||||
@@ -4739,7 +4808,6 @@ dependencies = [
|
||||
"opentelemetry",
|
||||
"percent-encoding",
|
||||
"rand 0.9.2",
|
||||
"serde_json",
|
||||
"thiserror 2.0.17",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -5095,7 +5163,7 @@ version = "3.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983"
|
||||
dependencies = [
|
||||
"toml_edit",
|
||||
"toml_edit 0.23.10+spec-1.0.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5122,10 +5190,25 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prost"
|
||||
version = "0.13.5"
|
||||
name = "proptest"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5"
|
||||
checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"num-traits",
|
||||
"rand 0.9.2",
|
||||
"rand_chacha 0.9.0",
|
||||
"rand_xorshift",
|
||||
"regex-syntax 0.8.5",
|
||||
"unarray",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prost"
|
||||
version = "0.14.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"prost-derive",
|
||||
@@ -5133,9 +5216,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost-derive"
|
||||
version = "0.13.5"
|
||||
version = "0.14.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
|
||||
checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"itertools 0.14.0",
|
||||
@@ -5248,7 +5331,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"socket2 0.6.1",
|
||||
"tracing",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5335,6 +5418,15 @@ dependencies = [
|
||||
"getrandom 0.3.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_xorshift"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
|
||||
dependencies = [
|
||||
"rand_core 0.9.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ratatui"
|
||||
version = "0.29.0"
|
||||
@@ -5342,7 +5434,7 @@ source = "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"cassowary",
|
||||
"compact_str",
|
||||
"compact_str 0.8.1",
|
||||
"crossterm",
|
||||
"indoc",
|
||||
"instability",
|
||||
@@ -5351,7 +5443,27 @@ dependencies = [
|
||||
"paste",
|
||||
"strum 0.26.3",
|
||||
"unicode-segmentation",
|
||||
"unicode-truncate",
|
||||
"unicode-truncate 1.1.0",
|
||||
"unicode-width 0.2.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ratatui-core"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5ef8dea09a92caaf73bff7adb70b76162e5937524058a7e5bff37869cbbec293"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"compact_str 0.9.0",
|
||||
"hashbrown 0.16.0",
|
||||
"indoc",
|
||||
"itertools 0.14.0",
|
||||
"kasuari",
|
||||
"lru 0.16.3",
|
||||
"strum 0.27.2",
|
||||
"thiserror 2.0.17",
|
||||
"unicode-segmentation",
|
||||
"unicode-truncate 2.0.0",
|
||||
"unicode-width 0.2.1",
|
||||
]
|
||||
|
||||
@@ -5440,9 +5552,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex-lite"
|
||||
version = "0.1.7"
|
||||
version = "0.1.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30"
|
||||
checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da"
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
@@ -5598,7 +5710,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.4.15",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5611,7 +5723,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.9.4",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6518,6 +6630,9 @@ name = "strum"
|
||||
version = "0.27.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
|
||||
dependencies = [
|
||||
"strum_macros 0.27.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strum_macros"
|
||||
@@ -6909,29 +7024,26 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.47.1"
|
||||
version = "1.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
|
||||
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"parking_lot",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"slab",
|
||||
"socket2 0.6.1",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.61.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.5.0"
|
||||
version = "2.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
||||
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -6960,9 +7072,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio-stream"
|
||||
version = "0.1.17"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
|
||||
checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"pin-project-lite",
|
||||
@@ -7024,18 +7136,30 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "0.7.3"
|
||||
version = "0.7.5+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533"
|
||||
checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.23.7"
|
||||
version = "0.23.10+spec-1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d"
|
||||
checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269"
|
||||
dependencies = [
|
||||
"indexmap 2.12.0",
|
||||
"toml_datetime",
|
||||
"toml_parser",
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.24.0+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8c740b185920170a6d9191122cafef7010bd6270a3824594bff6784c04d7f09e"
|
||||
dependencies = [
|
||||
"indexmap 2.12.0",
|
||||
"toml_datetime",
|
||||
@@ -7046,30 +7170,28 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml_parser"
|
||||
version = "1.0.4"
|
||||
version = "1.0.6+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e"
|
||||
checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44"
|
||||
dependencies = [
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_writer"
|
||||
version = "1.0.4"
|
||||
version = "1.0.6+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2"
|
||||
checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607"
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.13.1"
|
||||
version = "0.14.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9"
|
||||
checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum",
|
||||
"base64",
|
||||
"bytes",
|
||||
"h2",
|
||||
"http 1.3.1",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
@@ -7078,9 +7200,8 @@ dependencies = [
|
||||
"hyper-util",
|
||||
"percent-encoding",
|
||||
"pin-project",
|
||||
"prost",
|
||||
"rustls-native-certs",
|
||||
"socket2 0.5.10",
|
||||
"sync_wrapper",
|
||||
"tokio",
|
||||
"tokio-rustls",
|
||||
"tokio-stream",
|
||||
@@ -7090,6 +7211,17 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tonic-prost"
|
||||
version = "0.14.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"prost",
|
||||
"tonic",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
version = "0.5.2"
|
||||
@@ -7207,15 +7339,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-opentelemetry"
|
||||
version = "0.31.0"
|
||||
version = "0.32.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c"
|
||||
checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
"rustversion",
|
||||
"smallvec",
|
||||
"thiserror 2.0.17",
|
||||
"tracing",
|
||||
"tracing-core",
|
||||
"tracing-log",
|
||||
@@ -7225,9 +7358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-subscriber"
|
||||
version = "0.3.20"
|
||||
version = "0.3.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
|
||||
checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
|
||||
dependencies = [
|
||||
"matchers",
|
||||
"nu-ansi-term",
|
||||
@@ -7346,6 +7479,16 @@ dependencies = [
|
||||
"termcolor",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tui-scrollbar"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c42613099915b2e30e9f144670666e858e2538366f77742e1cf1c2f230efcacd"
|
||||
dependencies = [
|
||||
"document-features",
|
||||
"ratatui-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typenum"
|
||||
version = "1.18.0"
|
||||
@@ -7372,6 +7515,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unarray"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
|
||||
|
||||
[[package]]
|
||||
name = "unicase"
|
||||
version = "2.8.1"
|
||||
@@ -7407,6 +7556,17 @@ dependencies = [
|
||||
"unicode-width 0.1.14",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-truncate"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8fbf03860ff438702f3910ca5f28f8dac63c1c11e7efb5012b8b175493606330"
|
||||
dependencies = [
|
||||
"itertools 0.13.0",
|
||||
"unicode-segmentation",
|
||||
"unicode-width 0.2.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
@@ -7851,7 +8011,7 @@ version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8372,6 +8532,12 @@ version = "0.0.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
|
||||
|
||||
[[package]]
|
||||
name = "winsplit"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3ab703352da6a72f35c39a533526393725640575bb211f61987a2748323ad956"
|
||||
|
||||
[[package]]
|
||||
name = "wiremock"
|
||||
version = "0.6.5"
|
||||
|
||||
@@ -36,6 +36,7 @@ members = [
    "tui",
    "tui2",
    "utils/absolute-path",
    "utils/cargo-bin",
    "utils/git",
    "utils/cache",
    "utils/image",
@@ -93,6 +94,7 @@ codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-pty = { path = "utils/pty" }
@@ -143,14 +145,14 @@ ignore = "0.4.23"
image = { version = "^0.25.9", default-features = false }
include_dir = "0.7.4"
indexmap = "2.12.0"
insta = "1.44.3"
insta = "1.46.0"
itertools = "0.14.0"
keyring = { version = "3.6", default-features = false }
landlock = "0.4.4"
lazy_static = "1"
libc = "0.2.177"
log = "0.4"
lru = "0.16.2"
lru = "0.16.3"
maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
@@ -158,12 +160,12 @@ notify = "8.2.0"
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.30.0"
opentelemetry-appender-tracing = "0.30.0"
opentelemetry-otlp = "0.30.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry_sdk = "0.30.0"
tracing-opentelemetry = "0.31.0"
opentelemetry = "0.31.0"
opentelemetry-appender-tracing = "0.31.0"
opentelemetry-otlp = "0.31.0"
opentelemetry-semantic-conventions = "0.31.0"
opentelemetry_sdk = "0.31.0"
tracing-opentelemetry = "0.32.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"
@@ -174,9 +176,10 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-core = "0.1.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.7"
regex-lite = "0.1.8"
reqwest = "0.12"
rmcp = { version = "0.12.0", default-features = false }
schemars = "0.8.22"
@@ -204,20 +207,20 @@ thiserror = "2.0.17"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.17"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.5"
tonic = "0.13.1"
toml_edit = "0.24.0"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.20"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tui-scrollbar = "0.2.1"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -15,8 +15,8 @@ You can also install via Homebrew (`brew install --cask codex`) or download a pl

## Documentation quickstart

- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
- First run with Codex? Start with [`docs/getting-started.md`](../docs/getting-started.md) (links to the walkthrough for prompts, keyboard shortcuts, and session management).
- Want deeper control? See [`docs/config.md`](../docs/config.md) and [`docs/install.md`](../docs/install.md).

## What's new in the Rust CLI

@@ -30,7 +30,7 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses

#### MCP client

Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#connecting-to-mcp-servers) for details.

#### MCP server (experimental)
@@ -113,6 +113,10 @@ client_request_definitions! {
        params: v2::ThreadArchiveParams,
        response: v2::ThreadArchiveResponse,
    },
    ThreadRollback => "thread/rollback" {
        params: v2::ThreadRollbackParams,
        response: v2::ThreadRollbackResponse,
    },
    ThreadList => "thread/list" {
        params: v2::ThreadListParams,
        response: v2::ThreadListResponse,
@@ -565,7 +569,7 @@ client_notification_definitions! {
mod tests {
    use super::*;
    use anyhow::Result;
    use codex_protocol::ConversationId;
    use codex_protocol::ThreadId;
    use codex_protocol::account::PlanType;
    use codex_protocol::parse_command::ParsedCommand;
    use codex_protocol::protocol::AskForApproval;
@@ -614,7 +618,7 @@ mod tests {

    #[test]
    fn conversation_id_serializes_as_plain_string() -> Result<()> {
        let id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
        let id = ThreadId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;

        assert_eq!(
            json!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
@@ -625,11 +629,10 @@

    #[test]
    fn conversation_id_deserializes_from_plain_string() -> Result<()> {
        let id: ConversationId =
            serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;
        let id: ThreadId = serde_json::from_value(json!("67e55044-10b1-426f-9247-bb680e5fe0c8"))?;

        assert_eq!(
            ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
            ThreadId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?,
            id,
        );
        Ok(())
@@ -650,7 +653,7 @@

    #[test]
    fn serialize_server_request() -> Result<()> {
        let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
        let conversation_id = ThreadId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
        let params = v1::ExecCommandApprovalParams {
            conversation_id,
            call_id: "call-42".to_string(),
@@ -6,6 +6,7 @@ use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ThreadRolledBackEvent;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::UserMessageEvent;

@@ -57,6 +58,7 @@ impl ThreadHistoryBuilder {
EventMsg::TokenCount(_) => {}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
EventMsg::ThreadRolledBack(payload) => self.handle_thread_rollback(payload),
EventMsg::UndoCompleted(_) => {}
EventMsg::TurnAborted(payload) => self.handle_turn_aborted(payload),
_ => {}
@@ -130,6 +132,23 @@ impl ThreadHistoryBuilder {
turn.status = TurnStatus::Interrupted;
}

fn handle_thread_rollback(&mut self, payload: &ThreadRolledBackEvent) {
self.finish_current_turn();

let n = usize::try_from(payload.num_turns).unwrap_or(usize::MAX);
if n >= self.turns.len() {
self.turns.clear();
} else {
self.turns.truncate(self.turns.len().saturating_sub(n));
}

// Re-number subsequent synthetic ids so the pruned history is consistent.
self.next_turn_index =
i64::try_from(self.turns.len().saturating_add(1)).unwrap_or(i64::MAX);
let item_count: usize = self.turns.iter().map(|t| t.items.len()).sum();
self.next_item_index = i64::try_from(item_count.saturating_add(1)).unwrap_or(i64::MAX);
}

fn finish_current_turn(&mut self) {
if let Some(turn) = self.current_turn.take() {
if turn.items.is_empty() {
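For context on the arithmetic above: a rollback of `num_turns` prunes that many turns off the tail and re-derives the next synthetic turn/item indices from whatever survived. Below is a small self-contained sketch of the same bookkeeping using plain vectors instead of the builder's real `Turn`/`ThreadItem` types; everything in it is illustrative, not crate API.

```rust
// Simplified stand-in: each turn is just a list of item ids.
fn rollback(turns: &mut Vec<Vec<String>>, num_turns: u32) -> (i64, i64) {
    let n = usize::try_from(num_turns).unwrap_or(usize::MAX);
    if n >= turns.len() {
        turns.clear();
    } else {
        turns.truncate(turns.len().saturating_sub(n));
    }
    // Next synthetic ids continue from the pruned history.
    let next_turn_index = i64::try_from(turns.len().saturating_add(1)).unwrap_or(i64::MAX);
    let item_count: usize = turns.iter().map(|t| t.len()).sum();
    let next_item_index = i64::try_from(item_count.saturating_add(1)).unwrap_or(i64::MAX);
    (next_turn_index, next_item_index)
}

fn main() {
    // Three completed turns with one item each; roll back the last one.
    let mut turns = vec![
        vec!["item-1".to_string()],
        vec!["item-2".to_string()],
        vec!["item-3".to_string()],
    ];
    let (next_turn, next_item) = rollback(&mut turns, 1);
    assert_eq!(turns.len(), 2);
    assert_eq!((next_turn, next_item), (3, 3));
}
```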
@@ -213,6 +232,7 @@ mod tests {
use codex_protocol::protocol::AgentMessageEvent;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::ThreadRolledBackEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::UserMessageEvent;
@@ -410,4 +430,95 @@ mod tests {
}
);
}

#[test]
fn drops_last_turns_on_thread_rollback() {
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "First".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A1".into(),
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Second".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A2".into(),
}),
EventMsg::ThreadRolledBack(ThreadRolledBackEvent { num_turns: 1 }),
EventMsg::UserMessage(UserMessageEvent {
message: "Third".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A3".into(),
}),
];

let turns = build_turns_from_event_msgs(&events);
let expected = vec![
Turn {
id: "turn-1".into(),
status: TurnStatus::Completed,
error: None,
items: vec![
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "First".into(),
}],
},
ThreadItem::AgentMessage {
id: "item-2".into(),
text: "A1".into(),
},
],
},
Turn {
id: "turn-2".into(),
status: TurnStatus::Completed,
error: None,
items: vec![
ThreadItem::UserMessage {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Third".into(),
}],
},
ThreadItem::AgentMessage {
id: "item-4".into(),
text: "A3".into(),
},
],
},
];
assert_eq!(turns, expected);
}

#[test]
fn thread_rollback_clears_all_turns_when_num_turns_exceeds_history() {
let events = vec![
EventMsg::UserMessage(UserMessageEvent {
message: "One".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A1".into(),
}),
EventMsg::UserMessage(UserMessageEvent {
message: "Two".into(),
images: None,
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A2".into(),
}),
EventMsg::ThreadRolledBack(ThreadRolledBackEvent { num_turns: 99 }),
];

let turns = build_turns_from_event_msgs(&events);
assert_eq!(turns, Vec::<Turn>::new());
}
}

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::path::PathBuf;

use codex_protocol::ConversationId;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
@@ -68,7 +68,7 @@ pub struct NewConversationParams {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub rollout_path: PathBuf,
@@ -77,7 +77,7 @@ pub struct NewConversationResponse {
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationResponse {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub model: String,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
@@ -90,9 +90,9 @@ pub enum GetConversationSummaryParams {
#[serde(rename = "rolloutPath")]
rollout_path: PathBuf,
},
ConversationId {
ThreadId {
#[serde(rename = "conversationId")]
conversation_id: ConversationId,
conversation_id: ThreadId,
},
}

@@ -113,7 +113,7 @@ pub struct ListConversationsParams {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
@@ -143,7 +143,7 @@ pub struct ListConversationsResponse {
#[serde(rename_all = "camelCase")]
pub struct ResumeConversationParams {
pub path: Option<PathBuf>,
pub conversation_id: Option<ConversationId>,
pub conversation_id: Option<ThreadId>,
pub history: Option<Vec<ResponseItem>>,
pub overrides: Option<NewConversationParams>,
}
@@ -158,7 +158,7 @@ pub struct AddConversationSubscriptionResponse {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveConversationParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub rollout_path: PathBuf,
}

@@ -198,7 +198,7 @@ pub struct GitDiffToRemoteResponse {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ApplyPatchApprovalParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
/// Use to correlate this with [codex_core::protocol::PatchApplyBeginEvent]
/// and [codex_core::protocol::PatchApplyEndEvent].
pub call_id: String,
@@ -219,7 +219,7 @@ pub struct ApplyPatchApprovalResponse {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecCommandApprovalParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
/// Use to correlate this with [codex_core::protocol::ExecCommandBeginEvent]
/// and [codex_core::protocol::ExecCommandEndEvent].
pub call_id: String,
@@ -369,14 +369,14 @@ pub struct SandboxSettings {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserMessageParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub items: Vec<InputItem>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SendUserTurnParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
pub items: Vec<InputItem>,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
@@ -384,6 +384,8 @@ pub struct SendUserTurnParams {
pub model: String,
pub effort: Option<ReasoningEffort>,
pub summary: ReasoningSummary,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<serde_json::Value>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -393,7 +395,7 @@ pub struct SendUserTurnResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct InterruptConversationParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
}

#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
@@ -409,7 +411,7 @@ pub struct SendUserMessageResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationListenerParams {
pub conversation_id: ConversationId,
pub conversation_id: ThreadId,
#[serde(default)]
pub experimental_raw_events: bool,
}
@@ -443,7 +445,7 @@ pub struct LoginChatGptCompleteNotification {
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct SessionConfiguredNotification {
pub session_id: ConversationId,
pub session_id: ThreadId,
pub model: String,
pub reasoning_effort: Option<ReasoningEffort>,
pub history_log_id: u64,

@@ -89,6 +89,7 @@ pub enum CodexErrorInfo {
InternalServerError,
Unauthorized,
BadRequest,
ThreadRollbackFailed,
SandboxError,
/// The response SSE stream disconnected in the middle of a turn before completion.
ResponseStreamDisconnected {
@@ -119,6 +120,7 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
CoreCodexErrorInfo::InternalServerError => CodexErrorInfo::InternalServerError,
CoreCodexErrorInfo::Unauthorized => CodexErrorInfo::Unauthorized,
CoreCodexErrorInfo::BadRequest => CodexErrorInfo::BadRequest,
CoreCodexErrorInfo::ThreadRollbackFailed => CodexErrorInfo::ThreadRollbackFailed,
CoreCodexErrorInfo::SandboxError => CodexErrorInfo::SandboxError,
CoreCodexErrorInfo::ResponseStreamDisconnected { http_status_code } => {
CodexErrorInfo::ResponseStreamDisconnected { http_status_code }
@@ -227,6 +229,8 @@ pub enum ConfigLayerSource {
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
System {
/// This is the path to the system config.toml file, though it is not
/// guaranteed to exist.
file: AbsolutePathBuf,
},

@@ -237,9 +241,19 @@ pub enum ConfigLayerSource {
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
User {
/// This is the path to the user's config.toml file, though it is not
/// guaranteed to exist.
file: AbsolutePathBuf,
},

/// Path to a .codex/ folder within a project. There could be multiple of
/// these between `cwd` and the project/repo root.
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
Project {
dot_codex_folder: AbsolutePathBuf,
},

/// Session-layer overrides supplied via `-c`/`--config`.
SessionFlags,

@@ -247,6 +261,8 @@ pub enum ConfigLayerSource {
/// as the last layer on top of everything else. This scheme did not quite
/// work out as intended, but we keep this variant as a "best effort" while
/// we phase out `managed_config.toml` in favor of `requirements.toml`.
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
LegacyManagedConfigTomlFromFile {
file: AbsolutePathBuf,
},
@@ -262,6 +278,7 @@ impl ConfigLayerSource {
ConfigLayerSource::Mdm { .. } => 0,
ConfigLayerSource::System { .. } => 10,
ConfigLayerSource::User { .. } => 20,
ConfigLayerSource::Project { .. } => 25,
ConfigLayerSource::SessionFlags => 30,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => 40,
ConfigLayerSource::LegacyManagedConfigTomlFromMdm => 50,
@@ -315,6 +332,15 @@ pub struct ProfileV2 {
pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct AnalyticsConfig {
pub enabled: Option<bool>,
#[serde(default, flatten)]
pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
@@ -339,6 +365,7 @@ pub struct Config {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub analytics: Option<AnalyticsConfig>,
#[serde(default, flatten)]
pub additional: HashMap<String, JsonValue>,
}
@@ -460,14 +487,33 @@ pub struct ConfigEdit {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ApprovalDecision {
pub enum CommandExecutionApprovalDecision {
/// User approved the command.
Accept,
/// Approve and remember the approval for the session.
/// User approved the command and future identical commands should run without prompting.
AcceptForSession,
/// User approved the command, and wants to apply the proposed execpolicy amendment so future
/// matching commands can run without prompting.
AcceptWithExecpolicyAmendment {
execpolicy_amendment: ExecPolicyAmendment,
},
/// User denied the command. The agent will continue the turn.
Decline,
/// User denied the command. The turn will also be immediately interrupted.
Cancel,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum FileChangeApprovalDecision {
/// User approved the file changes.
Accept,
/// User approved the file changes and future changes to the same files should run without prompting.
AcceptForSession,
/// User denied the file changes. The agent will continue the turn.
Decline,
/// User denied the file changes. The turn will also be immediately interrupted.
Cancel,
}

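This change splits the previous single `ApprovalDecision` enum into command-specific and file-change-specific decisions. A rough sketch of how a client UI might map one approval picker onto both kinds is below; the enums here are local stand-ins, not the real `codex_app_server_protocol` types, and the execpolicy-amendment variant is omitted for brevity.

```rust
// Local stand-ins mirroring the shape of the protocol enums (illustrative only).
#[derive(Debug)]
enum CommandExecutionApprovalDecision {
    Accept,
    AcceptForSession,
    Decline,
    Cancel,
}

#[derive(Debug)]
enum FileChangeApprovalDecision {
    Accept,
    AcceptForSession,
    Decline,
    Cancel,
}

// A single hypothetical UI picker shared by both approval dialogs.
#[derive(Clone, Copy)]
enum UiChoice {
    Approve,
    ApproveForSession,
    Deny,
    Abort,
}

fn command_decision(choice: UiChoice) -> CommandExecutionApprovalDecision {
    match choice {
        UiChoice::Approve => CommandExecutionApprovalDecision::Accept,
        UiChoice::ApproveForSession => CommandExecutionApprovalDecision::AcceptForSession,
        UiChoice::Deny => CommandExecutionApprovalDecision::Decline,
        UiChoice::Abort => CommandExecutionApprovalDecision::Cancel,
    }
}

fn file_change_decision(choice: UiChoice) -> FileChangeApprovalDecision {
    match choice {
        UiChoice::Approve => FileChangeApprovalDecision::Accept,
        UiChoice::ApproveForSession => FileChangeApprovalDecision::AcceptForSession,
        UiChoice::Deny => FileChangeApprovalDecision::Decline,
        UiChoice::Abort => FileChangeApprovalDecision::Cancel,
    }
}

fn main() {
    println!("{:?}", command_decision(UiChoice::ApproveForSession));
    println!("{:?}", file_change_decision(UiChoice::Deny));
}
```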
@@ -1030,6 +1076,30 @@ pub struct ThreadArchiveParams {
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadRollbackParams {
pub thread_id: String,
/// The number of turns to drop from the end of the thread. Must be >= 1.
///
/// This only modifies the thread's history and does not revert local file changes
/// that have been made by the agent. Clients are responsible for reverting these changes.
pub num_turns: u32,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadRollbackResponse {
/// The updated thread after applying the rollback, with `turns` populated.
///
/// The ThreadItems stored in each Turn are lossy since we explicitly do not
/// persist all agent interactions, such as command executions. This is the same
/// behavior as `thread/resume`.
pub thread: Thread,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
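Because `ThreadRollbackParams` is tagged `#[serde(rename_all = "camelCase")]`, its wire shape uses `threadId`/`numTurns`. A minimal round-trip sketch with a local mirror of the struct follows (requires the `serde` and `serde_json` crates; the id value is made up, and the real type lives in the protocol crate above):

```rust
use serde::{Deserialize, Serialize};

// Local mirror of ThreadRollbackParams, just to show the camelCase wire shape.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
struct ThreadRollbackParams {
    thread_id: String,
    num_turns: u32,
}

fn main() -> Result<(), serde_json::Error> {
    let params = ThreadRollbackParams {
        thread_id: "thr_123".to_string(), // illustrative id
        num_turns: 1,
    };
    let wire = serde_json::to_string(&params)?;
    assert_eq!(wire, r#"{"threadId":"thr_123","numTurns":1}"#);

    // Deserializing the camelCase JSON yields the same value back.
    let round_trip: ThreadRollbackParams = serde_json::from_str(&wire)?;
    assert_eq!(round_trip, params);
    Ok(())
}
```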
@@ -1168,7 +1238,7 @@ pub struct Thread {
pub source: SessionSource,
/// Optional Git metadata captured when the thread was created.
pub git_info: Option<GitInfo>,
/// Only populated on a `thread/resume` response.
/// Only populated on `thread/resume` and `thread/rollback` responses.
/// For all other responses and notifications returning a Thread,
/// the turns field will be an empty list.
pub turns: Vec<Turn>,
@@ -1196,6 +1266,7 @@ pub struct ThreadTokenUsageUpdatedNotification {
pub struct ThreadTokenUsage {
pub total: TokenUsageBreakdown,
pub last: TokenUsageBreakdown,
// TODO(aibrahim): make this not optional
#[ts(type = "number | null")]
pub model_context_window: Option<i64>,
}
@@ -1259,6 +1330,8 @@ pub struct Turn {
pub struct TurnError {
pub message: String,
pub codex_error_info: Option<CodexErrorInfo>,
#[serde(default)]
pub additional_details: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1302,6 +1375,8 @@ pub struct TurnStartParams {
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1829,7 +1904,7 @@ pub struct CommandExecutionRequestApprovalParams {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CommandExecutionRequestApprovalResponse {
pub decision: ApprovalDecision,
pub decision: CommandExecutionApprovalDecision,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1849,7 +1924,7 @@ pub struct FileChangeRequestApprovalParams {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[ts(export_to = "v2/")]
pub struct FileChangeRequestApprovalResponse {
pub decision: ApprovalDecision,
pub decision: FileChangeApprovalDecision,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]

@@ -13,16 +13,18 @@ use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::bail;
use clap::ArgAction;
use clap::Parser;
use clap::Subcommand;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::FileChangeApprovalDecision;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
@@ -35,6 +37,8 @@ use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
@@ -49,7 +53,7 @@ use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::ConversationId;
use codex_protocol::ThreadId;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use serde::Serialize;
@@ -65,6 +69,19 @@ struct Cli {
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
codex_bin: String,

/// Forwarded to the `codex` CLI as `--config key=value`. Repeatable.
///
/// Example:
/// `--config 'model_providers.mock.base_url="http://localhost:4010/v2"'`
#[arg(
short = 'c',
long = "config",
value_name = "key=value",
action = ArgAction::Append,
global = true
)]
config_overrides: Vec<String>,

#[command(subcommand)]
command: CliCommand,
}
@@ -113,37 +130,54 @@ enum CliCommand {
|
||||
TestLogin,
|
||||
/// Fetch the current account rate limits from the Codex app-server.
|
||||
GetAccountRateLimits,
|
||||
/// List the available models from the Codex app-server.
|
||||
#[command(name = "model-list")]
|
||||
ModelList,
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let Cli { codex_bin, command } = Cli::parse();
|
||||
let Cli {
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
command,
|
||||
} = Cli::parse();
|
||||
|
||||
match command {
|
||||
CliCommand::SendMessage { user_message } => send_message(codex_bin, user_message),
|
||||
CliCommand::SendMessageV2 { user_message } => send_message_v2(codex_bin, user_message),
|
||||
CliCommand::SendMessage { user_message } => {
|
||||
send_message(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::SendMessageV2 { user_message } => {
|
||||
send_message_v2(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::TriggerCmdApproval { user_message } => {
|
||||
trigger_cmd_approval(codex_bin, user_message)
|
||||
trigger_cmd_approval(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::TriggerPatchApproval { user_message } => {
|
||||
trigger_patch_approval(codex_bin, user_message)
|
||||
trigger_patch_approval(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(&codex_bin, &config_overrides),
|
||||
CliCommand::SendFollowUpV2 {
|
||||
first_message,
|
||||
follow_up_message,
|
||||
} => send_follow_up_v2(codex_bin, first_message, follow_up_message),
|
||||
CliCommand::TestLogin => test_login(codex_bin),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
|
||||
} => send_follow_up_v2(
|
||||
&codex_bin,
|
||||
&config_overrides,
|
||||
first_message,
|
||||
follow_up_message,
|
||||
),
|
||||
CliCommand::TestLogin => test_login(&codex_bin, &config_overrides),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(&codex_bin, &config_overrides),
|
||||
CliCommand::ModelList => model_list(&codex_bin, &config_overrides),
|
||||
}
|
||||
}
|
||||
|
||||
fn send_message(codex_bin: String, user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
fn send_message(codex_bin: &str, config_overrides: &[String], user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
|
||||
let conversation = client.new_conversation()?;
|
||||
let conversation = client.start_thread()?;
|
||||
println!("< newConversation response: {conversation:?}");
|
||||
|
||||
let subscription = client.add_conversation_listener(&conversation.conversation_id)?;
|
||||
@@ -154,51 +188,66 @@ fn send_message(codex_bin: String, user_message: String) -> Result<()> {
|
||||
|
||||
client.stream_conversation(&conversation.conversation_id)?;
|
||||
|
||||
client.remove_conversation_listener(subscription.subscription_id)?;
|
||||
client.remove_thread_listener(subscription.subscription_id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn send_message_v2(codex_bin: String, user_message: String) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, user_message, None, None)
|
||||
fn send_message_v2(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, config_overrides, user_message, None, None)
|
||||
}
|
||||
|
||||
fn trigger_cmd_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
|
||||
fn trigger_cmd_approval(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: Option<String>,
|
||||
) -> Result<()> {
|
||||
let default_prompt =
|
||||
"Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
|
||||
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
|
||||
send_message_v2_with_policies(
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
message,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly),
|
||||
)
|
||||
}
|
||||
|
||||
fn trigger_patch_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
|
||||
fn trigger_patch_approval(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: Option<String>,
|
||||
) -> Result<()> {
|
||||
let default_prompt =
|
||||
"Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
|
||||
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
|
||||
send_message_v2_with_policies(
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
message,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly),
|
||||
)
|
||||
}
|
||||
|
||||
fn no_trigger_cmd_approval(codex_bin: String) -> Result<()> {
|
||||
fn no_trigger_cmd_approval(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let prompt = "Run `touch should_not_trigger_approval.txt`";
|
||||
send_message_v2_with_policies(codex_bin, prompt.to_string(), None, None)
|
||||
send_message_v2_with_policies(codex_bin, config_overrides, prompt.to_string(), None, None)
|
||||
}
|
||||
|
||||
fn send_message_v2_with_policies(
|
||||
codex_bin: String,
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
approval_policy: Option<AskForApproval>,
|
||||
sandbox_policy: Option<SandboxPolicy>,
|
||||
) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -222,11 +271,12 @@ fn send_message_v2_with_policies(
|
||||
}
|
||||
|
||||
fn send_follow_up_v2(
|
||||
codex_bin: String,
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
first_message: String,
|
||||
follow_up_message: String,
|
||||
) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -259,8 +309,8 @@ fn send_follow_up_v2(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_login(codex_bin: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
fn test_login(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -289,8 +339,8 @@ fn test_login(codex_bin: String) -> Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_account_rate_limits(codex_bin: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
fn get_account_rate_limits(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -301,6 +351,18 @@ fn get_account_rate_limits(codex_bin: String) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn model_list(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
|
||||
let response = client.model_list(ModelListParams::default())?;
|
||||
println!("< model/list response: {response:?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct CodexClient {
|
||||
child: Child,
|
||||
stdin: Option<ChildStdin>,
|
||||
@@ -309,8 +371,12 @@ struct CodexClient {
|
||||
}
|
||||
|
||||
impl CodexClient {
|
||||
fn spawn(codex_bin: String) -> Result<Self> {
|
||||
let mut codex_app_server = Command::new(&codex_bin)
|
||||
fn spawn(codex_bin: &str, config_overrides: &[String]) -> Result<Self> {
|
||||
let mut cmd = Command::new(codex_bin);
|
||||
for override_kv in config_overrides {
|
||||
cmd.arg("--config").arg(override_kv);
|
||||
}
|
||||
let mut codex_app_server = cmd
|
||||
.arg("app-server")
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
@@ -351,7 +417,7 @@ impl CodexClient {
|
||||
self.send_request(request, request_id, "initialize")
|
||||
}
|
||||
|
||||
fn new_conversation(&mut self) -> Result<NewConversationResponse> {
|
||||
fn start_thread(&mut self) -> Result<NewConversationResponse> {
|
||||
let request_id = self.request_id();
|
||||
let request = ClientRequest::NewConversation {
|
||||
request_id: request_id.clone(),
|
||||
@@ -363,7 +429,7 @@ impl CodexClient {
|
||||
|
||||
fn add_conversation_listener(
|
||||
&mut self,
|
||||
conversation_id: &ConversationId,
|
||||
conversation_id: &ThreadId,
|
||||
) -> Result<AddConversationSubscriptionResponse> {
|
||||
let request_id = self.request_id();
|
||||
let request = ClientRequest::AddConversationListener {
|
||||
@@ -377,7 +443,7 @@ impl CodexClient {
|
||||
self.send_request(request, request_id, "addConversationListener")
|
||||
}
|
||||
|
||||
fn remove_conversation_listener(&mut self, subscription_id: Uuid) -> Result<()> {
|
||||
fn remove_thread_listener(&mut self, subscription_id: Uuid) -> Result<()> {
|
||||
let request_id = self.request_id();
|
||||
let request = ClientRequest::RemoveConversationListener {
|
||||
request_id: request_id.clone(),
|
||||
@@ -395,7 +461,7 @@ impl CodexClient {
|
||||
|
||||
fn send_user_message(
|
||||
&mut self,
|
||||
conversation_id: &ConversationId,
|
||||
conversation_id: &ThreadId,
|
||||
message: &str,
|
||||
) -> Result<SendUserMessageResponse> {
|
||||
let request_id = self.request_id();
|
||||
@@ -452,7 +518,17 @@ impl CodexClient {
|
||||
self.send_request(request, request_id, "account/rateLimits/read")
|
||||
}
|
||||
|
||||
fn stream_conversation(&mut self, conversation_id: &ConversationId) -> Result<()> {
|
||||
fn model_list(&mut self, params: ModelListParams) -> Result<ModelListResponse> {
|
||||
let request_id = self.request_id();
|
||||
let request = ClientRequest::ModelList {
|
||||
request_id: request_id.clone(),
|
||||
params,
|
||||
};
|
||||
|
||||
self.send_request(request, request_id, "model/list")
|
||||
}
|
||||
|
||||
fn stream_conversation(&mut self, conversation_id: &ThreadId) -> Result<()> {
|
||||
loop {
|
||||
let notification = self.next_notification()?;
|
||||
|
||||
@@ -589,7 +665,7 @@ impl CodexClient {
|
||||
fn extract_event(
|
||||
&self,
|
||||
notification: JSONRPCNotification,
|
||||
conversation_id: &ConversationId,
|
||||
conversation_id: &ThreadId,
|
||||
) -> Result<Option<Event>> {
|
||||
let params = notification
|
||||
.params
|
||||
@@ -603,7 +679,7 @@ impl CodexClient {
|
||||
let conversation_value = map
|
||||
.remove("conversationId")
|
||||
.context("event missing conversationId")?;
|
||||
let notification_conversation: ConversationId = serde_json::from_value(conversation_value)
|
||||
let notification_conversation: ThreadId = serde_json::from_value(conversation_value)
|
||||
.context("conversationId was not a valid UUID")?;
|
||||
|
||||
if ¬ification_conversation != conversation_id {
|
||||
@@ -770,7 +846,7 @@ impl CodexClient {
|
||||
}
|
||||
|
||||
let response = CommandExecutionRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Accept,
|
||||
decision: CommandExecutionApprovalDecision::Accept,
|
||||
};
|
||||
self.send_server_request_response(request_id, &response)?;
|
||||
println!("< approved commandExecution request for item {item_id}");
|
||||
@@ -801,7 +877,7 @@ impl CodexClient {
|
||||
}
|
||||
|
||||
let response = FileChangeRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Accept,
|
||||
decision: FileChangeApprovalDecision::Accept,
|
||||
};
|
||||
self.send_server_request_response(request_id, &response)?;
|
||||
println!("< approved fileChange request for item {item_id}");
|
||||
|
||||
@@ -48,7 +48,6 @@ uuid = { workspace = true, features = ["serde", "v7"] }
|
||||
|
||||
[dev-dependencies]
|
||||
app_test_support = { workspace = true }
|
||||
assert_cmd = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
core_test_support = { workspace = true }
|
||||
mcp-types = { workspace = true }
|
||||
|
||||
@@ -11,6 +11,8 @@
- [Initialization](#initialization)
- [API Overview](#api-overview)
- [Events](#events)
- [Approvals](#approvals)
- [Skills](#skills)
- [Auth endpoints](#auth-endpoints)

## Protocol
@@ -72,6 +74,7 @@ Example (from OpenAI's official VSCode extension):
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success.
- `thread/rollback` — drop the last N turns from the agent’s in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success (see the example request/response after this list).
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex’s automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
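For reference, a minimal `thread/rollback` exchange might look like the sketch below. The parameter names follow `ThreadRollbackParams` (`threadId`, `numTurns`) and the result shape follows `ThreadRollbackResponse`; the ids are illustrative and most `thread` fields are omitted.

```json
{ "method": "thread/rollback", "id": 31, "params": {
  "threadId": "thr_123",
  "numTurns": 1
} }
{ "id": 31, "result": { "thread": {
  "id": "thr_123",
  "turns": [
    { "id": "turn_455", "status": "completed", "items": [], "error": null }
  ]
} } }
```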
@@ -162,7 +165,7 @@ Turns attach user input (text or images) to a thread and trigger Codex generatio
- `{"type":"image","url":"https://…png"}`
- `{"type":"localImage","path":"/tmp/screenshot.png"}`

You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread. `outputSchema` applies only to the current turn.

```json
{ "method": "turn/start", "id": 30, "params": {
@@ -178,7 +181,14 @@ You can optionally specify config overrides on the new turn. If specified, these
},
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise"
"summary": "concise",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
"properties": { "answer": { "type": "string" } },
"required": ["answer"],
"additionalProperties": false
}
} }
{ "id": 30, "result": { "turn": {
"id": "turn_456",
@@ -188,6 +198,25 @@ You can optionally specify config overrides on the new turn. If specified, these
} } }
```

### Example: Start a turn (invoke a skill)

Invoke a skill by sending a text input that begins with `$<skill-name>`.

```json
{ "method": "turn/start", "id": 33, "params": {
"threadId": "thr_123",
"input": [
{ "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage." }
]
} }
{ "id": 33, "result": { "turn": {
"id": "turn_457",
"status": "inProgress",
"items": [],
"error": null
} } }
```

### Example: Interrupt an active turn

You can cancel a running Turn with `turn/interrupt`.
@@ -302,7 +331,7 @@ Event notifications are the server-initiated event stream for thread lifecycles,
The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.

- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo?, additionalDetails? } }` (see the example payload after this list).
- `turn/diff/updated` — `{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.

||||
|
||||
### Errors
|
||||
|
||||
`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
|
||||
`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo?, additionalDetails? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
|
||||
|
||||
`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:
|
||||
|
||||
@@ -397,6 +426,30 @@ Order of messages:
|
||||
|
||||
UI guidance for IDEs: surface an approval dialog as soon as the request arrives. The turn will proceed after the server receives a response to the approval request. The terminal `item/completed` notification will be sent with the appropriate status.
|
||||
|
||||
## Skills
|
||||
|
||||
Skills are invoked by sending a text input that starts with `$<skill-name>`. The rest of the text is passed to the skill as its input.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage.
|
||||
```
|
||||
|
||||
Use `skills/list` to fetch the available skills (optionally scoped by `cwd` and/or with `forceReload`).
|
||||
|
||||
```json
|
||||
{ "method": "skills/list", "id": 25, "params": {
|
||||
"cwd": "/Users/me/project",
|
||||
"forceReload": false
|
||||
} }
|
||||
{ "id": 25, "result": {
|
||||
"skills": [
|
||||
{ "name": "skill-creator", "description": "Create or update a Codex skill" }
|
||||
]
|
||||
} }
|
||||
```
|
||||
|
||||
## Auth endpoints
|
||||
|
||||
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
use crate::codex_message_processor::ApiVersion;
|
||||
use crate::codex_message_processor::PendingInterrupts;
|
||||
use crate::codex_message_processor::PendingRollbacks;
|
||||
use crate::codex_message_processor::TurnSummary;
|
||||
use crate::codex_message_processor::TurnSummaryStore;
|
||||
use crate::codex_message_processor::read_event_msgs_from_rollout;
|
||||
use crate::codex_message_processor::read_summary_from_rollout;
|
||||
use crate::codex_message_processor::summary_to_thread;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
||||
use codex_app_server_protocol::AgentMessageDeltaNotification;
|
||||
use codex_app_server_protocol::ApplyPatchApprovalParams;
|
||||
use codex_app_server_protocol::ApplyPatchApprovalResponse;
|
||||
use codex_app_server_protocol::ApprovalDecision;
|
||||
use codex_app_server_protocol::CodexErrorInfo as V2CodexErrorInfo;
|
||||
use codex_app_server_protocol::CommandAction as V2ParsedCommand;
|
||||
use codex_app_server_protocol::CommandExecutionApprovalDecision;
|
||||
use codex_app_server_protocol::CommandExecutionOutputDeltaNotification;
|
||||
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
|
||||
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
|
||||
@@ -20,6 +26,7 @@ use codex_app_server_protocol::ErrorNotification;
|
||||
use codex_app_server_protocol::ExecCommandApprovalParams;
|
||||
use codex_app_server_protocol::ExecCommandApprovalResponse;
|
||||
use codex_app_server_protocol::ExecPolicyAmendment as V2ExecPolicyAmendment;
|
||||
use codex_app_server_protocol::FileChangeApprovalDecision;
|
||||
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalParams;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
|
||||
@@ -27,6 +34,7 @@ use codex_app_server_protocol::FileUpdateChange;
|
||||
use codex_app_server_protocol::InterruptConversationResponse;
|
||||
use codex_app_server_protocol::ItemCompletedNotification;
|
||||
use codex_app_server_protocol::ItemStartedNotification;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::McpToolCallError;
|
||||
use codex_app_server_protocol::McpToolCallResult;
|
||||
use codex_app_server_protocol::McpToolCallStatus;
|
||||
@@ -40,6 +48,7 @@ use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestPayload;
|
||||
use codex_app_server_protocol::TerminalInteractionNotification;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadRollbackResponse;
|
||||
use codex_app_server_protocol::ThreadTokenUsage;
|
||||
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
|
||||
use codex_app_server_protocol::Turn;
|
||||
@@ -50,9 +59,11 @@ use codex_app_server_protocol::TurnInterruptResponse;
|
||||
use codex_app_server_protocol::TurnPlanStep;
|
||||
use codex_app_server_protocol::TurnPlanUpdatedNotification;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_core::CodexConversation;
|
||||
use codex_app_server_protocol::build_turns_from_event_msgs;
|
||||
use codex_core::CodexThread;
|
||||
use codex_core::parse_command::shlex_join;
|
||||
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
|
||||
use codex_core::protocol::CodexErrorInfo as CoreCodexErrorInfo;
|
||||
use codex_core::protocol::Event;
|
||||
use codex_core::protocol::EventMsg;
|
||||
use codex_core::protocol::ExecApprovalRequestEvent;
|
||||
@@ -66,7 +77,7 @@ use codex_core::protocol::TokenCountEvent;
|
||||
use codex_core::protocol::TurnDiffEvent;
|
||||
use codex_core::review_format::format_review_findings_block;
|
||||
use codex_core::review_prompts;
|
||||
use codex_protocol::ConversationId;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::plan_tool::UpdatePlanArgs;
|
||||
use codex_protocol::protocol::ReviewOutputEvent;
|
||||
use std::collections::HashMap;
|
||||
@@ -78,14 +89,17 @@ use tracing::error;
|
||||
|
||||
type JsonValue = serde_json::Value;
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn apply_bespoke_event_handling(
|
||||
event: Event,
|
||||
conversation_id: ConversationId,
|
||||
conversation: Arc<CodexConversation>,
|
||||
conversation_id: ThreadId,
|
||||
conversation: Arc<CodexThread>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
pending_interrupts: PendingInterrupts,
|
||||
pending_rollbacks: PendingRollbacks,
|
||||
turn_summary_store: TurnSummaryStore,
|
||||
api_version: ApiVersion,
|
||||
fallback_model_provider: String,
|
||||
) {
|
||||
let Event {
|
||||
id: event_turn_id,
|
||||
@@ -337,14 +351,35 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
.await;
|
||||
}
|
||||
EventMsg::Error(ev) => {
|
||||
let message = ev.message.clone();
|
||||
let codex_error_info = ev.codex_error_info.clone();
|
||||
|
||||
// If this error belongs to an in-flight `thread/rollback` request, fail that request
|
||||
// (and clear pending state) so subsequent rollbacks are unblocked.
|
||||
//
|
||||
// Don't send a notification for this error.
|
||||
if matches!(
|
||||
codex_error_info,
|
||||
Some(CoreCodexErrorInfo::ThreadRollbackFailed)
|
||||
) {
|
||||
return handle_thread_rollback_failed(
|
||||
conversation_id,
|
||||
message,
|
||||
&pending_rollbacks,
|
||||
&outgoing,
|
||||
)
|
||||
.await;
|
||||
};
|
||||
|
||||
let turn_error = TurnError {
|
||||
message: ev.message,
|
||||
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
|
||||
additional_details: None,
|
||||
};
|
||||
handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::Error(ErrorNotification {
|
||||
error: turn_error,
|
||||
error: turn_error.clone(),
|
||||
will_retry: false,
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
@@ -357,6 +392,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
let turn_error = TurnError {
|
||||
message: ev.message,
|
||||
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
|
||||
additional_details: ev.additional_details,
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::Error(ErrorNotification {
|
||||
@@ -688,6 +724,58 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
EventMsg::ThreadRolledBack(_rollback_event) => {
|
||||
let pending = {
|
||||
let mut map = pending_rollbacks.lock().await;
|
||||
map.remove(&conversation_id)
|
||||
};
|
||||
|
||||
if let Some(request_id) = pending {
|
||||
let rollout_path = conversation.rollout_path();
|
||||
let response = match read_summary_from_rollout(
|
||||
rollout_path.as_path(),
|
||||
fallback_model_provider.as_str(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(summary) => {
|
||||
let mut thread = summary_to_thread(summary);
|
||||
match read_event_msgs_from_rollout(rollout_path.as_path()).await {
|
||||
Ok(events) => {
|
||||
thread.turns = build_turns_from_event_msgs(&events);
|
||||
ThreadRollbackResponse { thread }
|
||||
}
|
||||
Err(err) => {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!(
|
||||
"failed to load rollout `{}`: {err}",
|
||||
rollout_path.display()
|
||||
),
|
||||
data: None,
|
||||
};
|
||||
outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!(
|
||||
"failed to load rollout `{}`: {err}",
|
||||
rollout_path.display()
|
||||
),
|
||||
data: None,
|
||||
};
|
||||
outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
outgoing.send_response(request_id, response).await;
|
||||
}
|
||||
}
|
||||
EventMsg::TurnDiff(turn_diff_event) => {
|
||||
handle_turn_diff(
|
||||
conversation_id,
|
||||
@@ -714,7 +802,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
}
|
||||
|
||||
async fn handle_turn_diff(
|
||||
conversation_id: ConversationId,
|
||||
conversation_id: ThreadId,
|
||||
event_turn_id: &str,
|
||||
turn_diff_event: TurnDiffEvent,
|
||||
api_version: ApiVersion,
|
||||
@@ -733,7 +821,7 @@ async fn handle_turn_diff(
|
||||
}
|
||||
|
||||
async fn handle_turn_plan_update(
|
||||
conversation_id: ConversationId,
|
||||
conversation_id: ThreadId,
|
||||
event_turn_id: &str,
|
||||
plan_update_event: UpdatePlanArgs,
|
||||
api_version: ApiVersion,
|
||||
@@ -757,7 +845,7 @@ async fn handle_turn_plan_update(
|
||||
}
|
||||
|
||||
async fn emit_turn_completed_with_status(
|
||||
conversation_id: ConversationId,
|
||||
conversation_id: ThreadId,
|
||||
event_turn_id: String,
|
||||
status: TurnStatus,
|
||||
error: Option<TurnError>,
|
||||
@@ -778,7 +866,7 @@ async fn emit_turn_completed_with_status(
|
||||
}
async fn complete_file_change_item(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    item_id: String,
    changes: Vec<FileUpdateChange>,
    status: PatchApplyStatus,
@@ -810,7 +898,7 @@ async fn complete_file_change_item(

#[allow(clippy::too_many_arguments)]
async fn complete_command_execution_item(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    turn_id: String,
    item_id: String,
    command: String,
@@ -843,7 +931,7 @@ async fn complete_command_execution_item(

async fn maybe_emit_raw_response_item_completed(
    api_version: ApiVersion,
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    turn_id: &str,
    item: codex_protocol::models::ResponseItem,
    outgoing: &OutgoingMessageSender,
@@ -863,7 +951,7 @@ async fn maybe_emit_raw_response_item_completed(
}

async fn find_and_remove_turn_summary(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    turn_summary_store: &TurnSummaryStore,
) -> TurnSummary {
    let mut map = turn_summary_store.lock().await;
@@ -871,7 +959,7 @@ async fn find_and_remove_turn_summary(
}

async fn handle_turn_complete(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    event_turn_id: String,
    outgoing: &OutgoingMessageSender,
    turn_summary_store: &TurnSummaryStore,
@@ -887,7 +975,7 @@ async fn handle_turn_complete(
}

async fn handle_turn_interrupted(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    event_turn_id: String,
    outgoing: &OutgoingMessageSender,
    turn_summary_store: &TurnSummaryStore,
@@ -904,8 +992,33 @@ async fn handle_turn_interrupted(
        .await;
}

async fn handle_thread_rollback_failed(
    conversation_id: ThreadId,
    message: String,
    pending_rollbacks: &PendingRollbacks,
    outgoing: &OutgoingMessageSender,
) {
    let pending_rollback = {
        let mut map = pending_rollbacks.lock().await;
        map.remove(&conversation_id)
    };

    if let Some(request_id) = pending_rollback {
        outgoing
            .send_error(
                request_id,
                JSONRPCErrorError {
                    code: INVALID_REQUEST_ERROR_CODE,
                    message: message.clone(),
                    data: None,
                },
            )
            .await;
    }
}

async fn handle_token_count_event(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    turn_id: String,
    token_count_event: TokenCountEvent,
    outgoing: &OutgoingMessageSender,
@@ -933,7 +1046,7 @@ async fn handle_token_count_event(
}

async fn handle_error(
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    error: TurnError,
    turn_summary_store: &TurnSummaryStore,
) {
@@ -944,7 +1057,7 @@ async fn handle_error(
async fn on_patch_approval_response(
    event_turn_id: String,
    receiver: oneshot::Receiver<JsonValue>,
    codex: Arc<CodexConversation>,
    codex: Arc<CodexThread>,
) {
    let response = receiver.await;
    let value = match response {
@@ -986,7 +1099,7 @@ async fn on_patch_approval_response(
async fn on_exec_approval_response(
    event_turn_id: String,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexConversation>,
    conversation: Arc<CodexThread>,
) {
    let response = receiver.await;
    let value = match response {
@@ -1081,14 +1194,29 @@ fn format_file_change_diff(change: &CoreFileChange) -> String {
    }
}

fn map_file_change_approval_decision(
    decision: FileChangeApprovalDecision,
) -> (ReviewDecision, Option<PatchApplyStatus>) {
    match decision {
        FileChangeApprovalDecision::Accept => (ReviewDecision::Approved, None),
        FileChangeApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
        FileChangeApprovalDecision::Decline => {
            (ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
        }
        FileChangeApprovalDecision::Cancel => {
            (ReviewDecision::Abort, Some(PatchApplyStatus::Declined))
        }
    }
}

#[allow(clippy::too_many_arguments)]
async fn on_file_change_request_approval_response(
    event_turn_id: String,
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    item_id: String,
    changes: Vec<FileUpdateChange>,
    receiver: oneshot::Receiver<JsonValue>,
    codex: Arc<CodexConversation>,
    codex: Arc<CodexThread>,
    outgoing: Arc<OutgoingMessageSender>,
    turn_summary_store: TurnSummaryStore,
) {
@@ -1099,23 +1227,12 @@ async fn on_file_change_request_approval_response(
        .unwrap_or_else(|err| {
            error!("failed to deserialize FileChangeRequestApprovalResponse: {err}");
            FileChangeRequestApprovalResponse {
                decision: ApprovalDecision::Decline,
                decision: FileChangeApprovalDecision::Decline,
            }
        });

    let (decision, completion_status) = match response.decision {
        ApprovalDecision::Accept
        | ApprovalDecision::AcceptForSession
        | ApprovalDecision::AcceptWithExecpolicyAmendment { .. } => {
            (ReviewDecision::Approved, None)
        }
        ApprovalDecision::Decline => {
            (ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
        }
        ApprovalDecision::Cancel => {
            (ReviewDecision::Abort, Some(PatchApplyStatus::Declined))
        }
    };
    let (decision, completion_status) =
        map_file_change_approval_decision(response.decision);
    // Allow EventMsg::PatchApplyEnd to emit ItemCompleted for accepted patches.
    // Only short-circuit on declines/cancels/failures.
    (decision, completion_status)
@@ -1153,13 +1270,13 @@ async fn on_file_change_request_approval_response(
#[allow(clippy::too_many_arguments)]
async fn on_command_execution_request_approval_response(
    event_turn_id: String,
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    item_id: String,
    command: String,
    cwd: PathBuf,
    command_actions: Vec<V2ParsedCommand>,
    receiver: oneshot::Receiver<JsonValue>,
    conversation: Arc<CodexConversation>,
    conversation: Arc<CodexThread>,
    outgoing: Arc<OutgoingMessageSender>,
) {
    let response = receiver.await;
@@ -1169,16 +1286,18 @@ async fn on_command_execution_request_approval_response(
        .unwrap_or_else(|err| {
            error!("failed to deserialize CommandExecutionRequestApprovalResponse: {err}");
            CommandExecutionRequestApprovalResponse {
                decision: ApprovalDecision::Decline,
                decision: CommandExecutionApprovalDecision::Decline,
            }
        });

    let decision = response.decision;

    let (decision, completion_status) = match decision {
        ApprovalDecision::Accept => (ReviewDecision::Approved, None),
        ApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
        ApprovalDecision::AcceptWithExecpolicyAmendment {
        CommandExecutionApprovalDecision::Accept => (ReviewDecision::Approved, None),
        CommandExecutionApprovalDecision::AcceptForSession => {
            (ReviewDecision::ApprovedForSession, None)
        }
        CommandExecutionApprovalDecision::AcceptWithExecpolicyAmendment {
            execpolicy_amendment,
        } => (
            ReviewDecision::ApprovedExecpolicyAmendment {
@@ -1186,11 +1305,11 @@ async fn on_command_execution_request_approval_response(
            },
            None,
        ),
        ApprovalDecision::Decline => (
        CommandExecutionApprovalDecision::Decline => (
            ReviewDecision::Denied,
            Some(CommandExecutionStatus::Declined),
        ),
        ApprovalDecision::Cancel => (
        CommandExecutionApprovalDecision::Cancel => (
            ReviewDecision::Abort,
            Some(CommandExecutionStatus::Declined),
        ),
@@ -1330,9 +1449,17 @@ mod tests {
        Arc::new(Mutex::new(HashMap::new()))
    }

    #[test]
    fn file_change_accept_for_session_maps_to_approved_for_session() {
        let (decision, completion_status) =
            map_file_change_approval_decision(FileChangeApprovalDecision::AcceptForSession);
        assert_eq!(decision, ReviewDecision::ApprovedForSession);
        assert_eq!(completion_status, None);
    }

    #[tokio::test]
    async fn test_handle_error_records_message() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let turn_summary_store = new_turn_summary_store();

        handle_error(
@@ -1340,6 +1467,7 @@ mod tests {
            TurnError {
                message: "boom".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
                additional_details: None,
            },
            &turn_summary_store,
        )
@@ -1351,6 +1479,7 @@ mod tests {
            Some(TurnError {
                message: "boom".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
                additional_details: None,
            })
        );
        Ok(())
@@ -1358,7 +1487,7 @@ mod tests {

    #[tokio::test]
    async fn test_handle_turn_complete_emits_completed_without_error() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let event_turn_id = "complete1".to_string();
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = Arc::new(OutgoingMessageSender::new(tx));
@@ -1390,7 +1519,7 @@ mod tests {

    #[tokio::test]
    async fn test_handle_turn_interrupted_emits_interrupted_with_error() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let event_turn_id = "interrupt1".to_string();
        let turn_summary_store = new_turn_summary_store();
        handle_error(
@@ -1398,6 +1527,7 @@ mod tests {
            TurnError {
                message: "oops".to_string(),
                codex_error_info: None,
                additional_details: None,
            },
            &turn_summary_store,
        )
@@ -1431,7 +1561,7 @@ mod tests {

    #[tokio::test]
    async fn test_handle_turn_complete_emits_failed_with_error() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let event_turn_id = "complete_err1".to_string();
        let turn_summary_store = new_turn_summary_store();
        handle_error(
@@ -1439,6 +1569,7 @@ mod tests {
            TurnError {
                message: "bad".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::Other),
                additional_details: None,
            },
            &turn_summary_store,
        )
@@ -1467,6 +1598,7 @@ mod tests {
            Some(TurnError {
                message: "bad".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::Other),
                additional_details: None,
            })
        );
    }
@@ -1494,7 +1626,7 @@ mod tests {
            ],
        };

        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();

        handle_turn_plan_update(
            conversation_id,
@@ -1528,7 +1660,7 @@ mod tests {

    #[tokio::test]
    async fn test_handle_token_count_event_emits_usage_and_rate_limits() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let turn_id = "turn-123".to_string();
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = Arc::new(OutgoingMessageSender::new(tx));
@@ -1613,7 +1745,7 @@ mod tests {

    #[tokio::test]
    async fn test_handle_token_count_event_without_usage_info() -> Result<()> {
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();
        let turn_id = "turn-456".to_string();
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = Arc::new(OutgoingMessageSender::new(tx));
@@ -1647,7 +1779,7 @@ mod tests {
            },
        };

        let thread_id = ConversationId::new().to_string();
        let thread_id = ThreadId::new().to_string();
        let turn_id = "turn_1".to_string();
        let notification = construct_mcp_tool_call_notification(
            begin_event.clone(),
@@ -1677,8 +1809,8 @@ mod tests {
    #[tokio::test]
    async fn test_handle_turn_complete_emits_error_multiple_turns() -> Result<()> {
        // Conversation A will have two turns; Conversation B will have one turn.
        let conversation_a = ConversationId::new();
        let conversation_b = ConversationId::new();
        let conversation_a = ThreadId::new();
        let conversation_b = ThreadId::new();
        let turn_summary_store = new_turn_summary_store();

        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
@@ -1691,6 +1823,7 @@ mod tests {
            TurnError {
                message: "a1".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::BadRequest),
                additional_details: None,
            },
            &turn_summary_store,
        )
@@ -1710,6 +1843,7 @@ mod tests {
            TurnError {
                message: "b1".to_string(),
                codex_error_info: None,
                additional_details: None,
            },
            &turn_summary_store,
        )
@@ -1746,6 +1880,7 @@ mod tests {
            Some(TurnError {
                message: "a1".to_string(),
                codex_error_info: Some(V2CodexErrorInfo::BadRequest),
                additional_details: None,
            })
        );
    }
@@ -1766,6 +1901,7 @@ mod tests {
            Some(TurnError {
                message: "b1".to_string(),
                codex_error_info: None,
                additional_details: None,
            })
        );
    }
@@ -1801,7 +1937,7 @@ mod tests {
            },
        };

        let thread_id = ConversationId::new().to_string();
        let thread_id = ThreadId::new().to_string();
        let turn_id = "turn_2".to_string();
        let notification = construct_mcp_tool_call_notification(
            begin_event.clone(),
@@ -1852,7 +1988,7 @@ mod tests {
            result: Ok(result),
        };

        let thread_id = ConversationId::new().to_string();
        let thread_id = ThreadId::new().to_string();
        let turn_id = "turn_3".to_string();
        let notification = construct_mcp_tool_call_end_notification(
            end_event.clone(),
@@ -1895,7 +2031,7 @@ mod tests {
            result: Err("boom".to_string()),
        };

        let thread_id = ConversationId::new().to_string();
        let thread_id = ThreadId::new().to_string();
        let turn_id = "turn_4".to_string();
        let notification = construct_mcp_tool_call_end_notification(
            end_event.clone(),
@@ -1929,7 +2065,7 @@ mod tests {
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = OutgoingMessageSender::new(tx);
        let unified_diff = "--- a\n+++ b\n".to_string();
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();

        handle_turn_diff(
            conversation_id,
@@ -1964,7 +2100,7 @@ mod tests {
    async fn test_handle_turn_diff_is_noop_for_v1() -> Result<()> {
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
        let outgoing = OutgoingMessageSender::new(tx);
        let conversation_id = ConversationId::new();
        let conversation_id = ThreadId::new();

        handle_turn_diff(
            conversation_id,

File diff suppressed because it is too large.

@@ -9,6 +9,7 @@ use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_core::config_loader::LoaderOverrides;
use serde_json::json;
use std::path::PathBuf;
use toml::Value as TomlValue;
@@ -19,9 +20,13 @@ pub(crate) struct ConfigApi {
}

impl ConfigApi {
    pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
    pub(crate) fn new(
        codex_home: PathBuf,
        cli_overrides: Vec<(String, TomlValue)>,
        loader_overrides: LoaderOverrides,
    ) -> Self {
        Self {
            service: ConfigService::new(codex_home, cli_overrides),
            service: ConfigService::new(codex_home, cli_overrides, loader_overrides),
        }
    }

@@ -1,7 +1,8 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]

use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
@@ -17,13 +18,11 @@ use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

@@ -44,6 +43,7 @@ const CHANNEL_CAPACITY: usize = 128;
pub async fn run_main(
    codex_linux_sandbox_exe: Option<PathBuf>,
    cli_config_overrides: CliConfigOverrides,
    loader_overrides: LoaderOverrides,
) -> IoResult<()> {
    // Set up channels.
    let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
@@ -80,7 +80,11 @@ pub async fn run_main(
            format!("error parsing -c overrides: {e}"),
        )
    })?;
    let config = Config::load_with_cli_overrides(cli_kv_overrides.clone())
    let loader_overrides_for_config_api = loader_overrides.clone();
    let config = ConfigBuilder::default()
        .cli_overrides(cli_kv_overrides.clone())
        .loader_overrides(loader_overrides)
        .build()
        .await
        .map_err(|e| {
            std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
@@ -103,11 +107,8 @@ pub async fn run_main(
        .with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
        .with_filter(EnvFilter::from_default_env());

    let feedback_layer = tracing_subscriber::fmt::layer()
        .with_writer(feedback.make_writer())
        .with_ansi(false)
        .with_target(false)
        .with_filter(Targets::new().with_default(Level::TRACE));
    let feedback_layer = feedback.logger_layer();
    let feedback_metadata_layer = feedback.metadata_layer();

    let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());

@@ -116,6 +117,7 @@ pub async fn run_main(
    let _ = tracing_subscriber::registry()
        .with(stderr_fmt)
        .with(feedback_layer)
        .with(feedback_metadata_layer)
        .with(otel_logger_layer)
        .with(otel_tracing_layer)
        .try_init();
@@ -124,11 +126,13 @@ pub async fn run_main(
    let processor_handle = tokio::spawn({
        let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
        let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
        let loader_overrides = loader_overrides_for_config_api;
        let mut processor = MessageProcessor::new(
            outgoing_message_sender,
            codex_linux_sandbox_exe,
            std::sync::Arc::new(config),
            cli_overrides,
            loader_overrides,
            feedback.clone(),
        );
        async move {

@@ -1,10 +1,42 @@
use codex_app_server::run_main;
use codex_arg0::arg0_dispatch_or_else;
use codex_common::CliConfigOverrides;
use codex_core::config_loader::LoaderOverrides;
use std::path::PathBuf;

// Debug-only test hook: lets integration tests point the server at a temporary
// managed config file without writing to /etc.
const MANAGED_CONFIG_PATH_ENV_VAR: &str = "CODEX_APP_SERVER_MANAGED_CONFIG_PATH";

fn main() -> anyhow::Result<()> {
    arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
        run_main(codex_linux_sandbox_exe, CliConfigOverrides::default()).await?;
        let managed_config_path = managed_config_path_from_debug_env();
        let loader_overrides = LoaderOverrides {
            managed_config_path,
            ..Default::default()
        };

        run_main(
            codex_linux_sandbox_exe,
            CliConfigOverrides::default(),
            loader_overrides,
        )
        .await?;
        Ok(())
    })
}

fn managed_config_path_from_debug_env() -> Option<PathBuf> {
    #[cfg(debug_assertions)]
    {
        if let Ok(value) = std::env::var(MANAGED_CONFIG_PATH_ENV_VAR) {
            return if value.is_empty() {
                None
            } else {
                Some(PathBuf::from(value))
            };
        }
    }

    None
}

@@ -18,8 +18,9 @@ use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_feedback::CodexFeedback;
@@ -41,6 +42,7 @@ impl MessageProcessor {
        codex_linux_sandbox_exe: Option<PathBuf>,
        config: Arc<Config>,
        cli_overrides: Vec<(String, TomlValue)>,
        loader_overrides: LoaderOverrides,
        feedback: CodexFeedback,
    ) -> Self {
        let outgoing = Arc::new(outgoing);
@@ -49,20 +51,21 @@ impl MessageProcessor {
            false,
            config.cli_auth_credentials_store_mode,
        );
        let conversation_manager = Arc::new(ConversationManager::new(
        let thread_manager = Arc::new(ThreadManager::new(
            config.codex_home.clone(),
            auth_manager.clone(),
            SessionSource::VSCode,
        ));
        let codex_message_processor = CodexMessageProcessor::new(
            auth_manager,
            conversation_manager,
            thread_manager,
            outgoing.clone(),
            codex_linux_sandbox_exe,
            Arc::clone(&config),
            cli_overrides.clone(),
            feedback,
        );
        let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);
        let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides, loader_overrides);

        Self {
            outgoing,

@@ -2,19 +2,17 @@ use std::sync::Arc;

use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ConversationManager;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;

pub async fn supported_models(
    conversation_manager: Arc<ConversationManager>,
    config: &Config,
) -> Vec<Model> {
    conversation_manager
pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
    thread_manager
        .list_models(config)
        .await
        .into_iter()
        .filter(|preset| preset.show_in_picker)
        .map(model_from_preset)
        .collect()
}

@@ -9,12 +9,12 @@ path = "lib.rs"

[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [

@@ -11,7 +11,6 @@ use tokio::process::ChildStdin;
use tokio::process::ChildStdout;

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -46,10 +45,10 @@ use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadRollbackParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use std::process::Command as StdCommand;
use tokio::process::Command;

pub struct McpProcess {
@@ -78,12 +77,8 @@ impl McpProcess {
        codex_home: &Path,
        env_overrides: &[(&str, Option<&str>)],
    ) -> anyhow::Result<Self> {
        // Use assert_cmd to locate the binary path and then switch to tokio::process::Command
        let std_cmd = StdCommand::cargo_bin("codex-app-server")
            .context("should find binary for codex-mcp-server")?;

        let program = std_cmd.get_program().to_owned();

        let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
            .context("should find binary for codex-app-server")?;
        let mut cmd = Command::new(program);

        cmd.stdin(Stdio::piped());
@@ -203,7 +198,7 @@ impl McpProcess {
    }

    /// Send a `removeConversationListener` JSON-RPC request.
    pub async fn send_remove_conversation_listener_request(
    pub async fn send_remove_thread_listener_request(
        &mut self,
        params: RemoveConversationListenerParams,
    ) -> anyhow::Result<i64> {
@@ -322,6 +317,15 @@ impl McpProcess {
        self.send_request("thread/archive", params).await
    }

    /// Send a `thread/rollback` JSON-RPC request.
    pub async fn send_thread_rollback_request(
        &mut self,
        params: ThreadRollbackParams,
    ) -> anyhow::Result<i64> {
        let params = Some(serde_json::to_value(params)?);
        self.send_request("thread/rollback", params).await
    }

    /// Send a `thread/list` JSON-RPC request.
    pub async fn send_thread_list_request(
        &mut self,

@@ -1,12 +1,10 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::models_manager::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
@@ -17,7 +15,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
        slug: preset.id.clone(),
        display_name: preset.display_name.clone(),
        description: Some(preset.description.clone()),
        default_reasoning_level: preset.default_reasoning_effort,
        default_reasoning_level: Some(preset.default_reasoning_effort),
        supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
        shell_type: ConfigShellToolType::ShellCommand,
        visibility: if preset.show_in_picker {
@@ -25,24 +23,23 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
        } else {
            ModelVisibility::Hide
        },
        minimal_client_version: ClientVersion(0, 1, 0),
        supported_in_api: true,
        priority,
        upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
        base_instructions: None,
        base_instructions: "base instructions".to_string(),
        supports_reasoning_summaries: false,
        support_verbosity: false,
        default_verbosity: None,
        apply_patch_tool_type: None,
        truncation_policy: TruncationPolicyConfig::bytes(10_000),
        supports_parallel_tool_calls: false,
        context_window: None,
        reasoning_summary_format: ReasoningSummaryFormat::None,
        context_window: Some(272_000),
        auto_compact_token_limit: None,
        effective_context_window_percent: 95,
        experimental_supported_tools: Vec::new(),
    }
}

// todo(aibrahim): fix the priorities to be the opposite here.
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
@@ -53,14 +50,14 @@ pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
        .iter()
        .filter(|preset| preset.show_in_picker)
        .collect();
    // Convert presets to ModelInfo, assigning priorities (higher = earlier in list)
    // Priority is used for sorting, so first model gets highest priority
    // Convert presets to ModelInfo, assigning priorities (lower = earlier in list).
    // Priority is used for sorting, so the first model gets the lowest priority.
    let models: Vec<ModelInfo> = presets
        .iter()
        .enumerate()
        .map(|(idx, preset)| {
            // Higher priority = earlier in list, so reverse the index
            let priority = (presets.len() - idx) as i32;
            // Lower priority = earlier in list.
            let priority = idx as i32;
            preset_to_info(preset, priority)
        })
        .collect();

@@ -1,5 +1,5 @@
use anyhow::Result;
use codex_protocol::ConversationId;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo;
use codex_protocol::protocol::SessionMeta;
use codex_protocol::protocol::SessionMetaLine;
@@ -28,7 +28,7 @@ pub fn create_fake_rollout(
) -> Result<String> {
    let uuid = Uuid::new_v4();
    let uuid_str = uuid.to_string();
    let conversation_id = ConversationId::from_string(&uuid_str)?;
    let conversation_id = ThreadId::from_string(&uuid_str)?;

    // sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
    let year = &filename_ts[0..4];

@@ -145,9 +145,7 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {

    // 4) removeConversationListener
    let remove_listener_id = mcp
        .send_remove_conversation_listener_request(RemoveConversationListenerParams {
            subscription_id,
        })
        .send_remove_thread_listener_request(RemoveConversationListenerParams { subscription_id })
        .await?;
    let remove_listener_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
@@ -305,6 +303,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
            model: "mock-model".to_string(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: None,
        })
        .await?;
    // Acknowledge sendUserTurn
@@ -418,6 +417,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
            model: model.clone(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: None,
        })
        .await?;
    timeout(
@@ -443,6 +443,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
            model: model.clone(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: None,
        })
        .await?;
    timeout(

@@ -6,7 +6,7 @@ use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::ListConversationsResponse;
use codex_app_server_protocol::NewConversationParams; // reused for overrides shape
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ResumeConversationParams;
use codex_app_server_protocol::ResumeConversationResponse;

@@ -1,12 +1,13 @@
mod archive_conversation;
mod archive_thread;
mod auth;
mod codex_message_processor_flow;
mod config;
mod create_conversation;
mod create_thread;
mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod output_schema;
mod send_message;
mod set_default_model;
mod user_agent;

codex-rs/app-server/tests/suite/output_schema.rs (new file, 282 lines)
@@ -0,0 +1,282 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn send_user_turn_accepts_output_schema_v1() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let response_mock = responses::mount_sse_once(&server, body).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id, ..
    } = to_response::<NewConversationResponse>(new_conv_resp)?;

    let listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
    )
    .await??;

    let output_schema = serde_json::json!({
        "type": "object",
        "properties": {
            "answer": { "type": "string" }
        },
        "required": ["answer"],
        "additionalProperties": false
    });

    let send_turn_id = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "Hello".to_string(),
            }],
            cwd: codex_home.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            model: "mock-model".to_string(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: Some(output_schema.clone()),
        })
        .await?;
    let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
        )
        .await??,
    )?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    let request = response_mock.single_request();
    let payload = request.body_json();
    let text = payload.get("text").expect("request missing text field");
    let format = text
        .get("format")
        .expect("request missing text.format field");
    assert_eq!(
        format,
        &serde_json::json!({
            "name": "codex_output_schema",
            "type": "json_schema",
            "strict": true,
            "schema": output_schema,
        })
    );

    Ok(())
}

#[tokio::test]
async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let body1 = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let response_mock1 = responses::mount_sse_once(&server, body1).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id, ..
    } = to_response::<NewConversationResponse>(new_conv_resp)?;

    let listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
    )
    .await??;

    let output_schema = serde_json::json!({
        "type": "object",
        "properties": {
            "answer": { "type": "string" }
        },
        "required": ["answer"],
        "additionalProperties": false
    });

    let send_turn_id = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "Hello".to_string(),
            }],
            cwd: codex_home.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            model: "mock-model".to_string(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: Some(output_schema.clone()),
        })
        .await?;
    let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
        )
        .await??,
    )?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    let payload1 = response_mock1.single_request().body_json();
    assert_eq!(
        payload1.pointer("/text/format"),
        Some(&serde_json::json!({
            "name": "codex_output_schema",
            "type": "json_schema",
            "strict": true,
            "schema": output_schema,
        }))
    );

    let body2 = responses::sse(vec![
        responses::ev_response_created("resp-2"),
        responses::ev_assistant_message("msg-2", "Done"),
        responses::ev_completed("resp-2"),
    ]);
    let response_mock2 = responses::mount_sse_once(&server, body2).await;

    let send_turn_id_2 = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "Hello again".to_string(),
            }],
            cwd: codex_home.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            model: "mock-model".to_string(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
            output_schema: None,
        })
        .await?;
    let _send_turn_resp_2: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id_2)),
        )
        .await??,
    )?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    let payload2 = response_mock2.single_request().body_json();
    assert_eq!(payload2.pointer("/text/format"), None);

    Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}

@@ -13,7 +13,7 @@ use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_protocol::ConversationId;
use codex_protocol::ThreadId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RawResponseItemEvent;
@@ -81,7 +81,7 @@ async fn test_send_message_success() -> Result<()> {
#[expect(clippy::expect_used)]
async fn send_message(
    message: &str,
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    mcp: &mut McpProcess,
) -> Result<()> {
    // Now exercise sendUserMessage.
@@ -220,7 +220,7 @@ async fn test_send_message_session_not_found() -> Result<()> {
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let unknown = ConversationId::new();
    let unknown = ThreadId::new();
    let req_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id: unknown,
@@ -268,10 +268,7 @@ stream_max_retries = 0
}

#[expect(clippy::expect_used)]
async fn read_raw_response_item(
    mcp: &mut McpProcess,
    conversation_id: ConversationId,
) -> ResponseItem {
async fn read_raw_response_item(mcp: &mut McpProcess, conversation_id: ThreadId) -> ResponseItem {
    loop {
        let raw_notification: JSONRPCNotification = timeout(
            DEFAULT_READ_TIMEOUT,

@@ -18,6 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -73,8 +74,7 @@ sandbox_mode = "workspace-write"
        }
    );
    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 1);
    assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
    assert_layers_user_then_optional_system(&layers, user_file)?;

    Ok(())
}
@@ -136,8 +136,7 @@ view_image = false
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 1);
    assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
    assert_layers_user_then_optional_system(&layers, user_file)?;

    Ok(())
}
@@ -185,7 +184,10 @@ writable_roots = [{}]

    let mut mcp = McpProcess::new_with_env(
        codex_home.path(),
        &[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))],
        &[(
            "CODEX_APP_SERVER_MANAGED_CONFIG_PATH",
            Some(&managed_path_str),
        )],
    )
    .await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -257,12 +259,7 @@ writable_roots = [{}]
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(
        layers[0].name,
        ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
    );
    assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
    assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?;

    Ok(())
}
@@ -433,3 +430,50 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {

    Ok(())
}

fn assert_layers_user_then_optional_system(
    layers: &[codex_app_server_protocol::ConfigLayer],
    user_file: AbsolutePathBuf,
) -> Result<()> {
    if cfg!(unix) {
        let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
        assert_eq!(layers.len(), 2);
        assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
        assert_eq!(
            layers[1].name,
            ConfigLayerSource::System { file: system_file }
        );
    } else {
        assert_eq!(layers.len(), 1);
        assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
    }
    Ok(())
}

fn assert_layers_managed_user_then_optional_system(
    layers: &[codex_app_server_protocol::ConfigLayer],
    managed_file: AbsolutePathBuf,
    user_file: AbsolutePathBuf,
) -> Result<()> {
    if cfg!(unix) {
        let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
        assert_eq!(layers.len(), 3);
        assert_eq!(
            layers[0].name,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
        );
        assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
        assert_eq!(
            layers[2].name,
            ConfigLayerSource::System { file: system_file }
        );
    } else {
        assert_eq!(layers.len(), 2);
        assert_eq!(
            layers[0].name,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
        );
        assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
    }
    Ok(())
}

@@ -1,11 +1,13 @@
mod account;
mod config_rpc;
mod model_list;
mod output_schema;
mod rate_limits;
mod review;
mod thread_archive;
mod thread_list;
mod thread_resume;
mod thread_rollback;
mod thread_start;
mod turn_interrupt;
mod turn_start;

@@ -48,57 +48,32 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {

    let expected_models = vec![
        Model {
            id: "gpt-5.2".to_string(),
            model: "gpt-5.2".to_string(),
            display_name: "gpt-5.2".to_string(),
            description:
                "Latest frontier model with improvements across knowledge, reasoning and coding"
                    .to_string(),
            id: "gpt-5.2-codex".to_string(),
            model: "gpt-5.2-codex".to_string(),
            display_name: "gpt-5.2-codex".to_string(),
            description: "Latest frontier agentic coding model.".to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
                    description: "Balances speed with some reasoning; useful for straightforward \
                        queries and short explanations"
                        .to_string(),
                    description: "Fast responses with lighter reasoning".to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Provides a solid balance of reasoning depth and latency for \
                        general-purpose tasks"
                    description: "Balances speed and reasoning depth for everyday tasks"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Maximizes reasoning depth for complex or ambiguous problems"
                        .to_string(),
                    description: "Greater reasoning depth for complex problems".to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::XHigh,
                    description: "Extra high reasoning for complex problems".to_string(),
                    description: "Extra high reasoning depth for complex problems".to_string(),
                },
            ],
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: true,
        },
        Model {
            id: "gpt-5.1-codex-mini".to_string(),
            model: "gpt-5.1-codex-mini".to_string(),
            display_name: "gpt-5.1-codex-mini".to_string(),
            description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Dynamically adjusts reasoning based on the task".to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Maximizes reasoning depth for complex or ambiguous problems"
                        .to_string(),
                },
            ],
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.1-codex-max".to_string(),
            model: "gpt-5.1-codex-max".to_string(),
@@ -127,23 +102,48 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
            is_default: false,
        },
        Model {
            id: "gpt-5.2-codex".to_string(),
            model: "gpt-5.2-codex".to_string(),
            display_name: "gpt-5.2-codex".to_string(),
            description: "Latest frontier agentic coding model.".to_string(),
            id: "gpt-5.1-codex-mini".to_string(),
            model: "gpt-5.1-codex-mini".to_string(),
            display_name: "gpt-5.1-codex-mini".to_string(),
            description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Dynamically adjusts reasoning based on the task".to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Maximizes reasoning depth for complex or ambiguous problems"
                        .to_string(),
                },
            ],
            default_reasoning_effort: ReasoningEffort::Medium,
            is_default: false,
        },
        Model {
            id: "gpt-5.2".to_string(),
            model: "gpt-5.2".to_string(),
            display_name: "gpt-5.2".to_string(),
            description:
                "Latest frontier model with improvements across knowledge, reasoning and coding"
                    .to_string(),
            supported_reasoning_efforts: vec![
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Low,
                    description: "Fast responses with lighter reasoning".to_string(),
                    description: "Balances speed with some reasoning; useful for straightforward \
                        queries and short explanations"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::Medium,
                    description: "Balances speed and reasoning depth for everyday tasks"
                    description: "Provides a solid balance of reasoning depth and latency for \
                        general-purpose tasks"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::High,
                    description: "Greater reasoning depth for complex problems".to_string(),
                    description: "Maximizes reasoning depth for complex or ambiguous problems"
                        .to_string(),
                },
                ReasoningEffortOption {
                    reasoning_effort: ReasoningEffort::XHigh,
@@ -187,7 +187,7 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(first_response)?;

    assert_eq!(first_items.len(), 1);
    assert_eq!(first_items[0].id, "gpt-5.2");
    assert_eq!(first_items[0].id, "gpt-5.2-codex");
    let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;

    let second_request = mcp
@@ -209,7 +209,7 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(second_response)?;

    assert_eq!(second_items.len(), 1);
    assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
    assert_eq!(second_items[0].id, "gpt-5.1-codex-max");
    let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;

    let third_request = mcp
@@ -231,7 +231,7 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(third_response)?;

    assert_eq!(third_items.len(), 1);
    assert_eq!(third_items[0].id, "gpt-5.1-codex-max");
    assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
    let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;

    let fourth_request = mcp
@@ -253,7 +253,7 @@ async fn list_models_pagination_works() -> Result<()> {
    } = to_response::<ModelListResponse>(fourth_response)?;

    assert_eq!(fourth_items.len(), 1);
    assert_eq!(fourth_items[0].id, "gpt-5.2-codex");
    assert_eq!(fourth_items[0].id, "gpt-5.2");
    assert!(fourth_cursor.is_none());
    Ok(())
}

codex-rs/app-server/tests/suite/v2/output_schema.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn turn_start_accepts_output_schema_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let body = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let response_mock = responses::mount_sse_once(&server, body).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

    let output_schema = serde_json::json!({
        "type": "object",
        "properties": {
            "answer": { "type": "string" }
        },
        "required": ["answer"],
        "additionalProperties": false
    });

    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Hello".to_string(),
            }],
            output_schema: Some(output_schema.clone()),
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let request = response_mock.single_request();
    let payload = request.body_json();
    let text = payload.get("text").expect("request missing text field");
    let format = text
        .get("format")
        .expect("request missing text.format field");
    assert_eq!(
        format,
        &serde_json::json!({
            "name": "codex_output_schema",
            "type": "json_schema",
            "strict": true,
            "schema": output_schema,
        })
    );

    Ok(())
}

#[tokio::test]
async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = responses::start_mock_server().await;
    let body1 = responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "Done"),
        responses::ev_completed("resp-1"),
    ]);
    let response_mock1 = responses::mount_sse_once(&server, body1).await;

    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

    let output_schema = serde_json::json!({
        "type": "object",
        "properties": {
            "answer": { "type": "string" }
        },
        "required": ["answer"],
        "additionalProperties": false
    });

    let turn_req_1 = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Hello".to_string(),
            }],
            output_schema: Some(output_schema.clone()),
            ..Default::default()
        })
        .await?;
    let turn_resp_1: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req_1)),
    )
    .await??;
    let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_1)?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let payload1 = response_mock1.single_request().body_json();
    assert_eq!(
        payload1.pointer("/text/format"),
        Some(&serde_json::json!({
            "name": "codex_output_schema",
            "type": "json_schema",
            "strict": true,
            "schema": output_schema,
        }))
    );

    let body2 = responses::sse(vec![
        responses::ev_response_created("resp-2"),
        responses::ev_assistant_message("msg-2", "Done"),
        responses::ev_completed("resp-2"),
    ]);
    let response_mock2 = responses::mount_sse_once(&server, body2).await;

    let turn_req_2 = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id.clone(),
            input: vec![V2UserInput::Text {
                text: "Hello again".to_string(),
            }],
            output_schema: None,
            ..Default::default()
        })
        .await?;
    let turn_resp_2: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req_2)),
    )
    .await??;
    let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_2)?;

    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;

    let payload2 = response_mock2.single_request().body_json();
    assert_eq!(payload2.pointer("/text/format"), None);

    Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}

@@ -8,7 +8,7 @@ use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_core::find_conversation_path_by_id_str;
|
||||
use codex_core::find_thread_path_by_id_str;
|
||||
use std::path::Path;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
@@ -39,7 +39,7 @@ async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> {
|
||||
assert!(!thread.id.is_empty());
|
||||
|
||||
// Locate the rollout path recorded for this thread id.
|
||||
let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id)
|
||||
let rollout_path = find_thread_path_by_id_str(codex_home.path(), &thread.id)
|
||||
.await?
|
||||
.expect("expected rollout path for thread id to exist");
|
||||
assert!(
|
||||
|
||||
177
codex-rs/app-server/tests/suite/v2/thread_rollback.rs
Normal file
177
codex-rs/app-server/tests/suite/v2/thread_rollback.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use anyhow::Result;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_final_assistant_message_sse_response;
|
||||
use app_test_support::create_mock_chat_completions_server_unchecked;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadResumeParams;
|
||||
use codex_app_server_protocol::ThreadResumeResponse;
|
||||
use codex_app_server_protocol::ThreadRollbackParams;
|
||||
use codex_app_server_protocol::ThreadRollbackResponse;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()> {
|
||||
// Three Codex turns hit the mock model (session start + two turn/start calls).
|
||||
let responses = vec![
|
||||
create_final_assistant_message_sse_response("Done")?,
|
||||
create_final_assistant_message_sse_response("Done")?,
|
||||
create_final_assistant_message_sse_response("Done")?,
|
||||
];
|
||||
let server = create_mock_chat_completions_server_unchecked(responses).await;
|
||||
|
||||
let codex_home = TempDir::new()?;
|
||||
create_config_toml(codex_home.path(), &server.uri())?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
// Start a thread.
|
||||
let start_id = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
model: Some("mock-model".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
|
||||
|
||||
// Two turns.
|
||||
let first_text = "First";
|
||||
let turn1_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: first_text.to_string(),
|
||||
}],
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let _turn1_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn1_id)),
|
||||
)
|
||||
.await??;
|
||||
let _completed1 = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let turn2_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "Second".to_string(),
|
||||
}],
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let _turn2_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn2_id)),
|
||||
)
|
||||
.await??;
|
||||
let _completed2 = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
// Roll back the last turn.
|
||||
let rollback_id = mcp
|
||||
.send_thread_rollback_request(ThreadRollbackParams {
|
||||
thread_id: thread.id.clone(),
|
||||
num_turns: 1,
|
||||
})
|
||||
.await?;
|
||||
let rollback_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(rollback_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadRollbackResponse {
|
||||
thread: rolled_back_thread,
|
||||
} = to_response::<ThreadRollbackResponse>(rollback_resp)?;
|
||||
|
||||
assert_eq!(rolled_back_thread.turns.len(), 1);
|
||||
assert_eq!(rolled_back_thread.turns[0].items.len(), 2);
|
||||
match &rolled_back_thread.turns[0].items[0] {
|
||||
ThreadItem::UserMessage { content, .. } => {
|
||||
assert_eq!(
|
||||
content,
|
||||
&vec![V2UserInput::Text {
|
||||
text: first_text.to_string()
|
||||
}]
|
||||
);
|
||||
}
|
||||
other => panic!("expected user message item, got {other:?}"),
|
||||
}
|
||||
|
||||
// Resume and confirm the history is pruned.
|
||||
let resume_id = mcp
|
||||
.send_thread_resume_request(ThreadResumeParams {
|
||||
thread_id: thread.id,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let resume_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
|
||||
|
||||
assert_eq!(thread.turns.len(), 1);
|
||||
assert_eq!(thread.turns[0].items.len(), 2);
|
||||
match &thread.turns[0].items[0] {
|
||||
ThreadItem::UserMessage { content, .. } => {
|
||||
assert_eq!(
|
||||
content,
|
||||
&vec![V2UserInput::Text {
|
||||
text: first_text.to_string()
|
||||
}]
|
||||
);
|
||||
}
|
||||
other => panic!("expected user message item, got {other:?}"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
|
||||
let config_toml = codex_home.join("config.toml");
|
||||
std::fs::write(
|
||||
config_toml,
|
||||
format!(
|
||||
r#"
|
||||
model = "mock-model"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
model_provider = "mock_provider"
|
||||
|
||||
[model_providers.mock_provider]
|
||||
name = "Mock provider for test"
|
||||
base_url = "{server_uri}/v1"
|
||||
wire_api = "chat"
|
||||
request_max_retries = 0
|
||||
stream_max_retries = 0
|
||||
"#
|
||||
),
|
||||
)
|
||||
}
|
||||
@@ -8,9 +8,10 @@ use app_test_support::create_mock_chat_completions_server_unchecked;
|
||||
use app_test_support::create_shell_command_sse_response;
|
||||
use app_test_support::format_with_current_shell_display;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::ApprovalDecision;
|
||||
use codex_app_server_protocol::CommandExecutionApprovalDecision;
|
||||
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
|
||||
use codex_app_server_protocol::CommandExecutionStatus;
|
||||
use codex_app_server_protocol::FileChangeApprovalDecision;
|
||||
use codex_app_server_protocol::FileChangeOutputDeltaNotification;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
|
||||
use codex_app_server_protocol::ItemCompletedNotification;
|
||||
@@ -426,7 +427,7 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(CommandExecutionRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Decline,
|
||||
decision: CommandExecutionApprovalDecision::Decline,
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
@@ -540,6 +541,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
model: Some("mock-model".to_string()),
|
||||
effort: Some(ReasoningEffort::Medium),
|
||||
summary: Some(ReasoningSummary::Auto),
|
||||
output_schema: None,
|
||||
})
|
||||
.await?;
|
||||
timeout(
|
||||
@@ -566,6 +568,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
model: Some("mock-model".to_string()),
|
||||
effort: Some(ReasoningEffort::Medium),
|
||||
summary: Some(ReasoningSummary::Auto),
|
||||
output_schema: None,
|
||||
})
|
||||
.await?;
|
||||
timeout(
|
||||
@@ -720,7 +723,7 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(FileChangeRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Accept,
|
||||
decision: FileChangeApprovalDecision::Accept,
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
@@ -780,6 +783,190 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
|
||||
let tmp = TempDir::new()?;
|
||||
let codex_home = tmp.path().join("codex_home");
|
||||
std::fs::create_dir(&codex_home)?;
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let patch_1 = r#"*** Begin Patch
|
||||
*** Add File: README.md
|
||||
+new line
|
||||
*** End Patch
|
||||
"#;
|
||||
let patch_2 = r#"*** Begin Patch
|
||||
*** Update File: README.md
|
||||
@@
|
||||
-new line
|
||||
+updated line
|
||||
*** End Patch
|
||||
"#;
|
||||
|
||||
let responses = vec![
|
||||
create_apply_patch_sse_response(patch_1, "patch-call-1")?,
|
||||
create_final_assistant_message_sse_response("patch 1 applied")?,
|
||||
create_apply_patch_sse_response(patch_2, "patch-call-2")?,
|
||||
create_final_assistant_message_sse_response("patch 2 applied")?,
|
||||
];
|
||||
let server = create_mock_chat_completions_server(responses).await;
|
||||
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
|
||||
|
||||
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_req = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
model: Some("mock-model".to_string()),
|
||||
cwd: Some(workspace.to_string_lossy().into_owned()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(start_req)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
|
||||
|
||||
// First turn: expect FileChangeRequestApproval, respond with AcceptForSession, and verify the file exists.
|
||||
let turn_1_req = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "apply patch 1".into(),
|
||||
}],
|
||||
cwd: Some(workspace.clone()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let turn_1_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_1_req)),
|
||||
)
|
||||
.await??;
|
||||
let TurnStartResponse { turn: turn_1 } = to_response::<TurnStartResponse>(turn_1_resp)?;
|
||||
|
||||
let started_file_change_1 = timeout(DEFAULT_READ_TIMEOUT, async {
|
||||
loop {
|
||||
let started_notif = mcp
|
||||
.read_stream_until_notification_message("item/started")
|
||||
.await?;
|
||||
let started: ItemStartedNotification =
|
||||
serde_json::from_value(started_notif.params.clone().expect("item/started params"))?;
|
||||
if let ThreadItem::FileChange { .. } = started.item {
|
||||
return Ok::<ThreadItem, anyhow::Error>(started.item);
|
||||
}
|
||||
}
|
||||
})
|
||||
.await??;
|
||||
let ThreadItem::FileChange { id, status, .. } = started_file_change_1 else {
|
||||
unreachable!("loop ensures we break on file change items");
|
||||
};
|
||||
assert_eq!(id, "patch-call-1");
|
||||
assert_eq!(status, PatchApplyStatus::InProgress);
|
||||
|
||||
let server_req = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_request_message(),
|
||||
)
|
||||
.await??;
|
||||
let ServerRequest::FileChangeRequestApproval { request_id, params } = server_req else {
|
||||
panic!("expected FileChangeRequestApproval request")
|
||||
};
|
||||
assert_eq!(params.item_id, "patch-call-1");
|
||||
assert_eq!(params.thread_id, thread.id);
|
||||
assert_eq!(params.turn_id, turn_1.id);
|
||||
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(FileChangeRequestApprovalResponse {
|
||||
decision: FileChangeApprovalDecision::AcceptForSession,
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("item/fileChange/outputDelta"),
|
||||
)
|
||||
.await??;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("item/completed"),
|
||||
)
|
||||
.await??;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let readme_path = workspace.join("README.md");
|
||||
assert_eq!(std::fs::read_to_string(&readme_path)?, "new line\n");
|
||||
|
||||
// Second turn: apply a patch to the same file. Approval should be skipped due to AcceptForSession.
|
||||
let turn_2_req = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "apply patch 2".into(),
|
||||
}],
|
||||
cwd: Some(workspace.clone()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_2_req)),
|
||||
)
|
||||
.await??;
|
||||
|
||||
let started_file_change_2 = timeout(DEFAULT_READ_TIMEOUT, async {
|
||||
loop {
|
||||
let started_notif = mcp
|
||||
.read_stream_until_notification_message("item/started")
|
||||
.await?;
|
||||
let started: ItemStartedNotification =
|
||||
serde_json::from_value(started_notif.params.clone().expect("item/started params"))?;
|
||||
if let ThreadItem::FileChange { .. } = started.item {
|
||||
return Ok::<ThreadItem, anyhow::Error>(started.item);
|
||||
}
|
||||
}
|
||||
})
|
||||
.await??;
|
||||
let ThreadItem::FileChange { id, status, .. } = started_file_change_2 else {
|
||||
unreachable!("loop ensures we break on file change items");
|
||||
};
|
||||
assert_eq!(id, "patch-call-2");
|
||||
assert_eq!(status, PatchApplyStatus::InProgress);
|
||||
|
||||
// If the server incorrectly emits FileChangeRequestApproval, the helper below will error
|
||||
// (it bails on unexpected JSONRPCMessage::Request), causing the test to fail.
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("item/fileChange/outputDelta"),
|
||||
)
|
||||
.await??;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("item/completed"),
|
||||
)
|
||||
.await??;
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("codex/event/task_complete"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
assert_eq!(std::fs::read_to_string(readme_path)?, "updated line\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
|
||||
skip_if_no_network!(Ok(()));
|
||||
@@ -886,7 +1073,7 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(FileChangeRequestApprovalResponse {
|
||||
decision: ApprovalDecision::Decline,
|
||||
decision: FileChangeApprovalDecision::Decline,
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -25,5 +25,6 @@ tree-sitter-bash = { workspace = true }
|
||||
[dev-dependencies]
|
||||
assert_cmd = { workspace = true }
|
||||
assert_matches = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -227,11 +227,14 @@ fn check_start_and_end_lines_strict(
|
||||
first_line: Option<&&str>,
|
||||
last_line: Option<&&str>,
|
||||
) -> Result<(), ParseError> {
|
||||
let first_line = first_line.map(|line| line.trim());
|
||||
let last_line = last_line.map(|line| line.trim());
|
||||
|
||||
match (first_line, last_line) {
|
||||
(Some(&first), Some(&last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => {
|
||||
(Some(first), Some(last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => {
|
||||
Ok(())
|
||||
}
|
||||
(Some(&first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from(
|
||||
(Some(first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from(
|
||||
"The first line of the patch must be '*** Begin Patch'",
|
||||
))),
|
||||
_ => Err(InvalidPatchError(String::from(
|
||||
@@ -444,6 +447,25 @@ fn test_parse_patch() {
|
||||
"The last line of the patch must be '*** End Patch'".to_string()
|
||||
))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
parse_patch_text(
|
||||
concat!(
|
||||
"*** Begin Patch",
|
||||
" ",
|
||||
"\n*** Add File: foo\n+hi\n",
|
||||
" ",
|
||||
"*** End Patch"
|
||||
),
|
||||
ParseMode::Strict
|
||||
)
|
||||
.unwrap()
|
||||
.hunks,
|
||||
vec![AddFile {
|
||||
path: PathBuf::from("foo"),
|
||||
contents: "hi\n".to_string()
|
||||
}]
|
||||
);
|
||||
assert_eq!(
|
||||
parse_patch_text(
|
||||
"*** Begin Patch\n\
|
||||
|
||||
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/expected/keep.txt
vendored
Normal file
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/expected/keep.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
keep
|
||||
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/keep.txt
vendored
Normal file
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/keep.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
keep
|
||||
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/obsolete.txt
vendored
Normal file
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/obsolete.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
obsolete
|
||||
3
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/patch.txt
vendored
Normal file
3
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/patch.txt
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
*** Begin Patch
|
||||
*** Delete File: obsolete.txt
|
||||
*** End Patch
|
||||
@@ -0,0 +1 @@
|
||||
two
|
||||
@@ -0,0 +1 @@
|
||||
one
|
||||
@@ -0,0 +1,6 @@
|
||||
*** Begin Patch
|
||||
*** Update File: file.txt
|
||||
@@
|
||||
-one
|
||||
+two
|
||||
*** End Patch
|
||||
@@ -0,0 +1,2 @@
|
||||
line1
|
||||
line3
|
||||
3
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/input/lines.txt
vendored
Normal file
3
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/input/lines.txt
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
line1
|
||||
line2
|
||||
line3
|
||||
7
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/patch.txt
vendored
Normal file
7
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/patch.txt
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
*** Begin Patch
|
||||
*** Update File: lines.txt
|
||||
@@
|
||||
line1
|
||||
-line2
|
||||
line3
|
||||
*** End Patch
|
||||
@@ -0,0 +1,2 @@
|
||||
first
|
||||
second updated
|
||||
@@ -0,0 +1,2 @@
|
||||
first
|
||||
second
|
||||
8
codex-rs/apply-patch/tests/fixtures/scenarios/022_update_file_end_of_file_marker/patch.txt
vendored
Normal file
8
codex-rs/apply-patch/tests/fixtures/scenarios/022_update_file_end_of_file_marker/patch.txt
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
*** Begin Patch
|
||||
*** Update File: tail.txt
|
||||
@@
|
||||
first
|
||||
-second
|
||||
+second updated
|
||||
*** End of File
|
||||
*** End Patch
|
||||
@@ -1,8 +1,13 @@
|
||||
use assert_cmd::prelude::*;
|
||||
use assert_cmd::Command;
|
||||
use std::fs;
|
||||
use std::process::Command;
|
||||
use tempfile::tempdir;
|
||||
|
||||
fn apply_patch_command() -> anyhow::Result<Command> {
|
||||
Ok(Command::new(codex_utils_cargo_bin::cargo_bin(
|
||||
"apply_patch",
|
||||
)?))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
@@ -16,8 +21,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
|
||||
+hello
|
||||
*** End Patch"#
|
||||
);
|
||||
Command::cargo_bin("apply_patch")
|
||||
.expect("should find apply_patch binary")
|
||||
apply_patch_command()?
|
||||
.arg(add_patch)
|
||||
.current_dir(tmp.path())
|
||||
.assert()
|
||||
@@ -34,8 +38,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
|
||||
+world
|
||||
*** End Patch"#
|
||||
);
|
||||
Command::cargo_bin("apply_patch")
|
||||
.expect("should find apply_patch binary")
|
||||
apply_patch_command()?
|
||||
.arg(update_patch)
|
||||
.current_dir(tmp.path())
|
||||
.assert()
|
||||
@@ -59,10 +62,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
|
||||
+hello
|
||||
*** End Patch"#
|
||||
);
|
||||
let mut cmd =
|
||||
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
|
||||
cmd.current_dir(tmp.path());
|
||||
cmd.write_stdin(add_patch)
|
||||
apply_patch_command()?
|
||||
.current_dir(tmp.path())
|
||||
.write_stdin(add_patch)
|
||||
.assert()
|
||||
.success()
|
||||
.stdout(format!("Success. Updated the following files:\nA {file}\n"));
|
||||
@@ -77,10 +79,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
|
||||
+world
|
||||
*** End Patch"#
|
||||
);
|
||||
let mut cmd =
|
||||
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
|
||||
cmd.current_dir(tmp.path());
|
||||
cmd.write_stdin(update_patch)
|
||||
apply_patch_command()?
|
||||
.current_dir(tmp.path())
|
||||
.write_stdin(update_patch)
|
||||
.assert()
|
||||
.success()
|
||||
.stdout(format!("Success. Updated the following files:\nM {file}\n"));
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use assert_cmd::prelude::*;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fs;
|
||||
@@ -9,7 +8,8 @@ use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
|
||||
for scenario in fs::read_dir("tests/fixtures/scenarios")? {
|
||||
let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios");
|
||||
for scenario in fs::read_dir(scenarios_dir)? {
|
||||
let scenario = scenario?;
|
||||
let path = scenario.path();
|
||||
if path.is_dir() {
|
||||
@@ -36,7 +36,7 @@ fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
|
||||
// Run apply_patch in the temporary directory. We intentionally do not assert
|
||||
// on the exit status here; the scenarios are specified purely in terms of
|
||||
// final filesystem state, which we compare below.
|
||||
Command::cargo_bin("apply_patch")?
|
||||
Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?)
|
||||
.arg(patch)
|
||||
.current_dir(tmp.path())
|
||||
.output()?;
|
||||
@@ -82,11 +82,15 @@ fn snapshot_dir_recursive(
|
||||
continue;
|
||||
};
|
||||
let rel = stripped.to_path_buf();
|
||||
let file_type = entry.file_type()?;
|
||||
if file_type.is_dir() {
|
||||
|
||||
// Under Buck2, files in `__srcs` are often materialized as symlinks.
|
||||
// Use `metadata()` (follows symlinks) so our fixture snapshots work
|
||||
// under both Cargo and Buck2.
|
||||
let metadata = fs::metadata(&path)?;
|
||||
if metadata.is_dir() {
|
||||
entries.insert(rel.clone(), Entry::Dir);
|
||||
snapshot_dir_recursive(base, &path, entries)?;
|
||||
} else if file_type.is_file() {
|
||||
} else if metadata.is_file() {
|
||||
let contents = fs::read(&path)?;
|
||||
entries.insert(rel, Entry::File(contents));
|
||||
}
|
||||
@@ -98,12 +102,14 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
|
||||
for entry in fs::read_dir(src)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
let file_type = entry.file_type()?;
|
||||
let dest_path = dst.join(entry.file_name());
|
||||
if file_type.is_dir() {
|
||||
|
||||
// See note in `snapshot_dir_recursive` about Buck2 symlink trees.
|
||||
let metadata = fs::metadata(&path)?;
|
||||
if metadata.is_dir() {
|
||||
fs::create_dir_all(&dest_path)?;
|
||||
copy_dir_recursive(&path, &dest_path)?;
|
||||
} else if file_type.is_file() {
|
||||
} else if metadata.is_file() {
|
||||
if let Some(parent) = dest_path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
|
||||
@@ -5,13 +5,13 @@ use std::path::Path;
|
||||
use tempfile::tempdir;
|
||||
|
||||
fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result<assert_cmd::assert::Assert> {
|
||||
let mut cmd = Command::cargo_bin("apply_patch")?;
|
||||
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
|
||||
cmd.current_dir(dir);
|
||||
Ok(cmd.arg(patch).assert())
|
||||
}
|
||||
|
||||
fn apply_patch_command(dir: &Path) -> anyhow::Result<Command> {
|
||||
let mut cmd = Command::cargo_bin("apply_patch")?;
|
||||
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
|
||||
cmd.current_dir(dir);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-
|
||||
[dev-dependencies]
|
||||
assert_cmd = { workspace = true }
|
||||
assert_matches = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
predicates = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -283,7 +283,7 @@ struct StdioToUdsCommand {
|
||||
fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<String> {
|
||||
let AppExitInfo {
|
||||
token_usage,
|
||||
conversation_id,
|
||||
thread_id: conversation_id,
|
||||
..
|
||||
} = exit_info;
|
||||
|
||||
@@ -480,7 +480,12 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
|
||||
}
|
||||
Some(Subcommand::AppServer(app_server_cli)) => match app_server_cli.subcommand {
|
||||
None => {
|
||||
codex_app_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
|
||||
codex_app_server::run_main(
|
||||
codex_linux_sandbox_exe,
|
||||
root_config_overrides,
|
||||
codex_core::config_loader::LoaderOverrides::default(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Some(AppServerSubcommand::GenerateTs(gen_cli)) => {
|
||||
codex_app_server_protocol::generate_ts(
|
||||
@@ -785,7 +790,7 @@ mod tests {
|
||||
use super::*;
|
||||
use assert_matches::assert_matches;
|
||||
use codex_core::protocol::TokenUsage;
|
||||
use codex_protocol::ConversationId;
|
||||
use codex_protocol::ThreadId;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
fn finalize_from_args(args: &[&str]) -> TuiCli {
|
||||
@@ -825,9 +830,7 @@ mod tests {
|
||||
};
|
||||
AppExitInfo {
|
||||
token_usage,
|
||||
conversation_id: conversation
|
||||
.map(ConversationId::from_string)
|
||||
.map(Result::unwrap),
|
||||
thread_id: conversation.map(ThreadId::from_string).map(Result::unwrap),
|
||||
update_action: None,
|
||||
}
|
||||
}
|
||||
@@ -836,7 +839,7 @@ mod tests {
|
||||
fn format_exit_messages_skips_zero_usage() {
|
||||
let exit_info = AppExitInfo {
|
||||
token_usage: TokenUsage::default(),
|
||||
conversation_id: None,
|
||||
thread_id: None,
|
||||
update_action: None,
|
||||
};
|
||||
let lines = format_exit_messages(exit_info, false);
|
||||
|
||||
@@ -24,7 +24,7 @@ prefix_rule(
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let output = Command::cargo_bin("codex")?
|
||||
let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
|
||||
.env("CODEX_HOME", codex_home.path())
|
||||
.args([
|
||||
"execpolicy",
|
||||
@@ -59,3 +59,61 @@ prefix_rule(
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn execpolicy_check_includes_justification_when_present() -> Result<(), Box<dyn std::error::Error>>
|
||||
{
|
||||
let codex_home = TempDir::new()?;
|
||||
let policy_path = codex_home.path().join("rules").join("policy.rules");
|
||||
fs::create_dir_all(
|
||||
policy_path
|
||||
.parent()
|
||||
.expect("policy path should have a parent"),
|
||||
)?;
|
||||
fs::write(
|
||||
&policy_path,
|
||||
r#"
|
||||
prefix_rule(
|
||||
pattern = ["git", "push"],
|
||||
decision = "forbidden",
|
||||
justification = "pushing is blocked in this repo",
|
||||
)
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
|
||||
.env("CODEX_HOME", codex_home.path())
|
||||
.args([
|
||||
"execpolicy",
|
||||
"check",
|
||||
"--rules",
|
||||
policy_path
|
||||
.to_str()
|
||||
.expect("policy path should be valid UTF-8"),
|
||||
"git",
|
||||
"push",
|
||||
"origin",
|
||||
"main",
|
||||
])
|
||||
.output()?;
|
||||
|
||||
assert!(output.status.success());
|
||||
let result: serde_json::Value = serde_json::from_slice(&output.stdout)?;
|
||||
assert_eq!(
|
||||
result,
|
||||
json!({
|
||||
"decision": "forbidden",
|
||||
"matchedRules": [
|
||||
{
|
||||
"prefixRuleMatch": {
|
||||
"matchedPrefix": ["git", "push"],
|
||||
"decision": "forbidden",
|
||||
"justification": "pushing is blocked in this repo"
|
||||
}
|
||||
}
|
||||
]
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
|
||||
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
|
||||
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
|
||||
cmd.env("CODEX_HOME", codex_home);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use serde_json::json;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
|
||||
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
|
||||
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
|
||||
cmd.env("CODEX_HOME", codex_home);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -59,6 +59,7 @@ pub enum ResponseEvent {
|
||||
summary_index: i64,
|
||||
},
|
||||
RateLimits(RateLimitSnapshot),
|
||||
ModelsEtag(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
|
||||
@@ -152,6 +152,9 @@ impl Stream for AggregatedStream {
|
||||
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
|
||||
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
|
||||
}
|
||||
Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))) => {
|
||||
return Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag))));
|
||||
}
|
||||
Poll::Ready(Some(Ok(ResponseEvent::Completed {
|
||||
response_id,
|
||||
token_usage,
|
||||
|
||||
@@ -5,6 +5,7 @@ use crate::provider::Provider;
|
||||
use crate::telemetry::run_with_request_telemetry;
|
||||
use codex_client::HttpTransport;
|
||||
use codex_client::RequestTelemetry;
|
||||
use codex_protocol::openai_models::ModelInfo;
|
||||
use codex_protocol::openai_models::ModelsResponse;
|
||||
use http::HeaderMap;
|
||||
use http::Method;
|
||||
@@ -41,7 +42,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
|
||||
&self,
|
||||
client_version: &str,
|
||||
extra_headers: HeaderMap,
|
||||
) -> Result<ModelsResponse, ApiError> {
|
||||
) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
|
||||
let builder = || {
|
||||
let mut req = self.provider.build_request(Method::GET, self.path());
|
||||
req.headers.extend(extra_headers.clone());
|
||||
@@ -66,7 +67,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.map(ToString::to_string);
|
||||
|
||||
let ModelsResponse { models, etag } = serde_json::from_slice::<ModelsResponse>(&resp.body)
|
||||
let ModelsResponse { models } = serde_json::from_slice::<ModelsResponse>(&resp.body)
|
||||
.map_err(|e| {
|
||||
ApiError::Stream(format!(
|
||||
"failed to decode models response: {e}; body: {}",
|
||||
@@ -74,9 +75,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
|
||||
))
|
||||
})?;
|
||||
|
||||
let etag = header_etag.unwrap_or(etag);
|
||||
|
||||
Ok(ModelsResponse { models, etag })
|
||||
Ok((models, header_etag))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,16 +101,15 @@ mod tests {
|
||||
struct CapturingTransport {
|
||||
last_request: Arc<Mutex<Option<Request>>>,
|
||||
body: Arc<ModelsResponse>,
|
||||
etag: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for CapturingTransport {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(ModelsResponse {
|
||||
models: Vec::new(),
|
||||
etag: String::new(),
|
||||
}),
|
||||
body: Arc::new(ModelsResponse { models: Vec::new() }),
|
||||
etag: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -122,8 +120,8 @@ mod tests {
|
||||
*self.last_request.lock().unwrap() = Some(req);
|
||||
let body = serde_json::to_vec(&*self.body).unwrap();
|
||||
let mut headers = HeaderMap::new();
|
||||
if !self.body.etag.is_empty() {
|
||||
headers.insert(ETAG, self.body.etag.parse().unwrap());
|
||||
if let Some(etag) = &self.etag {
|
||||
headers.insert(ETAG, etag.parse().unwrap());
|
||||
}
|
||||
Ok(Response {
|
||||
status: StatusCode::OK,
|
||||
@@ -166,14 +164,12 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn appends_client_version_query() {
|
||||
let response = ModelsResponse {
|
||||
models: Vec::new(),
|
||||
etag: String::new(),
|
||||
};
|
||||
let response = ModelsResponse { models: Vec::new() };
|
||||
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: None,
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -182,12 +178,12 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, _) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 0);
|
||||
assert_eq!(models.len(), 0);
|
||||
|
||||
let url = transport
|
||||
.last_request
|
||||
@@ -219,25 +215,24 @@ mod tests {
|
||||
"supported_in_api": true,
|
||||
"priority": 1,
|
||||
"upgrade": null,
|
||||
"base_instructions": null,
|
||||
"base_instructions": "base instructions",
|
||||
"supports_reasoning_summaries": false,
|
||||
"support_verbosity": false,
|
||||
"default_verbosity": null,
|
||||
"apply_patch_tool_type": null,
|
||||
"truncation_policy": {"mode": "bytes", "limit": 10_000},
|
||||
"supports_parallel_tool_calls": false,
|
||||
"context_window": null,
|
||||
"reasoning_summary_format": "none",
|
||||
"context_window": 272_000,
|
||||
"experimental_supported_tools": [],
|
||||
}))
|
||||
.unwrap(),
|
||||
],
|
||||
etag: String::new(),
|
||||
};
|
||||
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: None,
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -246,27 +241,25 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, _) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 1);
|
||||
assert_eq!(result.models[0].slug, "gpt-test");
|
||||
assert_eq!(result.models[0].supported_in_api, true);
|
||||
assert_eq!(result.models[0].priority, 1);
|
||||
assert_eq!(models.len(), 1);
|
||||
assert_eq!(models[0].slug, "gpt-test");
|
||||
assert_eq!(models[0].supported_in_api, true);
|
||||
assert_eq!(models[0].priority, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_models_includes_etag() {
|
||||
let response = ModelsResponse {
|
||||
models: Vec::new(),
|
||||
etag: "\"abc\"".to_string(),
|
||||
};
|
||||
let response = ModelsResponse { models: Vec::new() };
|
||||
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: Some("\"abc\"".to_string()),
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -275,12 +268,12 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, etag) = client
|
||||
.list_models("0.1.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 0);
|
||||
assert_eq!(result.etag, "\"abc\"");
|
||||
assert_eq!(models.len(), 0);
|
||||
assert_eq!(etag, Some("\"abc\"".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,24 +204,16 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
call_id,
|
||||
..
|
||||
} => {
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
}]
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
});
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::LocalShellCall {
|
||||
id,
|
||||
@@ -229,22 +221,14 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
status,
|
||||
action,
|
||||
} => {
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
}]
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
});
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::FunctionCallOutput { call_id, output } => {
|
||||
let content_value = if let Some(items) = &output.content_items {
|
||||
@@ -277,18 +261,16 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
input,
|
||||
status: _,
|
||||
} => {
|
||||
messages.push(json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
}]
|
||||
}));
|
||||
let tool_call = json!({
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
});
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::CustomToolCallOutput { call_id, output } => {
|
||||
messages.push(json!({
|
||||
@@ -328,11 +310,50 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn push_tool_call_message(messages: &mut Vec<Value>, tool_call: Value, reasoning: Option<&str>) {
|
||||
// Chat Completions requires that tool calls are grouped into a single assistant message
|
||||
// (with `tool_calls: [...]`) followed by tool role responses.
|
||||
if let Some(Value::Object(obj)) = messages.last_mut()
|
||||
&& obj.get("role").and_then(Value::as_str) == Some("assistant")
|
||||
&& obj.get("content").is_some_and(Value::is_null)
|
||||
&& let Some(tool_calls) = obj.get_mut("tool_calls").and_then(Value::as_array_mut)
|
||||
{
|
||||
tool_calls.push(tool_call);
|
||||
if let Some(reasoning) = reasoning {
|
||||
if let Some(Value::String(existing)) = obj.get_mut("reasoning") {
|
||||
if !existing.is_empty() {
|
||||
existing.push('\n');
|
||||
}
|
||||
existing.push_str(reasoning);
|
||||
} else {
|
||||
obj.insert(
|
||||
"reasoning".to_string(),
|
||||
Value::String(reasoning.to_string()),
|
||||
);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [tool_call],
|
||||
});
|
||||
if let Some(reasoning) = reasoning
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::provider::RetryConfig;
|
||||
use crate::provider::WireApi;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use codex_protocol::protocol::SubAgentSource;
|
||||
use http::HeaderValue;
|
||||
@@ -385,4 +406,89 @@ mod tests {
|
||||
Some(&HeaderValue::from_static("review"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn groups_consecutive_tool_calls_into_a_single_assistant_message() {
|
||||
let prompt_input = vec![
|
||||
ResponseItem::Message {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![ContentItem::InputText {
|
||||
text: "read these".to_string(),
|
||||
}],
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"a.txt"}"#.to_string(),
|
||||
call_id: "call-a".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"b.txt"}"#.to_string(),
|
||||
call_id: "call-b".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"c.txt"}"#.to_string(),
|
||||
call_id: "call-c".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-a".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "A".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-b".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "B".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-c".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "C".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
|
||||
.build(&provider())
|
||||
.expect("request");
|
||||
|
||||
let messages = req
|
||||
.body
|
||||
.get("messages")
|
||||
.and_then(|v| v.as_array())
|
||||
.expect("messages array");
|
||||
// system + user + assistant(tool_calls=[...]) + 3 tool outputs
|
||||
assert_eq!(messages.len(), 6);
|
||||
|
||||
assert_eq!(messages[0]["role"], "system");
|
||||
assert_eq!(messages[1]["role"], "user");
|
||||
|
||||
let tool_calls_msg = &messages[2];
|
||||
assert_eq!(tool_calls_msg["role"], "assistant");
|
||||
assert_eq!(tool_calls_msg["content"], serde_json::Value::Null);
|
||||
let tool_calls = tool_calls_msg["tool_calls"]
|
||||
.as_array()
|
||||
.expect("tool_calls array");
|
||||
assert_eq!(tool_calls.len(), 3);
|
||||
assert_eq!(tool_calls[0]["id"], "call-a");
|
||||
assert_eq!(tool_calls[1]["id"], "call-b");
|
||||
assert_eq!(tool_calls[2]["id"], "call-c");
|
||||
|
||||
assert_eq!(messages[3]["role"], "tool");
|
||||
assert_eq!(messages[3]["tool_call_id"], "call-a");
|
||||
assert_eq!(messages[4]["role"], "tool");
|
||||
assert_eq!(messages[4]["tool_call_id"], "call-b");
|
||||
assert_eq!(messages[5]["role"], "tool");
|
||||
assert_eq!(messages[5]["tool_call_id"], "call-c");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +30,21 @@ pub(crate) fn spawn_chat_stream(
|
||||
ResponseStream { rx_event }
|
||||
}
|
||||
|
||||
/// Processes Server-Sent Events from the legacy Chat Completions streaming API.
|
||||
///
|
||||
/// The upstream protocol terminates a streaming response with a final sentinel event
|
||||
/// (`data: [DONE]`). Historically, some of our test stubs have emitted `data: DONE`
|
||||
/// (without brackets) instead.
|
||||
///
|
||||
/// `eventsource_stream` delivers these sentinels as regular events rather than signaling
|
||||
/// end-of-stream. If we try to parse them as JSON, we log and skip them, then keep
|
||||
/// polling for more events.
|
||||
///
|
||||
/// On servers that keep the HTTP connection open after emitting the sentinel (notably
|
||||
/// wiremock on Windows), skipping the sentinel means we never emit `ResponseEvent::Completed`.
|
||||
/// Higher-level workflows/tests that wait for completion before issuing subsequent model
|
||||
/// calls will then stall, which shows up as "expected N requests, got 1" verification
|
||||
/// failures in the mock server.
|
||||
pub async fn process_chat_sse<S>(
|
||||
stream: S,
|
||||
tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>,
|
||||
@@ -57,6 +72,31 @@ pub async fn process_chat_sse<S>(
|
||||
let mut reasoning_item: Option<ResponseItem> = None;
|
||||
let mut completed_sent = false;
|
||||
|
||||
async fn flush_and_complete(
|
||||
tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>,
|
||||
reasoning_item: &mut Option<ResponseItem>,
|
||||
assistant_item: &mut Option<ResponseItem>,
|
||||
) {
|
||||
if let Some(reasoning) = reasoning_item.take() {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Some(assistant) = assistant_item.take() {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
|
||||
.await;
|
||||
}
|
||||
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::Completed {
|
||||
response_id: String::new(),
|
||||
token_usage: None,
|
||||
}))
|
||||
.await;
|
||||
}
|
||||
|
||||
loop {
|
||||
let start = Instant::now();
|
||||
let response = timeout(idle_timeout, stream.next()).await;
|
||||
@@ -70,24 +110,8 @@ pub async fn process_chat_sse<S>(
|
||||
return;
|
||||
}
|
||||
Ok(None) => {
|
||||
if let Some(reasoning) = reasoning_item {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Some(assistant) = assistant_item {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
|
||||
.await;
|
||||
}
|
||||
if !completed_sent {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::Completed {
|
||||
response_id: String::new(),
|
||||
token_usage: None,
|
||||
}))
|
||||
.await;
|
||||
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
|
||||
}
|
||||
return;
|
||||
}
|
||||
@@ -101,16 +125,25 @@ pub async fn process_chat_sse<S>(
|
||||
|
||||
trace!("SSE event: {}", sse.data);
|
||||
|
||||
if sse.data.trim().is_empty() {
|
||||
let data = sse.data.trim();
|
||||
|
||||
if data.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let value: serde_json::Value = match serde_json::from_str(&sse.data) {
|
||||
if data == "[DONE]" || data == "DONE" {
|
||||
if !completed_sent {
|
||||
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let value: serde_json::Value = match serde_json::from_str(data) {
|
||||
Ok(val) => val,
|
||||
Err(err) => {
|
||||
debug!(
|
||||
"Failed to parse ChatCompletions SSE event: {err}, data: {}",
|
||||
&sse.data
|
||||
data
|
||||
);
|
||||
continue;
|
||||
}
|
||||
@@ -362,6 +395,16 @@ mod tests {
|
||||
body
|
||||
}
|
||||
|
||||
/// Regression test: the stream should complete when we see a `[DONE]` sentinel.
|
||||
///
|
||||
/// This is important for tests/mocks that don't immediately close the underlying
|
||||
/// connection after emitting the sentinel.
|
||||
#[tokio::test]
|
||||
async fn completes_on_done_sentinel_without_json() {
|
||||
let events = collect_events("event: message\ndata: [DONE]\n\n").await;
|
||||
assert_matches!(&events[..], [ResponseEvent::Completed { .. }]);
|
||||
}
|
||||
|
||||
async fn collect_events(body: &str) -> Vec<ResponseEvent> {
|
||||
let reader = ReaderStream::new(std::io::Cursor::new(body.to_string()))
|
||||
.map_err(|err| codex_client::TransportError::Network(err.to_string()));
|
||||
|
||||
@@ -51,11 +51,19 @@ pub fn spawn_response_stream(
|
||||
telemetry: Option<Arc<dyn SseTelemetry>>,
|
||||
) -> ResponseStream {
|
||||
let rate_limits = parse_rate_limit(&stream_response.headers);
|
||||
let models_etag = stream_response
|
||||
.headers
|
||||
.get("X-Models-Etag")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(ToString::to_string);
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
|
||||
tokio::spawn(async move {
|
||||
if let Some(snapshot) = rate_limits {
|
||||
let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await;
|
||||
}
|
||||
if let Some(etag) = models_etag {
|
||||
let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
|
||||
}
|
||||
process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
|
||||
});
|
||||
|
||||
|
||||
@@ -4,14 +4,12 @@ use codex_api::provider::Provider;
|
||||
use codex_api::provider::RetryConfig;
|
||||
use codex_api::provider::WireApi;
|
||||
use codex_client::ReqwestTransport;
|
||||
use codex_protocol::openai_models::ClientVersion;
|
||||
use codex_protocol::openai_models::ConfigShellToolType;
|
||||
use codex_protocol::openai_models::ModelInfo;
|
||||
use codex_protocol::openai_models::ModelVisibility;
|
||||
use codex_protocol::openai_models::ModelsResponse;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::openai_models::ReasoningEffortPreset;
|
||||
use codex_protocol::openai_models::ReasoningSummaryFormat;
|
||||
use codex_protocol::openai_models::TruncationPolicyConfig;
|
||||
use http::HeaderMap;
|
||||
use http::Method;
|
||||
@@ -58,7 +56,7 @@ async fn models_client_hits_models_endpoint() {
|
||||
slug: "gpt-test".to_string(),
|
||||
display_name: "gpt-test".to_string(),
|
||||
description: Some("desc".to_string()),
|
||||
default_reasoning_level: ReasoningEffort::Medium,
|
||||
default_reasoning_level: Some(ReasoningEffort::Medium),
|
||||
supported_reasoning_levels: vec![
|
||||
ReasoningEffortPreset {
|
||||
effort: ReasoningEffort::Low,
|
||||
@@ -75,22 +73,21 @@ async fn models_client_hits_models_endpoint() {
|
||||
],
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
visibility: ModelVisibility::List,
|
||||
minimal_client_version: ClientVersion(0, 1, 0),
|
||||
supported_in_api: true,
|
||||
priority: 1,
|
||||
upgrade: None,
|
||||
base_instructions: None,
|
||||
base_instructions: "base instructions".to_string(),
|
||||
supports_reasoning_summaries: false,
|
||||
support_verbosity: false,
|
||||
default_verbosity: None,
|
||||
apply_patch_tool_type: None,
|
||||
truncation_policy: TruncationPolicyConfig::bytes(10_000),
|
||||
supports_parallel_tool_calls: false,
|
||||
context_window: None,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::None,
|
||||
context_window: Some(272_000),
|
||||
auto_compact_token_limit: None,
|
||||
effective_context_window_percent: 95,
|
||||
experimental_supported_tools: Vec::new(),
|
||||
}],
|
||||
etag: String::new(),
|
||||
};
|
||||
|
||||
Mock::given(method("GET"))
|
||||
@@ -106,13 +103,13 @@ async fn models_client_hits_models_endpoint() {
|
||||
let transport = ReqwestTransport::new(reqwest::Client::new());
|
||||
let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);
|
||||
|
||||
let result = client
|
||||
let (models, _) = client
|
||||
.list_models("0.1.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("models request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 1);
|
||||
assert_eq!(result.models[0].slug, "gpt-test");
|
||||
assert_eq!(models.len(), 1);
|
||||
assert_eq!(models[0].slug, "gpt-test");
|
||||
|
||||
let received = server
|
||||
.received_requests()
|
||||
|
||||
@@ -69,6 +69,15 @@ impl ReqwestTransport {
#[async_trait]
impl HttpTransport for ReqwestTransport {
    async fn execute(&self, req: Request) -> Result<Response, TransportError> {
        if enabled!(Level::TRACE) {
            trace!(
                "{} to {}: {}",
                req.method,
                req.url,
                req.body.as_ref().unwrap_or_default()
            );
        }

        let builder = self.build(req)?;
        let resp = builder.send().await.map_err(Self::map_error)?;
        let status = resp.status();
@@ -16,6 +16,7 @@ workspace = true
anyhow = { workspace = true }
async-channel = { workspace = true }
async-trait = { workspace = true }
arc-swap = "1.7.1"
base64 = { workspace = true }
chardetng = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
@@ -122,6 +123,7 @@ assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-arg0 = { workspace = true }
codex-core = { path = ".", features = ["deterministic_process_ids"] }
codex-utils-cargo-bin = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
escargot = { workspace = true }
@@ -36,11 +36,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
62,
0
],
"minimal_client_version": "0.62.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 1,
@@ -79,11 +75,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 2,
@@ -118,11 +110,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 3,
@@ -165,11 +153,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 4,
@@ -208,11 +192,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 5,
@@ -251,11 +231,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 6,
@@ -298,11 +274,7 @@
],
"shell_type": "default",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 7,
@@ -337,11 +309,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 8,
@@ -380,11 +348,7 @@
],
"shell_type": "local",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 9,
@@ -427,11 +391,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 10,
@@ -474,11 +434,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 11,
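These fixture hunks change `minimal_client_version` from a three-element array (e.g. `[0, 62, 0]`) to a dotted string (`"0.62.0"`), matching the `ClientVersion(0, 1, 0)` literal used in the test above. As a rough sketch only (the crate's actual serde implementation may differ), a version triple can take that string form like this:

```rust
// Hypothetical string-backed (de)serialization for a version triple.
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ClientVersion(pub u32, pub u32, pub u32);

impl Serialize for ClientVersion {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&format!("{}.{}.{}", self.0, self.1, self.2))
    }
}

impl<'de> Deserialize<'de> for ClientVersion {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let raw = String::deserialize(deserializer)?;
        let mut parts = raw.split('.').map(str::parse::<u32>);
        match (parts.next(), parts.next(), parts.next(), parts.next()) {
            (Some(Ok(major)), Some(Ok(minor)), Some(Ok(patch)), None) => {
                Ok(ClientVersion(major, minor, patch))
            }
            _ => Err(serde::de::Error::custom(format!("invalid client version: {raw}"))),
        }
    }
}
```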
codex-rs/core/prompt_with_apply_patch_instructions.md (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.
|
||||
|
||||
Your capabilities:
|
||||
|
||||
- Receive user prompts and other context provided by the harness, such as files in the workspace.
|
||||
- Communicate with the user by streaming thinking & responses, and by making & updating plans.
|
||||
- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section.
|
||||
|
||||
Within this context, Codex refers to the open-source agentic coding interface (not the old Codex language model built by OpenAI).
|
||||
|
||||
# How you work
|
||||
|
||||
## Personality
|
||||
|
||||
Your default personality and tone is concise, direct, and friendly. You communicate efficiently, always keeping the user clearly informed about ongoing actions without unnecessary detail. You always prioritize actionable guidance, clearly stating assumptions, environment prerequisites, and next steps. Unless explicitly asked, you avoid excessively verbose explanations about your work.
|
||||
|
||||
# AGENTS.md spec
|
||||
- Repos often contain AGENTS.md files. These files can appear anywhere within the repository.
|
||||
- These files are a way for humans to give you (the agent) instructions or tips for working within the container.
|
||||
- Some examples might be: coding conventions, info about how code is organized, or instructions for how to run or test code.
|
||||
- Instructions in AGENTS.md files:
|
||||
- The scope of an AGENTS.md file is the entire directory tree rooted at the folder that contains it.
|
||||
- For every file you touch in the final patch, you must obey instructions in any AGENTS.md file whose scope includes that file.
|
||||
- Instructions about code style, structure, naming, etc. apply only to code within the AGENTS.md file's scope, unless the file states otherwise.
|
||||
- More-deeply-nested AGENTS.md files take precedence in the case of conflicting instructions.
|
||||
- Direct system/developer/user instructions (as part of a prompt) take precedence over AGENTS.md instructions.
|
||||
- The contents of the AGENTS.md file at the root of the repo and any directories from the CWD up to the root are included with the developer message and don't need to be re-read. When working in a subdirectory of CWD, or a directory outside the CWD, check for any AGENTS.md files that may be applicable.
|
||||
|
||||
## Responsiveness
|
||||
|
||||
### Preamble messages
|
||||
|
||||
Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples:
|
||||
|
||||
- **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each.
|
||||
- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates).
|
||||
- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions.
|
||||
- **Keep your tone light, friendly and curious**: small touches of personality in preambles make them feel collaborative and engaging.
|
||||
- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action.
|
||||
|
||||
**Examples:**
|
||||
|
||||
- “I’ve explored the repo; now checking the API route definitions.”
|
||||
- “Next, I’ll patch the config and update the related tests.”
|
||||
- “I’m about to scaffold the CLI commands and helper functions.”
|
||||
- “Ok cool, so I’ve wrapped my head around the repo. Now digging into the API routes.”
|
||||
- “Config’s looking tidy. Next up is patching helpers to keep things in sync.”
|
||||
- “Finished poking at the DB gateway. I will now chase down error handling.”
|
||||
- “Alright, build pipeline order is interesting. Checking how it reports failures.”
|
||||
- “Spotted a clever caching util; now hunting where it gets used.”
|
||||
|
||||
## Planning
|
||||
|
||||
You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go.
|
||||
|
||||
Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
|
||||
|
||||
Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
|
||||
|
||||
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
|
||||
|
||||
Use a plan when:
|
||||
|
||||
- The task is non-trivial and will require multiple actions over a long time horizon.
|
||||
- There are logical phases or dependencies where sequencing matters.
|
||||
- The work has ambiguity that benefits from outlining high-level goals.
|
||||
- You want intermediate checkpoints for feedback and validation.
|
||||
- When the user asked you to do more than one thing in a single prompt
|
||||
- The user has asked you to use the plan tool (aka "TODOs")
|
||||
- You generate additional steps while working, and plan to do them before yielding to the user
|
||||
|
||||
### Examples
|
||||
|
||||
**High-quality plans**
|
||||
|
||||
Example 1:
|
||||
|
||||
1. Add CLI entry with file args
|
||||
2. Parse Markdown via CommonMark library
|
||||
3. Apply semantic HTML template
|
||||
4. Handle code blocks, images, links
|
||||
5. Add error handling for invalid files
|
||||
|
||||
Example 2:
|
||||
|
||||
1. Define CSS variables for colors
|
||||
2. Add toggle with localStorage state
|
||||
3. Refactor components to use variables
|
||||
4. Verify all views for readability
|
||||
5. Add smooth theme-change transition
|
||||
|
||||
Example 3:
|
||||
|
||||
1. Set up Node.js + WebSocket server
|
||||
2. Add join/leave broadcast events
|
||||
3. Implement messaging with timestamps
|
||||
4. Add usernames + mention highlighting
|
||||
5. Persist messages in lightweight DB
|
||||
6. Add typing indicators + unread count
|
||||
|
||||
**Low-quality plans**
|
||||
|
||||
Example 1:
|
||||
|
||||
1. Create CLI tool
|
||||
2. Add Markdown parser
|
||||
3. Convert to HTML
|
||||
|
||||
Example 2:
|
||||
|
||||
1. Add dark mode toggle
|
||||
2. Save preference
|
||||
3. Make styles look good
|
||||
|
||||
Example 3:
|
||||
|
||||
1. Create single-file HTML game
|
||||
2. Run quick sanity check
|
||||
3. Summarize usage instructions
|
||||
|
||||
If you need to write a plan, only write high quality plans, not low quality ones.
|
||||
|
||||
## Task execution
|
||||
|
||||
You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer.
|
||||
|
||||
You MUST adhere to the following criteria when solving queries:
|
||||
|
||||
- Working on the repo(s) in the current environment is allowed, even if they are proprietary.
|
||||
- Analyzing code for vulnerabilities is allowed.
|
||||
- Showing user code and tool call details is allowed.
|
||||
- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]}
|
||||
|
||||
If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. AGENTS.md) may override these guidelines:
|
||||
|
||||
- Fix the problem at the root cause rather than applying surface-level patches, when possible.
|
||||
- Avoid unneeded complexity in your solution.
|
||||
- Do not attempt to fix unrelated bugs or broken tests. It is not your responsibility to fix them. (You may mention them to the user in your final message though.)
|
||||
- Update documentation as necessary.
|
||||
- Keep changes consistent with the style of the existing codebase. Changes should be minimal and focused on the task.
|
||||
- Use `git log` and `git blame` to search the history of the codebase if additional context is required.
|
||||
- NEVER add copyright or license headers unless specifically requested.
|
||||
- Do not waste tokens by re-reading files after calling `apply_patch` on them. The tool call will fail if it didn't work. The same goes for making folders, deleting folders, etc.
|
||||
- Do not `git commit` your changes or create new git branches unless explicitly requested.
|
||||
- Do not add inline comments within code unless explicitly requested.
|
||||
- Do not use one-letter variable names unless explicitly requested.
|
||||
- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor.
|
||||
|
||||
## Sandbox and approvals
|
||||
|
||||
The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from.
|
||||
|
||||
Filesystem sandboxing prevents you from editing files without user approval. The options are:
|
||||
|
||||
- **read-only**: You can only read files.
|
||||
- **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it.
|
||||
- **danger-full-access**: No filesystem sandboxing.
|
||||
|
||||
Network sandboxing prevents you from accessing network without approval. Options are
|
||||
|
||||
- **restricted**
|
||||
- **enabled**
|
||||
|
||||
Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are
|
||||
|
||||
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
|
||||
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
|
||||
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
|
||||
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
|
||||
|
||||
When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
|
||||
|
||||
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
|
||||
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
|
||||
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
|
||||
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval.
|
||||
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
|
||||
- (For all of these, you should weigh alternative paths that do not require approval.)
|
||||
|
||||
Note that when sandboxing is set to read-only, you'll need to request approval for any command that isn't a read.
|
||||
|
||||
You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing ON, and approval on-failure.
|
||||
|
||||
## Validating your work
|
||||
|
||||
If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete.
|
||||
|
||||
When testing, your philosophy should be to start as specific as possible to the code you changed so that you can catch issues efficiently, then make your way to broader tests as you build confidence. If there's no test for the code you changed, and if the adjacent patterns in the codebases show that there's a logical place for you to add a test, you may do so. However, do not add tests to codebases with no tests.
|
||||
|
||||
Similarly, once you're confident in correctness, you can suggest or use formatting commands to ensure that your code is well formatted. If there are issues you can iterate up to 3 times to get formatting right, but if you still can't manage it's better to save the user time and present them a correct solution where you call out the formatting in your final message. If the codebase does not have a formatter configured, do not add one.
|
||||
|
||||
For all of testing, running, building, and formatting, do not attempt to fix unrelated bugs. It is not your responsibility to fix them. (You may mention them to the user in your final message though.)
|
||||
|
||||
Be mindful of whether to run validation commands proactively. In the absence of behavioral guidance:
|
||||
|
||||
- When running in non-interactive approval modes like **never** or **on-failure**, proactively run tests, lint and do whatever you need to ensure you've completed the task.
|
||||
- When working in interactive approval modes like **untrusted**, or **on-request**, hold off on running tests or lint commands until the user is ready for you to finalize your output, because these commands take time to run and slow down iteration. Instead suggest what you want to do next, and let the user confirm first.
|
||||
- When working on test-related tasks, such as adding tests, fixing tests, or reproducing a bug to verify behavior, you may proactively run tests regardless of approval mode. Use your judgement to decide whether this is a test-related task.
|
||||
|
||||
## Ambition vs. precision
|
||||
|
||||
For tasks that have no prior context (i.e. the user is starting something brand new), you should feel free to be ambitious and demonstrate creativity with your implementation.
|
||||
|
||||
If you're operating in an existing codebase, you should make sure you do exactly what the user asks with surgical precision. Treat the surrounding codebase with respect, and don't overstep (i.e. changing filenames or variables unnecessarily). You should balance being sufficiently ambitious and proactive when completing tasks of this nature.
|
||||
|
||||
You should use judicious initiative to decide on the right level of detail and complexity to deliver based on the user's needs. This means showing good judgment that you're capable of doing the right extras without gold-plating. This might be demonstrated by high-value, creative touches when scope of the task is vague; while being surgical and targeted when scope is tightly specified.
|
||||
|
||||
## Sharing progress updates
|
||||
|
||||
For especially longer tasks that you work on (i.e. requiring many tool calls, or a plan with multiple steps), you should provide progress updates back to the user at reasonable intervals. These updates should be structured as a concise sentence or two (no more than 8-10 words long) recapping progress so far in plain language: this update demonstrates your understanding of what needs to be done, progress so far (i.e. files explored, subtasks complete), and where you're going next.
|
||||
|
||||
Before doing large chunks of work that may incur latency as experienced by the user (i.e. writing a new file), you should send a concise message to the user with an update indicating what you're about to do to ensure they know what you're spending time on. Don't start editing or writing large files before informing the user what you are doing and why.
|
||||
|
||||
The messages you send before tool calls should describe what is immediately about to be done next in very concise language. If there was previous work done, this preamble message should also include a note about the work done so far to bring the user along.
|
||||
|
||||
## Presenting your work and final message
|
||||
|
||||
Your final message should read naturally, like an update from a concise teammate. For casual conversation, brainstorming tasks, or quick questions from the user, respond in a friendly, conversational tone. You should ask questions, suggest ideas, and adapt to the user’s style. If you've finished a large amount of work, when describing what you've done to the user, you should follow the final answer formatting guidelines to communicate substantive changes. You don't need to add structured formatting for one-word answers, greetings, or purely conversational exchanges.
|
||||
|
||||
You can skip heavy formatting for single, simple actions or confirmations. In these cases, respond in plain sentences with any relevant next step or quick option. Reserve multi-section structured responses for results that need grouping or explanation.
|
||||
|
||||
The user is working on the same computer as you, and has access to your work. As such there's no need to show the full contents of large files you have already written unless the user explicitly asks for them. Similarly, if you've created or modified files using `apply_patch`, there's no need to tell users to "save the file" or "copy the code into a file"—just reference the file path.
|
||||
|
||||
If there's something that you think you could help with as a logical next step, concisely ask the user if they want you to do so. Good examples of this are running tests, committing changes, or building out the next logical component. If there’s something that you couldn't do (even with approval) but that the user might want to do (such as verifying changes by running the app), include those instructions succinctly.
|
||||
|
||||
Brevity is very important as a default. You should be very concise (i.e. no more than 10 lines), but can relax this requirement for tasks where additional detail and comprehensiveness is important for the user's understanding.
|
||||
|
||||
### Final answer structure and style guidelines
|
||||
|
||||
You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
|
||||
|
||||
**Section Headers**
|
||||
|
||||
- Use only when they improve clarity — they are not mandatory for every answer.
|
||||
- Choose descriptive names that fit the content
|
||||
- Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**`
|
||||
- Leave no blank line before the first bullet under a header.
|
||||
- Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer.
|
||||
|
||||
**Bullets**
|
||||
|
||||
- Use `-` followed by a space for every bullet.
|
||||
- Merge related points when possible; avoid a bullet for every trivial detail.
|
||||
- Keep bullets to one line unless breaking for clarity is unavoidable.
|
||||
- Group into short lists (4–6 bullets) ordered by importance.
|
||||
- Use consistent keyword phrasing and formatting across sections.
|
||||
|
||||
**Monospace**
|
||||
|
||||
- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``).
|
||||
- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command.
|
||||
- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``).
|
||||
|
||||
**File References**
|
||||
When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
|
||||
* Use inline code to make file paths clickable.
|
||||
* Each reference should have a stand alone path. Even if it's the same file.
|
||||
* Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
|
||||
* Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
|
||||
* Do not use URIs like file://, vscode://, or https://.
|
||||
* Do not provide range of lines
|
||||
* Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
|
||||
|
||||
**Structure**
|
||||
|
||||
- Place related bullets together; don’t mix unrelated concepts in the same section.
|
||||
- Order sections from general → specific → supporting info.
|
||||
- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it.
|
||||
- Match structure to complexity:
|
||||
- Multi-part or detailed results → use clear headers and grouped bullets.
|
||||
- Simple results → minimal headers, possibly just a short list or paragraph.
|
||||
|
||||
**Tone**
|
||||
|
||||
- Keep the voice collaborative and natural, like a coding partner handing off work.
|
||||
- Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition
|
||||
- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”).
|
||||
- Keep descriptions self-contained; don’t refer to “above” or “below”.
|
||||
- Use parallel structure in lists for consistency.
|
||||
|
||||
**Don’t**
|
||||
|
||||
- Don’t use literal words “bold” or “monospace” in the content.
|
||||
- Don’t nest bullets or create deep hierarchies.
|
||||
- Don’t output ANSI escape codes directly — the CLI renderer applies them.
|
||||
- Don’t cram unrelated keywords into a single bullet; split for clarity.
|
||||
- Don’t let keyword lists run long — wrap or reformat for scanability.
|
||||
|
||||
Generally, ensure your final answers adapt their shape and depth to the request. For example, answers to code explanations should have a precise, structured explanation with code references that answer the question directly. For tasks with a simple implementation, lead with the outcome and supplement only with what’s needed for clarity. Larger changes can be presented as a logical walkthrough of your approach, grouping related steps, explaining rationale where it adds value, and highlighting next actions to accelerate the user. Your answers should provide the right level of detail while being easily scannable.
|
||||
|
||||
For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting.
|
||||
|
||||
# Tool Guidelines
|
||||
|
||||
## Shell commands
|
||||
|
||||
When using the shell, you must adhere to the following guidelines:
|
||||
|
||||
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
|
||||
- Do not use python scripts to attempt to output larger chunks of a file.
|
||||
|
||||
## `update_plan`
|
||||
|
||||
A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task.
|
||||
|
||||
To create a new plan, call `update_plan` with a short list of 1‑sentence steps (no more than 5-7 words each) with a `status` for each step (`pending`, `in_progress`, or `completed`).
|
||||
|
||||
When steps have been completed, use `update_plan` to mark each finished step as `completed` and the next step you are working on as `in_progress`. There should always be exactly one `in_progress` step until everything is done. You can mark multiple items as complete in a single `update_plan` call.
|
||||
|
||||
If all steps are complete, ensure you call `update_plan` to mark all steps as `completed`.
|
||||
|
||||
## `apply_patch`
|
||||
|
||||
Use the `apply_patch` shell command to edit files.
|
||||
Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope:
|
||||
|
||||
*** Begin Patch
|
||||
[ one or more file sections ]
|
||||
*** End Patch
|
||||
|
||||
Within that envelope, you get a sequence of file operations.
|
||||
You MUST include a header to specify the action you are taking.
|
||||
Each operation starts with one of three headers:
|
||||
|
||||
*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
||||
*** Delete File: <path> - remove an existing file. Nothing follows.
|
||||
*** Update File: <path> - patch an existing file in place (optionally with a rename).
|
||||
|
||||
May be immediately followed by *** Move to: <new path> if you want to rename the file.
|
||||
Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header).
|
||||
Within a hunk each line starts with:
|
||||
|
||||
For instructions on [context_before] and [context_after]:
|
||||
- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
|
||||
- If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have:
|
||||
@@ class BaseClass
|
||||
[3 lines of pre-context]
|
||||
- [old_code]
|
||||
+ [new_code]
|
||||
[3 lines of post-context]
|
||||
|
||||
- If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:
|
||||
|
||||
@@ class BaseClass
|
||||
@@ def method():
|
||||
[3 lines of pre-context]
|
||||
- [old_code]
|
||||
+ [new_code]
|
||||
[3 lines of post-context]
|
||||
|
||||
The full grammar definition is below:
|
||||
Patch := Begin { FileOp } End
|
||||
Begin := "*** Begin Patch" NEWLINE
|
||||
End := "*** End Patch" NEWLINE
|
||||
FileOp := AddFile | DeleteFile | UpdateFile
|
||||
AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE }
|
||||
DeleteFile := "*** Delete File: " path NEWLINE
|
||||
UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk }
|
||||
MoveTo := "*** Move to: " newPath NEWLINE
|
||||
Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ]
|
||||
HunkLine := (" " | "-" | "+") text NEWLINE
|
||||
|
||||
A full patch can combine several operations:
|
||||
|
||||
*** Begin Patch
|
||||
*** Add File: hello.txt
|
||||
+Hello world
|
||||
*** Update File: src/app.py
|
||||
*** Move to: src/main.py
|
||||
@@ def greet():
|
||||
-print("Hi")
|
||||
+print("Hello, world!")
|
||||
*** Delete File: obsolete.txt
|
||||
*** End Patch
|
||||
|
||||
It is important to remember:
|
||||
|
||||
- You must include a header with your intended action (Add/Delete/Update)
|
||||
- You must prefix new lines with `+` even when creating a new file
|
||||
- File references can only be relative, NEVER ABSOLUTE.
|
||||
|
||||
You can invoke apply_patch like:
|
||||
|
||||
```
|
||||
shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]}
|
||||
```
|
||||
codex-rs/core/src/agent/control.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
use crate::CodexThread;
|
||||
use crate::agent::AgentStatus;
|
||||
use crate::error::CodexErr;
|
||||
use crate::error::Result as CodexResult;
|
||||
use crate::thread_manager::ThreadManagerState;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::Op;
|
||||
use codex_protocol::user_input::UserInput;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Weak;
|
||||
|
||||
/// Control-plane handle for multi-agent operations.
|
||||
/// `AgentControl` is held by each session (via `SessionServices`). It provides capability to
|
||||
/// spawn new agents and the inter-agent communication layer.
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct AgentControl {
|
||||
/// Weak handle back to the global thread registry/state.
|
||||
/// This is `Weak` to avoid reference cycles and shadow persistence of the form
|
||||
/// `ThreadManagerState -> CodexThread -> Session -> SessionServices -> ThreadManagerState`.
|
||||
manager: Weak<ThreadManagerState>,
|
||||
}
|
||||
|
||||
impl AgentControl {
|
||||
/// Construct a new `AgentControl` that can spawn/message agents via the given manager state.
|
||||
pub(crate) fn new(manager: Weak<ThreadManagerState>) -> Self {
|
||||
Self { manager }
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // Used by upcoming multi-agent tooling.
|
||||
/// Spawn a new agent thread and submit the initial prompt.
|
||||
///
|
||||
/// If `headless` is true, a background drain task is spawned to prevent unbounded event growth
|
||||
/// of the channel queue when there is no client actively reading the thread events.
|
||||
pub(crate) async fn spawn_agent(
|
||||
&self,
|
||||
config: crate::config::Config,
|
||||
prompt: String,
|
||||
headless: bool,
|
||||
) -> CodexResult<ThreadId> {
|
||||
let state = self.upgrade()?;
|
||||
let new_thread = state.spawn_new_thread(config, self.clone()).await?;
|
||||
|
||||
if headless {
|
||||
spawn_headless_drain(Arc::clone(&new_thread.thread));
|
||||
}
|
||||
|
||||
self.send_prompt(new_thread.thread_id, prompt).await?;
|
||||
|
||||
Ok(new_thread.thread_id)
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // Used by upcoming multi-agent tooling.
|
||||
/// Send a `user` prompt to an existing agent thread.
|
||||
pub(crate) async fn send_prompt(
|
||||
&self,
|
||||
agent_id: ThreadId,
|
||||
prompt: String,
|
||||
) -> CodexResult<String> {
|
||||
let state = self.upgrade()?;
|
||||
state
|
||||
.send_op(
|
||||
agent_id,
|
||||
Op::UserInput {
|
||||
items: vec![UserInput::Text { text: prompt }],
|
||||
final_output_json_schema: None,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // Used by upcoming multi-agent tooling.
|
||||
/// Fetch the last known status for `agent_id`, returning `NotFound` when unavailable.
|
||||
pub(crate) async fn get_status(&self, agent_id: ThreadId) -> AgentStatus {
|
||||
let Ok(state) = self.upgrade() else {
|
||||
// No agent available if upgrade fails.
|
||||
return AgentStatus::NotFound;
|
||||
};
|
||||
let Ok(thread) = state.get_thread(agent_id).await else {
|
||||
return AgentStatus::NotFound;
|
||||
};
|
||||
thread.agent_status().await
|
||||
}
|
||||
|
||||
fn upgrade(&self) -> CodexResult<Arc<ThreadManagerState>> {
|
||||
self.manager
|
||||
.upgrade()
|
||||
.ok_or_else(|| CodexErr::UnsupportedOperation("thread manager dropped".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// When an agent is spawned "headless" (no UI/view attached), there may be no consumer polling
|
||||
/// `CodexThread::next_event()`. The underlying event channel is unbounded, so the producer can
|
||||
/// accumulate events indefinitely. This drain task prevents that memory growth by polling and
|
||||
/// discarding events until shutdown.
|
||||
fn spawn_headless_drain(thread: Arc<CodexThread>) {
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
match thread.next_event().await {
|
||||
Ok(event) => {
|
||||
if matches!(event.msg, EventMsg::ShutdownComplete) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::warn!("failed to receive event from agent: {err:?}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::agent::agent_status_from_event;
|
||||
use codex_protocol::protocol::ErrorEvent;
|
||||
use codex_protocol::protocol::TaskCompleteEvent;
|
||||
use codex_protocol::protocol::TaskStartedEvent;
|
||||
use codex_protocol::protocol::TurnAbortReason;
|
||||
use codex_protocol::protocol::TurnAbortedEvent;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_prompt_errors_when_manager_dropped() {
|
||||
let control = AgentControl::default();
|
||||
let err = control
|
||||
.send_prompt(ThreadId::new(), "hello".to_string())
|
||||
.await
|
||||
.expect_err("send_prompt should fail without a manager");
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
"unsupported operation: thread manager dropped"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn get_status_returns_not_found_without_manager() {
|
||||
let control = AgentControl::default();
|
||||
let got = control.get_status(ThreadId::new()).await;
|
||||
assert_eq!(got, AgentStatus::NotFound);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn on_event_updates_status_from_task_started() {
|
||||
let status = agent_status_from_event(&EventMsg::TaskStarted(TaskStartedEvent {
|
||||
model_context_window: None,
|
||||
}));
|
||||
assert_eq!(status, Some(AgentStatus::Running));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn on_event_updates_status_from_task_complete() {
|
||||
let status = agent_status_from_event(&EventMsg::TaskComplete(TaskCompleteEvent {
|
||||
last_agent_message: Some("done".to_string()),
|
||||
}));
|
||||
let expected = AgentStatus::Completed(Some("done".to_string()));
|
||||
assert_eq!(status, Some(expected));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn on_event_updates_status_from_error() {
|
||||
let status = agent_status_from_event(&EventMsg::Error(ErrorEvent {
|
||||
message: "boom".to_string(),
|
||||
codex_error_info: None,
|
||||
}));
|
||||
|
||||
let expected = AgentStatus::Errored("boom".to_string());
|
||||
assert_eq!(status, Some(expected));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn on_event_updates_status_from_turn_aborted() {
|
||||
let status = agent_status_from_event(&EventMsg::TurnAborted(TurnAbortedEvent {
|
||||
reason: TurnAbortReason::Interrupted,
|
||||
}));
|
||||
|
||||
let expected = AgentStatus::Errored("Interrupted".to_string());
|
||||
assert_eq!(status, Some(expected));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn on_event_updates_status_from_shutdown_complete() {
|
||||
let status = agent_status_from_event(&EventMsg::ShutdownComplete);
|
||||
assert_eq!(status, Some(AgentStatus::Shutdown));
|
||||
}
|
||||
}
|
||||
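A hypothetical caller of the `AgentControl` API defined above might look like the sketch below; the prompt text, the way `Config` is obtained, and the logging are placeholders rather than code from this change.

```rust
// Illustrative (crate-internal) usage of AgentControl; assumes a prepared Config.
async fn spawn_background_summarizer(
    control: &AgentControl,
    config: crate::config::Config,
) -> CodexResult<()> {
    // Headless: a drain task discards events so the unbounded channel cannot grow.
    let agent_id = control
        .spawn_agent(config, "Summarize open TODOs".to_string(), true)
        .await?;

    // Later, ask the control plane whether the agent is still working.
    match control.get_status(agent_id).await {
        AgentStatus::Running => { /* still in progress */ }
        AgentStatus::Completed(last_message) => {
            tracing::info!("agent finished: {last_message:?}");
        }
        other => tracing::warn!("agent ended with status {other:?}"),
    }
    Ok(())
}
```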
codex-rs/core/src/agent/mod.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
pub(crate) mod control;
pub(crate) mod status;

pub(crate) use codex_protocol::protocol::AgentStatus;
pub(crate) use control::AgentControl;
pub(crate) use status::agent_status_from_event;
codex-rs/core/src/agent/status.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
use codex_protocol::protocol::AgentStatus;
use codex_protocol::protocol::EventMsg;

/// Derive the next agent status from a single emitted event.
/// Returns `None` when the event does not affect status tracking.
pub(crate) fn agent_status_from_event(msg: &EventMsg) -> Option<AgentStatus> {
    match msg {
        EventMsg::TaskStarted(_) => Some(AgentStatus::Running),
        EventMsg::TaskComplete(ev) => Some(AgentStatus::Completed(ev.last_agent_message.clone())),
        EventMsg::TurnAborted(ev) => Some(AgentStatus::Errored(format!("{:?}", ev.reason))),
        EventMsg::Error(ev) => Some(AgentStatus::Errored(ev.message.clone())),
        EventMsg::ShutdownComplete => Some(AgentStatus::Shutdown),
        _ => None,
    }
}
@@ -1,10 +1,9 @@
|
||||
use crate::codex::Session;
|
||||
use crate::codex::TurnContext;
|
||||
use crate::function_tool::FunctionCallError;
|
||||
use crate::protocol::FileChange;
|
||||
use crate::protocol::ReviewDecision;
|
||||
use crate::safety::SafetyCheck;
|
||||
use crate::safety::assess_patch_safety;
|
||||
use crate::tools::sandboxing::ExecApprovalRequirement;
|
||||
use codex_apply_patch::ApplyPatchAction;
|
||||
use codex_apply_patch::ApplyPatchFileChange;
|
||||
use std::collections::HashMap;
|
||||
@@ -30,13 +29,12 @@ pub(crate) enum InternalApplyPatchInvocation {
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ApplyPatchExec {
|
||||
pub(crate) action: ApplyPatchAction,
|
||||
pub(crate) user_explicitly_approved_this_action: bool,
|
||||
pub(crate) auto_approved: bool,
|
||||
pub(crate) exec_approval_requirement: ExecApprovalRequirement,
|
||||
}
|
||||
|
||||
pub(crate) async fn apply_patch(
|
||||
sess: &Session,
|
||||
turn_context: &TurnContext,
|
||||
call_id: &str,
|
||||
action: ApplyPatchAction,
|
||||
) -> InternalApplyPatchInvocation {
|
||||
match assess_patch_safety(
|
||||
@@ -50,40 +48,24 @@ pub(crate) async fn apply_patch(
|
||||
..
|
||||
} => InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
|
||||
action,
|
||||
user_explicitly_approved_this_action: user_explicitly_approved,
|
||||
auto_approved: !user_explicitly_approved,
|
||||
exec_approval_requirement: ExecApprovalRequirement::Skip {
|
||||
bypass_sandbox: false,
|
||||
proposed_execpolicy_amendment: None,
|
||||
},
|
||||
}),
|
||||
SafetyCheck::AskUser => {
|
||||
// Compute a readable summary of path changes to include in the
|
||||
// approval request so the user can make an informed decision.
|
||||
//
|
||||
// Note that it might be worth expanding this approval request to
|
||||
// give the user the option to expand the set of writable roots so
|
||||
// that similar patches can be auto-approved in the future during
|
||||
// this session.
|
||||
let rx_approve = sess
|
||||
.request_patch_approval(
|
||||
turn_context,
|
||||
call_id.to_owned(),
|
||||
convert_apply_patch_to_protocol(&action),
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
match rx_approve.await.unwrap_or_default() {
|
||||
ReviewDecision::Approved
|
||||
| ReviewDecision::ApprovedExecpolicyAmendment { .. }
|
||||
| ReviewDecision::ApprovedForSession => {
|
||||
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
|
||||
action,
|
||||
user_explicitly_approved_this_action: true,
|
||||
})
|
||||
}
|
||||
ReviewDecision::Denied | ReviewDecision::Abort => {
|
||||
InternalApplyPatchInvocation::Output(Err(FunctionCallError::RespondToModel(
|
||||
"patch rejected by user".to_string(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
// Delegate the approval prompt (including cached approvals) to the
|
||||
// tool runtime, consistent with how shell/unified_exec approvals
|
||||
// are orchestrator-driven.
|
||||
InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
|
||||
action,
|
||||
auto_approved: false,
|
||||
exec_approval_requirement: ExecApprovalRequirement::NeedsApproval {
|
||||
reason: None,
|
||||
proposed_execpolicy_amendment: None,
|
||||
},
|
||||
})
|
||||
}
|
||||
SafetyCheck::Reject { reason } => InternalApplyPatchInvocation::Output(Err(
|
||||
FunctionCallError::RespondToModel(format!("patch rejected: {reason}")),
|
||||
|
||||
@@ -32,11 +32,7 @@ use crate::token_data::parse_id_token;
|
||||
use crate::util::try_parse_error_message;
|
||||
use codex_client::CodexHttpClient;
|
||||
use codex_protocol::account::PlanType as AccountPlanType;
|
||||
#[cfg(any(test, feature = "test-support"))]
|
||||
use once_cell::sync::Lazy;
|
||||
use serde_json::Value;
|
||||
#[cfg(any(test, feature = "test-support"))]
|
||||
use tempfile::TempDir;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -66,9 +62,6 @@ const REFRESH_TOKEN_UNKNOWN_MESSAGE: &str =
|
||||
const REFRESH_TOKEN_URL: &str = "https://auth.openai.com/oauth/token";
|
||||
pub const REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR: &str = "CODEX_REFRESH_TOKEN_URL_OVERRIDE";
|
||||
|
||||
#[cfg(any(test, feature = "test-support"))]
|
||||
static TEST_AUTH_TEMP_DIRS: Lazy<Mutex<Vec<TempDir>>> = Lazy::new(|| Mutex::new(Vec::new()));
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RefreshTokenError {
|
||||
#[error("{0}")]
|
||||
@@ -630,6 +623,155 @@ struct CachedAuth {
|
||||
auth: Option<CodexAuth>,
|
||||
}
|
||||
|
||||
/// Central manager providing a single source of truth for auth.json derived
|
||||
/// authentication data. It loads once (or on preference change) and then
|
||||
/// hands out cloned `CodexAuth` values so the rest of the program has a
|
||||
/// consistent snapshot.
|
||||
///
|
||||
/// External modifications to `auth.json` will NOT be observed until
|
||||
/// `reload()` is called explicitly. This matches the design goal of avoiding
|
||||
/// different parts of the program seeing inconsistent auth data mid‑run.
|
||||
#[derive(Debug)]
|
||||
pub struct AuthManager {
|
||||
codex_home: PathBuf,
|
||||
inner: RwLock<CachedAuth>,
|
||||
enable_codex_api_key_env: bool,
|
||||
auth_credentials_store_mode: AuthCredentialsStoreMode,
|
||||
}
|
||||
|
||||
impl AuthManager {
|
||||
/// Create a new manager loading the initial auth using the provided
|
||||
/// preferred auth method. Errors loading auth are swallowed; `auth()` will
|
||||
/// simply return `None` in that case so callers can treat it as an
|
||||
/// unauthenticated state.
|
||||
pub fn new(
|
||||
codex_home: PathBuf,
|
||||
enable_codex_api_key_env: bool,
|
||||
auth_credentials_store_mode: AuthCredentialsStoreMode,
|
||||
) -> Self {
|
||||
let auth = load_auth(
|
||||
&codex_home,
|
||||
enable_codex_api_key_env,
|
||||
auth_credentials_store_mode,
|
||||
)
|
||||
.ok()
|
||||
.flatten();
|
||||
Self {
|
||||
codex_home,
|
||||
inner: RwLock::new(CachedAuth { auth }),
|
||||
enable_codex_api_key_env,
|
||||
auth_credentials_store_mode,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "test-support"))]
|
||||
/// Create an AuthManager with a specific CodexAuth, for testing only.
|
||||
pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
|
||||
let cached = CachedAuth { auth: Some(auth) };
|
||||
|
||||
Arc::new(Self {
|
||||
codex_home: PathBuf::from("non-existent"),
|
||||
inner: RwLock::new(cached),
|
||||
enable_codex_api_key_env: false,
|
||||
auth_credentials_store_mode: AuthCredentialsStoreMode::File,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "test-support"))]
|
||||
/// Create an AuthManager with a specific CodexAuth and codex home, for testing only.
|
||||
pub fn from_auth_for_testing_with_home(auth: CodexAuth, codex_home: PathBuf) -> Arc<Self> {
|
||||
let cached = CachedAuth { auth: Some(auth) };
|
||||
Arc::new(Self {
|
||||
codex_home,
|
||||
inner: RwLock::new(cached),
|
||||
enable_codex_api_key_env: false,
|
||||
auth_credentials_store_mode: AuthCredentialsStoreMode::File,
|
||||
})
|
||||
}
|
||||
|
||||
/// Current cached auth (clone). May be `None` if not logged in or load failed.
|
||||
pub fn auth(&self) -> Option<CodexAuth> {
|
||||
self.inner.read().ok().and_then(|c| c.auth.clone())
|
||||
}
|
||||
|
||||
/// Force a reload of the auth information from auth.json. Returns
|
||||
/// whether the auth value changed.
|
||||
pub fn reload(&self) -> bool {
|
||||
let new_auth = load_auth(
|
||||
&self.codex_home,
|
||||
self.enable_codex_api_key_env,
|
||||
self.auth_credentials_store_mode,
|
||||
)
|
||||
.ok()
|
||||
.flatten();
|
||||
if let Ok(mut guard) = self.inner.write() {
|
||||
let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
|
||||
guard.auth = new_auth;
|
||||
changed
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn auths_equal(a: &Option<CodexAuth>, b: &Option<CodexAuth>) -> bool {
|
||||
match (a, b) {
|
||||
(None, None) => true,
|
||||
(Some(a), Some(b)) => a == b,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience constructor returning an `Arc` wrapper.
|
||||
pub fn shared(
|
||||
codex_home: PathBuf,
|
||||
enable_codex_api_key_env: bool,
|
||||
auth_credentials_store_mode: AuthCredentialsStoreMode,
|
||||
) -> Arc<Self> {
|
||||
Arc::new(Self::new(
|
||||
codex_home,
|
||||
enable_codex_api_key_env,
|
||||
auth_credentials_store_mode,
|
||||
))
|
||||
}
|
||||
|
||||
/// Attempt to refresh the current auth token (if any). On success, reload
|
||||
/// the auth state from disk so other components observe refreshed token.
|
||||
/// If the token refresh fails in a permanent (non‑transient) way, logs out
|
||||
/// to clear invalid auth state.
|
||||
pub async fn refresh_token(&self) -> Result<Option<String>, RefreshTokenError> {
|
||||
let auth = match self.auth() {
|
||||
Some(a) => a,
|
||||
None => return Ok(None),
|
||||
};
|
||||
match auth.refresh_token().await {
|
||||
Ok(token) => {
|
||||
// Reload to pick up persisted changes.
|
||||
self.reload();
|
||||
Ok(Some(token))
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to refresh token: {}", e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Log out by deleting the on‑disk auth.json (if present). Returns Ok(true)
|
||||
/// if a file was removed, Ok(false) if no auth file existed. On success,
|
||||
/// reloads the in‑memory auth cache so callers immediately observe the
|
||||
/// unauthenticated state.
|
||||
pub fn logout(&self) -> std::io::Result<bool> {
|
||||
let removed = super::auth::logout(&self.codex_home, self.auth_credentials_store_mode)?;
|
||||
// Always reload to clear any cached auth (even if file absent).
|
||||
self.reload();
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
pub fn get_auth_mode(&self) -> Option<AuthMode> {
|
||||
self.auth().map(|a| a.mode)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -1051,162 +1193,3 @@ mod tests {
|
||||
pretty_assertions::assert_eq!(auth.account_plan_type(), Some(AccountPlanType::Unknown));
|
||||
}
|
||||
}
|
||||
|
||||
/// Central manager providing a single source of truth for auth.json derived
|
||||
/// authentication data. It loads once (or on preference change) and then
|
||||
/// hands out cloned `CodexAuth` values so the rest of the program has a
|
||||
/// consistent snapshot.
|
||||
///
|
||||
/// External modifications to `auth.json` will NOT be observed until
|
||||
/// `reload()` is called explicitly. This matches the design goal of avoiding
|
||||
/// different parts of the program seeing inconsistent auth data mid‑run.
|
||||
#[derive(Debug)]
|
||||
pub struct AuthManager {
|
||||
codex_home: PathBuf,
|
||||
inner: RwLock<CachedAuth>,
|
||||
    enable_codex_api_key_env: bool,
    auth_credentials_store_mode: AuthCredentialsStoreMode,
}

impl AuthManager {
    /// Create a new manager loading the initial auth using the provided
    /// preferred auth method. Errors loading auth are swallowed; `auth()` will
    /// simply return `None` in that case so callers can treat it as an
    /// unauthenticated state.
    pub fn new(
        codex_home: PathBuf,
        enable_codex_api_key_env: bool,
        auth_credentials_store_mode: AuthCredentialsStoreMode,
    ) -> Self {
        let auth = load_auth(
            &codex_home,
            enable_codex_api_key_env,
            auth_credentials_store_mode,
        )
        .ok()
        .flatten();
        Self {
            codex_home,
            inner: RwLock::new(CachedAuth { auth }),
            enable_codex_api_key_env,
            auth_credentials_store_mode,
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    #[expect(clippy::expect_used)]
    /// Create an AuthManager with a specific CodexAuth, for testing only.
    pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
        let cached = CachedAuth { auth: Some(auth) };
        let temp_dir = tempfile::tempdir().expect("temp codex home");
        let codex_home = temp_dir.path().to_path_buf();
        TEST_AUTH_TEMP_DIRS
            .lock()
            .expect("lock test codex homes")
            .push(temp_dir);
        Arc::new(Self {
            codex_home,
            inner: RwLock::new(cached),
            enable_codex_api_key_env: false,
            auth_credentials_store_mode: AuthCredentialsStoreMode::File,
        })
    }

    #[cfg(any(test, feature = "test-support"))]
    /// Create an AuthManager with a specific CodexAuth and codex home, for testing only.
    pub fn from_auth_for_testing_with_home(auth: CodexAuth, codex_home: PathBuf) -> Arc<Self> {
        let cached = CachedAuth { auth: Some(auth) };
        Arc::new(Self {
            codex_home,
            inner: RwLock::new(cached),
            enable_codex_api_key_env: false,
            auth_credentials_store_mode: AuthCredentialsStoreMode::File,
        })
    }

    /// Current cached auth (clone). May be `None` if not logged in or load failed.
    pub fn auth(&self) -> Option<CodexAuth> {
        self.inner.read().ok().and_then(|c| c.auth.clone())
    }

    pub fn codex_home(&self) -> &Path {
        &self.codex_home
    }

    /// Force a reload of the auth information from auth.json. Returns
    /// whether the auth value changed.
    pub fn reload(&self) -> bool {
        let new_auth = load_auth(
            &self.codex_home,
            self.enable_codex_api_key_env,
            self.auth_credentials_store_mode,
        )
        .ok()
        .flatten();
        if let Ok(mut guard) = self.inner.write() {
            let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
            guard.auth = new_auth;
            changed
        } else {
            false
        }
    }

    fn auths_equal(a: &Option<CodexAuth>, b: &Option<CodexAuth>) -> bool {
        match (a, b) {
            (None, None) => true,
            (Some(a), Some(b)) => a == b,
            _ => false,
        }
    }

    /// Convenience constructor returning an `Arc` wrapper.
    pub fn shared(
        codex_home: PathBuf,
        enable_codex_api_key_env: bool,
        auth_credentials_store_mode: AuthCredentialsStoreMode,
    ) -> Arc<Self> {
        Arc::new(Self::new(
            codex_home,
            enable_codex_api_key_env,
            auth_credentials_store_mode,
        ))
    }

    /// Attempt to refresh the current auth token (if any). On success, reload
    /// the auth state from disk so other components observe refreshed token.
    /// If the token refresh fails in a permanent (non‑transient) way, logs out
    /// to clear invalid auth state.
    pub async fn refresh_token(&self) -> Result<Option<String>, RefreshTokenError> {
        let auth = match self.auth() {
            Some(a) => a,
            None => return Ok(None),
        };
        match auth.refresh_token().await {
            Ok(token) => {
                // Reload to pick up persisted changes.
                self.reload();
                Ok(Some(token))
            }
            Err(e) => {
                tracing::error!("Failed to refresh token: {}", e);
                Err(e)
            }
        }
    }

    /// Log out by deleting the on‑disk auth.json (if present). Returns Ok(true)
    /// if a file was removed, Ok(false) if no auth file existed. On success,
    /// reloads the in‑memory auth cache so callers immediately observe the
    /// unauthenticated state.
    pub fn logout(&self) -> std::io::Result<bool> {
        let removed = super::auth::logout(&self.codex_home, self.auth_credentials_store_mode)?;
        // Always reload to clear any cached auth (even if file absent).
        self.reload();
        Ok(removed)
    }

    pub fn get_auth_mode(&self) -> Option<AuthMode> {
        self.auth().map(|a| a.mode)
    }
}

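Not part of the commit: a minimal caller-side sketch of the AuthManager API shown above. It assumes the surrounding crate types (AuthManager, CodexAuth, AuthCredentialsStoreMode, AuthMode) are in scope and only exercises constructors and methods visible in the diff.

// Illustrative only; compiles only inside the crate where these types live.
use std::path::PathBuf;

fn example(codex_home: PathBuf) -> std::io::Result<()> {
    // `shared` wraps `new` in an Arc; API keys from the environment stay disabled here.
    let auth_manager = AuthManager::shared(
        codex_home,
        /* enable_codex_api_key_env */ false,
        AuthCredentialsStoreMode::File,
    );

    // After an external login writes auth.json, pick up the change.
    if auth_manager.reload() {
        // Assumes AuthMode derives Debug.
        println!("auth mode: {:?}", auth_manager.get_auth_mode());
    }

    // Logging out deletes auth.json (if present) and clears the cached auth.
    let removed = auth_manager.logout()?;
    println!("auth file removed: {removed}");
    Ok(())
}
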
@@ -46,6 +46,7 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
    "string_content",
    "raw_string",
    "number",
    "concatenation",
];
// Allow only safe punctuation / operator tokens; anything else causes reject.
const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"];

@@ -158,6 +159,48 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
                    return None;
                }
            }
            "concatenation" => {
                // Handle concatenated arguments like -g"*.py"
                let mut concatenated = String::new();
                let mut concat_cursor = child.walk();
                for part in child.named_children(&mut concat_cursor) {
                    match part.kind() {
                        "word" | "number" => {
                            concatenated
                                .push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str());
                        }
                        "string" => {
                            if part.child_count() == 3
                                && part.child(0)?.kind() == "\""
                                && part.child(1)?.kind() == "string_content"
                                && part.child(2)?.kind() == "\""
                            {
                                concatenated.push_str(
                                    part.child(1)?
                                        .utf8_text(src.as_bytes())
                                        .ok()?
                                        .to_owned()
                                        .as_str(),
                                );
                            } else {
                                return None;
                            }
                        }
                        "raw_string" => {
                            let raw_string = part.utf8_text(src.as_bytes()).ok()?;
                            let stripped = raw_string
                                .strip_prefix('\'')
                                .and_then(|s| s.strip_suffix('\''))?;
                            concatenated.push_str(stripped);
                        }
                        _ => return None,
                    }
                }
                if concatenated.is_empty() {
                    return None;
                }
                words.push(concatenated);
            }
            _ => return None,
        }
    }

@@ -256,4 +299,47 @@ mod tests {
        let parsed = parse_shell_lc_plain_commands(&command).unwrap();
        assert_eq!(parsed, vec![vec!["ls".to_string()]]);
    }

    #[test]
    fn accepts_concatenated_flag_and_value() {
        // Test case: -g"*.py" (flag directly concatenated with quoted value)
        let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap();
        assert_eq!(
            cmds,
            vec![vec![
                "rg".to_string(),
                "-n".to_string(),
                "foo".to_string(),
                "-g*.py".to_string(),
            ]]
        );
    }

    #[test]
    fn accepts_concatenated_flag_with_single_quotes() {
        let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap();
        assert_eq!(
            cmds,
            vec![vec![
                "grep".to_string(),
                "-n".to_string(),
                "pattern".to_string(),
                "-g*.txt".to_string(),
            ]]
        );
    }

    #[test]
    fn rejects_concatenation_with_variable_substitution() {
        // Environment variables in concatenated strings should be rejected
        assert!(parse_seq("rg -g\"$VAR\" pattern").is_none());
        assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none());
    }

    #[test]
    fn rejects_concatenation_with_command_substitution() {
        // Command substitution in concatenated strings should be rejected
        assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none());
        assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none());
    }
}

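Not part of the commit: a standalone sketch of the flattening rule the new "concatenation" arm implements — adjacent word/string/raw_string parts collapse into a single argument, and anything else (e.g. a substitution node) rejects the command. The Part enum and function names here are illustrative stand-ins, not the tree-sitter code above.

#[derive(Debug)]
enum Part<'a> {
    Word(&'a str),         // bare word or number, e.g. -g
    DoubleQuoted(&'a str), // string_content between double quotes, e.g. *.py
    RawString(&'a str),    // single-quoted literal including the quotes, e.g. '*.txt'
}

fn flatten_concatenation(parts: &[Part<'_>]) -> Option<String> {
    let mut out = String::new();
    for part in parts {
        match part {
            Part::Word(w) => out.push_str(w),
            Part::DoubleQuoted(s) => out.push_str(s),
            Part::RawString(raw) => {
                // Strip the surrounding single quotes; reject if they are missing.
                let stripped = raw.strip_prefix('\'')?.strip_suffix('\'')?;
                out.push_str(stripped);
            }
        }
    }
    if out.is_empty() { None } else { Some(out) }
}

fn main() {
    let quoted = [Part::Word("-g"), Part::DoubleQuoted("*.py")];
    assert_eq!(flatten_concatenation(&quoted).as_deref(), Some("-g*.py"));

    let raw = [Part::Word("-g"), Part::RawString("'*.txt'")];
    assert_eq!(flatten_concatenation(&raw).as_deref(), Some("-g*.txt"));

    // In the real parser, a `$VAR` or `$(cmd)` node would not match any accepted
    // part kind, so the whole command is rejected instead of flattened.
}
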
@@ -19,9 +19,10 @@ use codex_api::create_text_param_for_request;
use codex_api::error::ApiError;
use codex_app_server_protocol::AuthMode;
use codex_otel::otel_manager::OtelManager;
use codex_protocol::ConversationId;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::SessionSource;
use eventsource_stream::Event;
@@ -49,7 +50,6 @@ use crate::features::FEATURES;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models_manager::model_family::ModelFamily;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;

@@ -57,10 +57,10 @@ use crate::tools::spec::create_tools_json_for_responses_api;
pub struct ModelClient {
    config: Arc<Config>,
    auth_manager: Option<Arc<AuthManager>>,
    model_family: ModelFamily,
    model_info: ModelInfo,
    otel_manager: OtelManager,
    provider: ModelProviderInfo,
    conversation_id: ConversationId,
    conversation_id: ThreadId,
    effort: Option<ReasoningEffortConfig>,
    summary: ReasoningSummaryConfig,
    session_source: SessionSource,
@@ -71,18 +71,18 @@ impl ModelClient {
    pub fn new(
        config: Arc<Config>,
        auth_manager: Option<Arc<AuthManager>>,
        model_family: ModelFamily,
        model_info: ModelInfo,
        otel_manager: OtelManager,
        provider: ModelProviderInfo,
        effort: Option<ReasoningEffortConfig>,
        summary: ReasoningSummaryConfig,
        conversation_id: ConversationId,
        conversation_id: ThreadId,
        session_source: SessionSource,
    ) -> Self {
        Self {
            config,
            auth_manager,
            model_family,
            model_info,
            otel_manager,
            provider,
            conversation_id,
@@ -93,11 +93,11 @@ impl ModelClient {
    }

    pub fn get_model_context_window(&self) -> Option<i64> {
        let model_family = self.get_model_family();
        let effective_context_window_percent = model_family.effective_context_window_percent;
        model_family
            .context_window
            .map(|w| w.saturating_mul(effective_context_window_percent) / 100)
        let model_info = self.get_model_info();
        let effective_context_window_percent = model_info.effective_context_window_percent;
        model_info.context_window.map(|context_window| {
            context_window.saturating_mul(effective_context_window_percent) / 100
        })
    }

    pub fn config(&self) -> Arc<Config> {
@@ -146,8 +146,8 @@ impl ModelClient {
        }

        let auth_manager = self.auth_manager.clone();
        let model_family = self.get_model_family();
        let instructions = prompt.get_full_instructions(&model_family).into_owned();
        let model_info = self.get_model_info();
        let instructions = prompt.get_full_instructions(&model_info).into_owned();
        let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
        let api_prompt = build_api_prompt(prompt, instructions, tools_json);
        let conversation_id = self.conversation_id.to_string();
@@ -200,13 +200,14 @@ impl ModelClient {
        }

        let auth_manager = self.auth_manager.clone();
        let model_family = self.get_model_family();
        let instructions = prompt.get_full_instructions(&model_family).into_owned();
        let model_info = self.get_model_info();
        let instructions = prompt.get_full_instructions(&model_info).into_owned();
        let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;

        let reasoning = if model_family.supports_reasoning_summaries {
        let default_reasoning_effort = model_info.default_reasoning_level;
        let reasoning = if model_info.supports_reasoning_summaries {
            Some(Reasoning {
                effort: self.effort.or(model_family.default_reasoning_effort),
                effort: self.effort.or(default_reasoning_effort),
                summary: if self.summary == ReasoningSummaryConfig::None {
                    None
                } else {
@@ -223,15 +224,13 @@ impl ModelClient {
            vec![]
        };

        let verbosity = if model_family.support_verbosity {
            self.config
                .model_verbosity
                .or(model_family.default_verbosity)
        let verbosity = if model_info.support_verbosity {
            self.config.model_verbosity.or(model_info.default_verbosity)
        } else {
            if self.config.model_verbosity.is_some() {
                warn!(
                    "model_verbosity is set but ignored as the model does not support verbosity: {}",
                    model_family.family
                    model_info.slug
                );
            }
            None
@@ -298,12 +297,11 @@ impl ModelClient {

    /// Returns the currently configured model slug.
    pub fn get_model(&self) -> String {
        self.get_model_family().get_model_slug().to_string()
        self.model_info.slug.clone()
    }

    /// Returns the currently configured model family.
    pub fn get_model_family(&self) -> ModelFamily {
        self.model_family.clone()
    pub fn get_model_info(&self) -> ModelInfo {
        self.model_info.clone()
    }

    /// Returns the current reasoning effort setting.
@@ -340,7 +338,7 @@ impl ModelClient {
            .with_telemetry(Some(request_telemetry));

        let instructions = prompt
            .get_full_instructions(&self.get_model_family())
            .get_full_instructions(&self.get_model_info())
            .into_owned();
        let payload = ApiCompactionInput {
            model: &self.get_model(),

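Not part of the commit: a standalone sketch of the effective-context-window arithmetic used by `get_model_context_window` above. The numbers are made up for illustration.

fn effective_context_window(context_window: Option<i64>, percent: i64) -> Option<i64> {
    // Same shape as the diff: scale the raw window by the usable percentage.
    context_window.map(|w| w.saturating_mul(percent) / 100)
}

fn main() {
    // e.g. a 272_000-token window with 95% usable yields 258_400 tokens.
    assert_eq!(effective_context_window(Some(272_000), 95), Some(258_400));
    assert_eq!(effective_context_window(None, 95), None);
}
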
@@ -1,15 +1,13 @@
use crate::client_common::tools::ToolSpec;
use crate::error::Result;
use crate::models_manager::model_family::ModelFamily;
pub use codex_api::common::ResponseEvent;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelInfo;
use futures::Stream;
use serde::Deserialize;
use serde_json::Value;
use std::borrow::Cow;
use std::collections::HashSet;
use std::ops::Deref;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
@@ -44,28 +42,12 @@ pub struct Prompt {
}

impl Prompt {
    pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
        let base = self
            .base_instructions_override
            .as_deref()
            .unwrap_or(model.base_instructions.deref());
        // When there are no custom instructions, add apply_patch_tool_instructions if:
        // - the model needs special instructions (4.1)
        // AND
        // - there is no apply_patch tool present
        let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
            ToolSpec::Function(f) => f.name == "apply_patch",
            ToolSpec::Freeform(f) => f.name == "apply_patch",
            _ => false,
        });
        if self.base_instructions_override.is_none()
            && model.needs_special_apply_patch_instructions
            && !is_apply_patch_tool_present
        {
            Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}"))
        } else {
            Cow::Borrowed(base)
        }
    pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelInfo) -> Cow<'a, str> {
        Cow::Borrowed(
            self.base_instructions_override
                .as_deref()
                .unwrap_or(model.base_instructions.as_str()),
        )
    }

    pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
@@ -195,8 +177,13 @@ pub(crate) mod tools {
    LocalShell {},
    // TODO: Understand why we get an error on web_search although the API docs say it's supported.
    // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C
    // The `external_web_access` field determines whether the web search is over cached or live content.
    // https://platform.openai.com/docs/guides/tools-web-search#live-internet-access
    #[serde(rename = "web_search")]
    WebSearch {},
    WebSearch {
        #[serde(skip_serializing_if = "Option::is_none")]
        external_web_access: Option<bool>,
    },
    #[serde(rename = "custom")]
    Freeform(FreeformTool),
}
@@ -206,7 +193,7 @@ pub(crate) mod tools {
        match self {
            ToolSpec::Function(tool) => tool.name.as_str(),
            ToolSpec::LocalShell {} => "local_shell",
            ToolSpec::WebSearch {} => "web_search",
            ToolSpec::WebSearch { .. } => "web_search",
            ToolSpec::Freeform(tool) => tool.name.as_str(),
        }
    }
@@ -272,6 +259,8 @@ mod tests {
        let prompt = Prompt {
            ..Default::default()
        };
        let prompt_with_apply_patch_instructions =
            include_str!("../prompt_with_apply_patch_instructions.md");
        let test_cases = vec![
            InstructionsTestCase {
                slug: "gpt-3.5",
@@ -312,19 +301,16 @@ mod tests {
        ];
        for test_case in test_cases {
            let config = test_config();
            let model_family =
                ModelsManager::construct_model_family_offline(test_case.slug, &config);
            let expected = if test_case.expects_apply_patch_instructions {
                format!(
                    "{}\n{}",
                    model_family.clone().base_instructions,
                    APPLY_PATCH_TOOL_INSTRUCTIONS
                )
            } else {
                model_family.clone().base_instructions
            };
            let model_info = ModelsManager::construct_model_info_offline(test_case.slug, &config);
            if test_case.expects_apply_patch_instructions {
                assert_eq!(
                    model_info.base_instructions.as_str(),
                    prompt_with_apply_patch_instructions
                );
            }

            let full = prompt.get_full_instructions(&model_family);
            let expected = model_info.base_instructions.as_str();
            let full = prompt.get_full_instructions(&model_info);
            assert_eq!(full, expected);
        }
    }

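Not part of the commit: a standalone sketch of how the reshaped WebSearch variant above serializes, assuming the same serde attributes and an internally tagged "type" representation (the tag name is an assumption, not confirmed by the diff); this is a local stand-in, not the crate's ToolSpec.

use serde::Serialize;

#[derive(Serialize)]
#[serde(tag = "type")]
enum ToolSpecSketch {
    #[serde(rename = "web_search")]
    WebSearch {
        // Omitted entirely when unset, so existing payloads are unchanged.
        #[serde(skip_serializing_if = "Option::is_none")]
        external_web_access: Option<bool>,
    },
}

fn main() {
    let cached = ToolSpecSketch::WebSearch { external_web_access: None };
    let live = ToolSpecSketch::WebSearch { external_web_access: Some(true) };
    assert_eq!(serde_json::to_string(&cached).unwrap(), r#"{"type":"web_search"}"#);
    assert_eq!(
        serde_json::to_string(&live).unwrap(),
        r#"{"type":"web_search","external_web_access":true}"#
    );
}
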
File diff suppressed because it is too large
@@ -28,12 +28,12 @@ use crate::error::CodexErr;
use crate::models_manager::manager::ModelsManager;
use codex_protocol::protocol::InitialHistory;

/// Start an interactive sub-Codex conversation and return IO channels.
/// Start an interactive sub-Codex thread and return IO channels.
///
/// The returned `events_rx` yields non-approval events emitted by the sub-agent.
/// Approval requests are handled via `parent_session` and are not surfaced.
/// The returned `ops_tx` allows the caller to submit additional `Op`s to the sub-agent.
pub(crate) async fn run_codex_conversation_interactive(
pub(crate) async fn run_codex_thread_interactive(
    config: Config,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
@@ -52,6 +52,7 @@ pub(crate) async fn run_codex_conversation_interactive(
        Arc::clone(&parent_session.services.skills_manager),
        initial_history.unwrap_or(InitialHistory::New),
        SessionSource::SubAgent(SubAgentSource::Review),
        parent_session.services.agent_control.clone(),
    )
    .await?;
    let codex = Arc::new(codex);
@@ -86,6 +87,7 @@ pub(crate) async fn run_codex_conversation_interactive(
        next_id: AtomicU64::new(0),
        tx_sub: tx_ops,
        rx_event: rx_sub,
        agent_status: Arc::clone(&codex.agent_status),
    })
}

@@ -93,7 +95,7 @@ pub(crate) async fn run_codex_conversation_interactive(
///
/// Internally calls the interactive variant, then immediately submits the provided input.
#[allow(clippy::too_many_arguments)]
pub(crate) async fn run_codex_conversation_one_shot(
pub(crate) async fn run_codex_thread_one_shot(
    config: Config,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
@@ -106,7 +108,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
    // Use a child token so we can stop the delegate after completion without
    // requiring the caller to cancel the parent token.
    let child_cancel = cancel_token.child_token();
    let io = run_codex_conversation_interactive(
    let io = run_codex_thread_interactive(
        config,
        auth_manager,
        models_manager,
@@ -118,11 +120,16 @@ pub(crate) async fn run_codex_conversation_one_shot(
    .await?;

    // Send the initial input to kick off the one-shot turn.
    io.submit(Op::UserInput { items: input }).await?;
    io.submit(Op::UserInput {
        items: input,
        final_output_json_schema: None,
    })
    .await?;

    // Bridge events so we can observe completion and shut down automatically.
    let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
    let ops_tx = io.tx_sub.clone();
    let agent_status = Arc::clone(&io.agent_status);
    let io_for_bridge = io;
    tokio::spawn(async move {
        while let Ok(event) = io_for_bridge.next_event().await {
@@ -154,6 +161,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
        next_id: AtomicU64::new(0),
        rx_event: rx_bridge,
        tx_sub: tx_closed,
        agent_status,
    })
}

@@ -184,6 +192,10 @@ async fn forward_events(
            id: _,
            msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
        } => {}
        Event {
            id: _,
            msg: EventMsg::TokenCount(_),
        } => {}
        Event {
            id: _,
            msg: EventMsg::SessionConfigured(_),
@@ -364,6 +376,7 @@ mod tests {
            next_id: AtomicU64::new(0),
            tx_sub,
            rx_event: rx_events,
            agent_status: Default::default(),
        });

        let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx().await;

@@ -1,3 +1,4 @@
use crate::agent::AgentStatus;
use crate::codex::Codex;
use crate::error::Result as CodexResult;
use crate::protocol::Event;
@@ -5,14 +6,14 @@ use crate::protocol::Op;
use crate::protocol::Submission;
use std::path::PathBuf;

pub struct CodexConversation {
pub struct CodexThread {
    codex: Codex,
    rollout_path: PathBuf,
}

/// Conduit for the bidirectional stream of messages that compose a conversation
/// in Codex.
impl CodexConversation {
/// Conduit for the bidirectional stream of messages that compose a thread
/// (formerly called a conversation) in Codex.
impl CodexThread {
    pub(crate) fn new(codex: Codex, rollout_path: PathBuf) -> Self {
        Self {
            codex,
@@ -33,6 +34,10 @@ impl CodexConversation {
        self.codex.next_event().await
    }

    pub async fn agent_status(&self) -> AgentStatus {
        self.codex.agent_status().await
    }

    pub fn rollout_path(&self) -> PathBuf {
        self.rollout_path.clone()
    }

@@ -86,6 +86,11 @@ async fn run_compact_task_inner(
        model: turn_context.client.get_model(),
        effort: turn_context.client.get_reasoning_effort(),
        summary: turn_context.client.get_reasoning_summary(),
        base_instructions: turn_context.base_instructions.clone(),
        user_instructions: turn_context.user_instructions.clone(),
        developer_instructions: turn_context.developer_instructions.clone(),
        final_output_json_schema: turn_context.final_output_json_schema.clone(),
        truncation_policy: Some(turn_context.truncation_policy.into()),
    });
    sess.persist_rollout_items(&[rollout_item]).await;

@@ -103,7 +108,7 @@ async fn run_compact_task_inner(
        sess.notify_background_event(
            turn_context.as_ref(),
            format!(
                "Trimmed {truncated_count} older conversation item(s) before compacting so the prompt fits the model context window."
                "Trimmed {truncated_count} older thread item(s) before compacting so the prompt fits the model context window."
            ),
        )
        .await;
@@ -177,7 +182,7 @@ async fn run_compact_task_inner(
    sess.send_event(&turn_context, event).await;

    let warning = EventMsg::Warning(WarningEvent {
        message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start a new conversation when possible to keep conversations small and targeted.".to_string(),
        message: "Heads up: Long threads and multiple compactions can cause the model to be less accurate. Start a new thread when possible to keep threads small and targeted.".to_string(),
    });
    sess.send_event(&turn_context, warning).await;
}

@@ -13,6 +13,7 @@ use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
@@ -37,12 +38,12 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use serde::Deserialize;
use serde::Serialize;
use similar::DiffableStr;
use std::collections::BTreeMap;
use std::collections::HashMap;
@@ -93,6 +94,10 @@ pub(crate) fn test_config() -> Config {
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
    /// Provenance for how this [`Config`] was derived (merged layers + enforced
    /// requirements).
    pub config_layer_stack: ConfigLayerStack,

    /// Optional override of model selection.
    pub model: Option<String>,

@@ -263,7 +268,6 @@ pub struct Config {
    /// Additional filenames to try when looking for project-level docs.
    pub project_doc_fallback_filenames: Vec<String>,

    // todo(aibrahim): this should be used in the override model family
    /// Token budget applied when storing tool/function outputs in the context manager.
    pub tool_output_token_limit: Option<usize>,

@@ -297,9 +301,6 @@ pub struct Config {
    /// Optional override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Optional override to force reasoning summary format for the configured model.
    pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

    /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
    pub model_verbosity: Option<Verbosity>,

@@ -314,7 +315,7 @@ pub struct Config {

    /// Include the `apply_patch` tool for models that benefit from invoking
    /// file edits as a structured tool call. When unset, this falls back to the
    /// model family's default preference.
    /// model info's default preference.
    pub include_apply_patch_tool: bool,

    pub tools_web_search_request: bool,
@@ -351,6 +352,10 @@ pub struct Config {
    /// or placeholder replacement will occur for fast keypress bursts.
    pub disable_paste_burst: bool,

    /// When `false`, disables analytics across Codex product surfaces in this machine.
    /// Defaults to `true`.
    pub analytics: bool,

    /// OTEL configuration (exporter type, endpoint, headers, etc.).
    pub otel: crate::config::types::OtelConfig,
}
@@ -411,11 +416,11 @@ impl ConfigBuilder {
        let config_toml: ConfigToml = merged_toml
            .try_into()
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
        Config::load_config_with_requirements(
        Config::load_config_with_layer_stack(
            config_toml,
            harness_overrides,
            codex_home,
            config_layer_stack.requirements().clone(),
            config_layer_stack,
        )
    }
}
@@ -664,7 +669,7 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R
}

/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigToml {
    /// Optional override of model selection.
    pub model: Option<String>,
@@ -780,9 +785,6 @@ pub struct ConfigToml {
    /// Override to force-enable reasoning summaries for the configured model.
    pub model_supports_reasoning_summaries: Option<bool>,

    /// Override to force reasoning summary format for the configured model.
    pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

    /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
    pub chatgpt_base_url: Option<String>,

@@ -799,6 +801,11 @@ pub struct ConfigToml {
    #[serde(default)]
    pub ghost_snapshot: Option<GhostSnapshotToml>,

    /// Markers used to detect the project root when searching parent
    /// directories for `.codex` folders. Defaults to [".git"] when unset.
    #[serde(default)]
    pub project_root_markers: Option<Vec<String>>,

    /// When `true`, checks for Codex updates on startup and surfaces update prompts.
    /// Set to `false` only if your Codex updates are centrally managed.
    /// Defaults to `true`.
@@ -809,6 +816,10 @@ pub struct ConfigToml {
    /// or placeholder replacement will occur for fast keypress bursts.
    pub disable_paste_burst: Option<bool>,

    /// When `false`, disables analytics across Codex product surfaces in this machine.
    /// Defaults to `true`.
    pub analytics: Option<crate::config::types::AnalyticsConfigToml>,

    /// OTEL configuration.
    pub otel: Option<crate::config::types::OtelConfigToml>,

@@ -853,7 +864,7 @@ impl From<ConfigToml> for UserSavedConfig {
    }
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
    pub trust_level: Option<TrustLevel>,
}
@@ -868,7 +879,7 @@ impl ProjectConfig {
    }
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ToolsToml {
    #[serde(default, alias = "web_search_request")]
    pub web_search: Option<bool>,
@@ -887,7 +898,7 @@ impl From<ToolsToml> for Tools {
    }
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)]
pub struct GhostSnapshotToml {
    /// Exclude untracked files larger than this many bytes from ghost snapshots.
    #[serde(alias = "ignore_untracked_files_over_bytes")]
@@ -1062,16 +1073,17 @@ impl Config {
        codex_home: PathBuf,
    ) -> std::io::Result<Self> {
        // Note this ignores requirements.toml enforcement for tests.
        let requirements = ConfigRequirements::default();
        Self::load_config_with_requirements(cfg, overrides, codex_home, requirements)
        let config_layer_stack = ConfigLayerStack::default();
        Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack)
    }

    fn load_config_with_requirements(
    fn load_config_with_layer_stack(
        cfg: ConfigToml,
        overrides: ConfigOverrides,
        codex_home: PathBuf,
        requirements: ConfigRequirements,
        config_layer_stack: ConfigLayerStack,
    ) -> std::io::Result<Self> {
        let requirements = config_layer_stack.requirements().clone();
        let user_instructions = Self::load_instructions(Some(&codex_home));

        // Destructure ConfigOverrides fully to ensure all overrides are applied.
@@ -1349,6 +1361,7 @@ impl Config {
                .collect(),
            tool_output_token_limit: cfg.tool_output_token_limit,
            codex_home,
            config_layer_stack,
            history,
            file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
            codex_linux_sandbox_exe,
@@ -1366,7 +1379,6 @@ impl Config {
                .or(cfg.model_reasoning_summary)
                .unwrap_or_default(),
            model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
            model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
            model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
            chatgpt_base_url: config_profile
                .chatgpt_base_url
@@ -1385,6 +1397,12 @@ impl Config {
            notices: cfg.notice.unwrap_or_default(),
            check_for_update_on_startup,
            disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
            analytics: config_profile
                .analytics
                .as_ref()
                .and_then(|a| a.enabled)
                .or(cfg.analytics.as_ref().and_then(|a| a.enabled))
                .unwrap_or(true),
            tui_notifications: cfg
                .tui
                .as_ref()
@@ -1864,7 +1882,7 @@ trust_level = "trusted"
        profiles.insert(
            "work".to_string(),
            ConfigProfile {
                tools_view_image: Some(false),
                tools_web_search: Some(false),
                ..Default::default()
            },
        );
@@ -1880,7 +1898,7 @@ trust_level = "trusted"
            codex_home.path().to_path_buf(),
        )?;

        assert!(!config.features.enabled(Feature::ViewImageTool));
        assert!(!config.features.enabled(Feature::WebSearchRequest));

        Ok(())
    }
@@ -2045,6 +2063,7 @@ trust_level = "trusted"
            managed_config_path: Some(managed_path.clone()),
            #[cfg(target_os = "macos")]
            managed_preferences_base64: None,
            macos_managed_config_requirements_base64: None,
        };

        let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
@@ -2165,6 +2184,7 @@ trust_level = "trusted"
            managed_config_path: Some(managed_path),
            #[cfg(target_os = "macos")]
            managed_preferences_base64: None,
            macos_managed_config_requirements_base64: None,
        };

        let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
@@ -3032,6 +3052,9 @@ approval_policy = "untrusted"
# `ConfigOverrides`.
profile = "gpt3"

[analytics]
enabled = true

[model_providers.openai-chat-completions]
name = "OpenAI using Chat Completions"
base_url = "https://api.openai.com/v1"
@@ -3057,6 +3080,9 @@ model = "o3"
model_provider = "openai"
approval_policy = "on-failure"

[profiles.zdr.analytics]
enabled = false

[profiles.gpt5]
model = "gpt-5.1"
model_provider = "openai"
@@ -3167,6 +3193,7 @@ model_verbosity = "high"
            project_doc_fallback_filenames: Vec::new(),
            tool_output_token_limit: None,
            codex_home: fixture.codex_home(),
            config_layer_stack: Default::default(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            codex_linux_sandbox_exe: None,
@@ -3175,7 +3202,6 @@ model_verbosity = "high"
            model_reasoning_effort: Some(ReasoningEffort::High),
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_supports_reasoning_summaries: None,
            model_reasoning_summary_format: None,
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
@@ -3197,6 +3223,7 @@ model_verbosity = "high"
            tui_notifications: Default::default(),
            animations: true,
            show_tooltips: true,
            analytics: true,
            tui_scroll_events_per_tick: None,
            tui_scroll_wheel_lines: None,
            tui_scroll_trackpad_lines: None,
@@ -3250,6 +3277,7 @@ model_verbosity = "high"
            project_doc_fallback_filenames: Vec::new(),
            tool_output_token_limit: None,
            codex_home: fixture.codex_home(),
            config_layer_stack: Default::default(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            codex_linux_sandbox_exe: None,
@@ -3258,7 +3286,6 @@ model_verbosity = "high"
            model_reasoning_effort: None,
            model_reasoning_summary: ReasoningSummary::default(),
            model_supports_reasoning_summaries: None,
            model_reasoning_summary_format: None,
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
@@ -3280,6 +3307,7 @@ model_verbosity = "high"
            tui_notifications: Default::default(),
            animations: true,
            show_tooltips: true,
            analytics: true,
            tui_scroll_events_per_tick: None,
            tui_scroll_wheel_lines: None,
            tui_scroll_trackpad_lines: None,
@@ -3348,6 +3376,7 @@ model_verbosity = "high"
            project_doc_fallback_filenames: Vec::new(),
            tool_output_token_limit: None,
            codex_home: fixture.codex_home(),
            config_layer_stack: Default::default(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            codex_linux_sandbox_exe: None,
@@ -3356,7 +3385,6 @@ model_verbosity = "high"
            model_reasoning_effort: None,
            model_reasoning_summary: ReasoningSummary::default(),
            model_supports_reasoning_summaries: None,
            model_reasoning_summary_format: None,
            model_verbosity: None,
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
@@ -3378,6 +3406,7 @@ model_verbosity = "high"
            tui_notifications: Default::default(),
            animations: true,
            show_tooltips: true,
            analytics: false,
            tui_scroll_events_per_tick: None,
            tui_scroll_wheel_lines: None,
            tui_scroll_trackpad_lines: None,
@@ -3432,6 +3461,7 @@ model_verbosity = "high"
            project_doc_fallback_filenames: Vec::new(),
            tool_output_token_limit: None,
            codex_home: fixture.codex_home(),
            config_layer_stack: Default::default(),
            history: History::default(),
            file_opener: UriBasedFileOpener::VsCode,
            codex_linux_sandbox_exe: None,
@@ -3440,7 +3470,6 @@ model_verbosity = "high"
            model_reasoning_effort: Some(ReasoningEffort::High),
            model_reasoning_summary: ReasoningSummary::Detailed,
            model_supports_reasoning_summaries: None,
            model_reasoning_summary_format: None,
            model_verbosity: Some(Verbosity::High),
            chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
            base_instructions: None,
@@ -3462,6 +3491,7 @@ model_verbosity = "high"
            tui_notifications: Default::default(),
            animations: true,
            show_tooltips: true,
            analytics: true,
            tui_scroll_events_per_tick: None,
            tui_scroll_wheel_lines: None,
            tui_scroll_trackpad_lines: None,

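Not part of the commit: a standalone sketch of the analytics precedence applied in the constructor above — a profile-level override wins over the top-level [analytics] table, and the flag defaults to true when neither is set.

fn resolve_analytics(profile_enabled: Option<bool>, global_enabled: Option<bool>) -> bool {
    // Mirrors `config_profile.analytics...or(cfg.analytics...).unwrap_or(true)`.
    profile_enabled.or(global_enabled).unwrap_or(true)
}

fn main() {
    assert!(resolve_analytics(None, None));               // default on
    assert!(!resolve_analytics(None, Some(false)));       // [analytics] enabled = false
    assert!(!resolve_analytics(Some(false), Some(true))); // profile override wins
}
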
@@ -1,5 +1,6 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;

use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
@@ -9,7 +10,7 @@ use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ConfigProfile {
    pub model: Option<String>,
    /// The key in the `model_providers` map identifying the
@@ -28,6 +29,7 @@ pub struct ConfigProfile {
    pub experimental_use_freeform_apply_patch: Option<bool>,
    pub tools_web_search: Option<bool>,
    pub tools_view_image: Option<bool>,
    pub analytics: Option<crate::config::types::AnalyticsConfigToml>,
    /// Optional feature toggles scoped to this profile.
    #[serde(default)]
    pub features: Option<crate::features::FeaturesToml>,

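Not part of the commit: a standalone sketch of the new profile-scoped analytics toggle above — a profile table can carry an `analytics` sub-table with an `enabled` flag, as in the `[profiles.zdr.analytics]` fixture earlier in this diff. The struct names here are local stand-ins, not the crate's own types.

use serde::Deserialize;

#[derive(Deserialize, Debug, Default)]
struct AnalyticsTomlSketch {
    enabled: Option<bool>,
}

#[derive(Deserialize, Debug, Default)]
struct ProfileSketch {
    model: Option<String>,
    analytics: Option<AnalyticsTomlSketch>,
}

fn main() {
    // Deserialize one profile table, e.g. the body of [profiles.zdr].
    let profile: ProfileSketch = toml::from_str(
        r#"
model = "o3"

[analytics]
enabled = false
"#,
    )
    .unwrap();
    assert_eq!(profile.model.as_deref(), Some("o3"));
    assert_eq!(profile.analytics.and_then(|a| a.enabled), Some(false));
}
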
@@ -106,16 +106,7 @@ pub struct ConfigService {
}

impl ConfigService {
    pub fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
        Self {
            codex_home,
            cli_overrides,
            loader_overrides: LoaderOverrides::default(),
        }
    }

    #[cfg(test)]
    fn with_overrides(
    pub fn new(
        codex_home: PathBuf,
        cli_overrides: Vec<(String, TomlValue)>,
        loader_overrides: LoaderOverrides,
@@ -127,6 +118,14 @@ impl ConfigService {
        }
    }

    pub fn new_with_defaults(codex_home: PathBuf) -> Self {
        Self {
            codex_home,
            cli_overrides: Vec::new(),
            loader_overrides: LoaderOverrides::default(),
        }
    }

    pub async fn read(
        &self,
        params: ConfigReadParams,
@@ -556,6 +555,10 @@ fn override_message(layer: &ConfigLayerSource) -> String {
        ConfigLayerSource::System { file } => {
            format!("Overridden by managed config (system): {}", file.display())
        }
        ConfigLayerSource::Project { dot_codex_folder } => format!(
            "Overridden by project config: {}/{CONFIG_TOML_FILE}",
            dot_codex_folder.display(),
        ),
        ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(),
        ConfigLayerSource::User { file } => {
            format!("Overridden by user config: {}", file.display())
@@ -703,7 +706,7 @@ unified_exec = true
"#;
        std::fs::write(tmp.path().join(CONFIG_TOML_FILE), original)?;

        let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
        let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
        service
            .write_value(ConfigValueWriteParams {
                file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()),
@@ -744,13 +747,14 @@ remote_compaction = true
        std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
        let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

        let service = ConfigService::with_overrides(
        let service = ConfigService::new(
            tmp.path().to_path_buf(),
            vec![],
            LoaderOverrides {
                managed_config_path: Some(managed_path.clone()),
                #[cfg(target_os = "macos")]
                managed_preferences_base64: None,
                macos_managed_config_requirements_base64: None,
            },
        );

@@ -774,15 +778,41 @@ remote_compaction = true
            },
        );
        let layers = response.layers.expect("layers present");
        assert_eq!(layers.len(), 2, "expected two layers");
        assert_eq!(
            layers.first().unwrap().name,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
        );
        assert_eq!(
            layers.get(1).unwrap().name,
            ConfigLayerSource::User { file: user_file }
        );
        if cfg!(unix) {
            let system_file = AbsolutePathBuf::from_absolute_path(
                crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX,
            )
            .expect("system file");
            assert_eq!(layers.len(), 3, "expected three layers on unix");
            assert_eq!(
                layers.first().unwrap().name,
                ConfigLayerSource::LegacyManagedConfigTomlFromFile {
                    file: managed_file.clone()
                }
            );
            assert_eq!(
                layers.get(1).unwrap().name,
                ConfigLayerSource::User {
                    file: user_file.clone()
                }
            );
            assert_eq!(
                layers.get(2).unwrap().name,
                ConfigLayerSource::System { file: system_file }
            );
        } else {
            assert_eq!(layers.len(), 2, "expected two layers");
            assert_eq!(
                layers.first().unwrap().name,
                ConfigLayerSource::LegacyManagedConfigTomlFromFile {
                    file: managed_file.clone()
                }
            );
            assert_eq!(
                layers.get(1).unwrap().name,
                ConfigLayerSource::User { file: user_file }
            );
        }
    }

    #[tokio::test]
@@ -798,13 +828,14 @@ remote_compaction = true
        std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
        let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

        let service = ConfigService::with_overrides(
        let service = ConfigService::new(
            tmp.path().to_path_buf(),
            vec![],
            LoaderOverrides {
                managed_config_path: Some(managed_path.clone()),
                #[cfg(target_os = "macos")]
                managed_preferences_base64: None,
                macos_managed_config_requirements_base64: None,
            },
        );

@@ -849,7 +880,7 @@ remote_compaction = true
        let user_path = tmp.path().join(CONFIG_TOML_FILE);
        std::fs::write(&user_path, "model = \"user\"").unwrap();

        let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
        let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
        let error = service
            .write_value(ConfigValueWriteParams {
                file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()),
@@ -872,7 +903,7 @@ remote_compaction = true
        let tmp = tempdir().expect("tempdir");
        std::fs::write(tmp.path().join(CONFIG_TOML_FILE), "").unwrap();

        let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
        let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
        service
            .write_value(ConfigValueWriteParams {
                file_path: None,
@@ -900,13 +931,14 @@ remote_compaction = true
        let managed_path = tmp.path().join("managed_config.toml");
        std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();

        let service = ConfigService::with_overrides(
        let service = ConfigService::new(
            tmp.path().to_path_buf(),
            vec![],
            LoaderOverrides {
                managed_config_path: Some(managed_path.clone()),
                #[cfg(target_os = "macos")]
                managed_preferences_base64: None,
                macos_managed_config_requirements_base64: None,
            },
        );

@@ -947,13 +979,14 @@ remote_compaction = true
            TomlValue::String("session".to_string()),
        )];

        let service = ConfigService::with_overrides(
        let service = ConfigService::new(
            tmp.path().to_path_buf(),
            cli_overrides,
            LoaderOverrides {
                managed_config_path: Some(managed_path.clone()),
                #[cfg(target_os = "macos")]
                managed_preferences_base64: None,
                macos_managed_config_requirements_base64: None,
            },
        );

@@ -992,13 +1025,14 @@ remote_compaction = true
        std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
        let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

        let service = ConfigService::with_overrides(
        let service = ConfigService::new(
            tmp.path().to_path_buf(),
            vec![],
            LoaderOverrides {
                managed_config_path: Some(managed_path.clone()),
                #[cfg(target_os = "macos")]
                managed_preferences_base64: None,
                macos_managed_config_requirements_base64: None,
            },
        );

@@ -1050,7 +1084,7 @@ alpha = "a"

        std::fs::write(&path, base)?;

        let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
        let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
        service
            .write_value(ConfigValueWriteParams {
                file_path: Some(path.display().to_string()),

@@ -221,7 +221,7 @@ mod option_duration_secs {
    }
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
pub enum UriBasedFileOpener {
    #[serde(rename = "vscode")]
    VsCode,
@@ -253,7 +253,7 @@ impl UriBasedFileOpener {
}

/// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct History {
    /// If true, history entries will not be written to disk.
    pub persistence: HistoryPersistence,
@@ -263,7 +263,7 @@ pub struct History {
    pub max_bytes: Option<usize>,
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum HistoryPersistence {
    /// Save all history entries to disk.
@@ -273,9 +273,18 @@ pub enum HistoryPersistence {
    None,
}

// ===== Analytics configuration =====

/// Analytics settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct AnalyticsConfigToml {
    /// When `false`, disables analytics across Codex product surfaces in this profile.
    pub enabled: Option<bool>,
}

// ===== OTEL configuration =====

#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
    /// Binary payload
@@ -284,7 +293,7 @@ pub enum OtelHttpProtocol {
    Json,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub struct OtelTlsConfig {
    pub ca_certificate: Option<AbsolutePathBuf>,
@@ -293,7 +302,7 @@ pub struct OtelTlsConfig {
}

/// Which OTEL exporter to use.
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
    None,
@@ -315,7 +324,7 @@ pub enum OtelExporterKind {
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OtelConfigToml {
    /// Log user prompt in traces
    pub log_user_prompt: Option<bool>,
@@ -350,7 +359,7 @@ impl Default for OtelConfig {
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
    Enabled(bool),
@@ -368,7 +377,7 @@ impl Default for Notifications {
/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse
/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to
/// infer wheel vs trackpad per stream, or forces a specific behavior.
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ScrollInputMode {
    /// Infer wheel vs trackpad behavior per scroll stream.
@@ -386,7 +395,7 @@ impl Default for ScrollInputMode {
}

/// Collection of settings that are specific to the TUI.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
    /// Enable desktop notifications from the TUI when the terminal is unfocused.
    /// Defaults to `true`.
@@ -514,7 +523,7 @@ const fn default_true() -> bool {
/// Settings for notices we display to users via the tui and app-server clients
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Notice {
    /// Tracks whether the user has acknowledged the full access warning prompt.
    pub hide_full_access_warning: Option<bool>,
@@ -537,7 +546,7 @@ impl Notice {
    pub(crate) const TABLE_KEY: &'static str = "notice";
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
    #[serde(default)]
    pub writable_roots: Vec<AbsolutePathBuf>,
@@ -560,7 +569,7 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
    }
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {
    /// "Core" environment variables for the platform. On UNIX, this would
@@ -577,7 +586,7 @@ pub enum ShellEnvironmentPolicyInherit {

/// Policy for building the `env` when spawning a process via either the
/// `shell` or `local_shell` tool.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct ShellEnvironmentPolicyToml {
    pub inherit: Option<ShellEnvironmentPolicyInherit>,

@@ -33,11 +33,13 @@ pub(super) async fn load_config_layers_internal(
    let LoaderOverrides {
        managed_config_path,
        managed_preferences_base64,
        ..
    } = overrides;

    #[cfg(not(target_os = "macos"))]
    let LoaderOverrides {
        managed_config_path,
        ..
    } = overrides;

    let managed_config_path = AbsolutePathBuf::from_absolute_path(
@@ -91,12 +93,8 @@ pub(super) async fn read_config_from_path(
    }
}

/// Return the default managed config path (honoring `CODEX_MANAGED_CONFIG_PATH`).
/// Return the default managed config path.
pub(super) fn managed_config_default_path(codex_home: &Path) -> PathBuf {
    if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") {
        return PathBuf::from(path);
    }

    #[cfg(unix)]
    {
        let _ = codex_home;

@@ -1,3 +1,4 @@
use super::config_requirements::ConfigRequirementsToml;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
@@ -10,6 +11,7 @@ use toml::Value as TomlValue;

const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";

pub(crate) async fn load_managed_admin_config_layer(
    override_base64: Option<&str>,
@@ -19,82 +21,126 @@ pub(crate) async fn load_managed_admin_config_layer(
        return if trimmed.is_empty() {
            Ok(None)
        } else {
            parse_managed_preferences_base64(trimmed).map(Some)
            parse_managed_config_base64(trimmed).map(Some)
        };
    }

    const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

    match task::spawn_blocking(load_managed_admin_config).await {
        Ok(result) => result,
        Err(join_err) => {
            if join_err.is_cancelled() {
                tracing::error!("Managed preferences load task was cancelled");
                tracing::error!("Managed config load task was cancelled");
            } else {
                tracing::error!("Managed preferences load task failed: {join_err}");
                tracing::error!("Managed config load task failed: {join_err}");
            }
            Err(io::Error::other(LOAD_ERROR))
            Err(io::Error::other("Failed to load managed config"))
        }
    }
}

fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
    load_managed_preference(MANAGED_PREFERENCES_CONFIG_KEY)?
        .as_deref()
        .map(str::trim)
        .map(parse_managed_config_base64)
        .transpose()
}

pub(crate) async fn load_managed_admin_requirements_toml(
    target: &mut ConfigRequirementsToml,
    override_base64: Option<&str>,
) -> io::Result<()> {
    if let Some(encoded) = override_base64 {
        let trimmed = encoded.trim();
        if !trimmed.is_empty() {
            target.merge_unset_fields(parse_managed_requirements_base64(trimmed)?);
        }
        return Ok(());
    }

    match task::spawn_blocking(load_managed_admin_requirements).await {
        Ok(result) => {
            if let Some(requirements) = result? {
                target.merge_unset_fields(requirements);
            }
            Ok(())
        }
        Err(join_err) => {
            if join_err.is_cancelled() {
                tracing::error!("Managed requirements load task was cancelled");
            } else {
                tracing::error!("Managed requirements load task failed: {join_err}");
            }
            Err(io::Error::other("Failed to load managed requirements"))
        }
    }
}

fn load_managed_admin_requirements() -> io::Result<Option<ConfigRequirementsToml>> {
    load_managed_preference(MANAGED_PREFERENCES_REQUIREMENTS_KEY)?
        .as_deref()
        .map(str::trim)
        .map(parse_managed_requirements_base64)
        .transpose()
}

fn load_managed_preference(key_name: &str) -> io::Result<Option<String>> {
    #[link(name = "CoreFoundation", kind = "framework")]
    unsafe extern "C" {
        fn CFPreferencesCopyAppValue(key: CFStringRef, application_id: CFStringRef) -> *mut c_void;
    }

    let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
    let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

    let value_ref = unsafe {
        CFPreferencesCopyAppValue(
            key.as_concrete_TypeRef(),
            application_id.as_concrete_TypeRef(),
            CFString::new(key_name).as_concrete_TypeRef(),
            CFString::new(MANAGED_PREFERENCES_APPLICATION_ID).as_concrete_TypeRef(),
        )
    };

    if value_ref.is_null() {
        tracing::debug!(
            "Managed preferences for {} key {} not found",
            MANAGED_PREFERENCES_APPLICATION_ID,
            MANAGED_PREFERENCES_CONFIG_KEY
            "Managed preferences for {MANAGED_PREFERENCES_APPLICATION_ID} key {key_name} not found",
        );
        return Ok(None);
    }

    let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
    let contents = value.to_string();
    let trimmed = contents.trim();

    parse_managed_preferences_base64(trimmed).map(Some)
    let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) }.to_string();
    Ok(Some(value))
}

fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
    let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
        tracing::error!("Failed to decode managed preferences as base64: {err}");
        io::Error::new(io::ErrorKind::InvalidData, err)
    })?;

    let decoded_str = String::from_utf8(decoded).map_err(|err| {
        tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
        io::Error::new(io::ErrorKind::InvalidData, err)
    })?;

    match toml::from_str::<TomlValue>(&decoded_str) {
fn parse_managed_config_base64(encoded: &str) -> io::Result<TomlValue> {
    match toml::from_str::<TomlValue>(&decode_managed_preferences_base64(encoded)?) {
        Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
        Ok(other) => {
            tracing::error!(
                "Managed preferences TOML must have a table at the root, found {other:?}",
            );
            tracing::error!("Managed config TOML must have a table at the root, found {other:?}",);
            Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "managed preferences root must be a table",
                "managed config root must be a table",
            ))
        }
        Err(err) => {
            tracing::error!("Failed to parse managed preferences TOML: {err}");
            tracing::error!("Failed to parse managed config TOML: {err}");
            Err(io::Error::new(io::ErrorKind::InvalidData, err))
        }
    }
}

fn parse_managed_requirements_base64(encoded: &str) -> io::Result<ConfigRequirementsToml> {
    toml::from_str::<ConfigRequirementsToml>(&decode_managed_preferences_base64(encoded)?).map_err(
        |err| {
            tracing::error!("Failed to parse managed requirements TOML: {err}");
            io::Error::new(io::ErrorKind::InvalidData, err)
        },
    )
}

fn decode_managed_preferences_base64(encoded: &str) -> io::Result<String> {
    String::from_utf8(BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
        tracing::error!("Failed to decode managed value as base64: {err}",);
        io::Error::new(io::ErrorKind::InvalidData, err)
    })?)
    .map_err(|err| {
        tracing::error!("Managed value base64 contents were not valid UTF-8: {err}",);
        io::Error::new(io::ErrorKind::InvalidData, err)
    })
}

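Not part of the commit: a standalone sketch of producing the payload that the managed-preferences loader above decodes. The TOML is base64-encoded and stored under the `config_toml_base64` (or `requirements_toml_base64`) preference key; the encoding side shown here is the assumed inverse of the decode path, using the same `base64` crate.

use base64::Engine;
use base64::prelude::BASE64_STANDARD;

fn main() {
    let config_toml = "approval_policy = \"never\"\n";

    // Value an administrator would place under the config_toml_base64 key.
    let encoded = BASE64_STANDARD.encode(config_toml.as_bytes());
    println!("config_toml_base64 = {encoded}");

    // Round-trip back to the TOML string, mirroring decode_managed_preferences_base64.
    let decoded = String::from_utf8(BASE64_STANDARD.decode(encoded.as_bytes()).unwrap()).unwrap();
    assert_eq!(decoded, config_toml);
}
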
Some files were not shown because too many files have changed in this diff.