Mirror of https://github.com/openai/codex.git (synced 2026-02-01 22:47:52 +00:00)

Compare commits: 87 commits, jif/cache ... rust-v0.79
Commit SHA1s:

fcbcfb21b9, 915352b10c, 740bf0e755, d1c6329c32, cab7136fb3, 32db8ea5ca,
06e21c7a65, 7ecd0dc9b3, 8858012fd1, 6346e4f560, 4c3d2a5bbe, c92dbea7c1,
771f1ca6ab, b1c93e135b, 5f8776d34d, 58a91a0b50, c29afc0cf3, cafb07fe6e,
07f077dfb3, 7cf6f1c723, 57f8158608, 95580f229e, 720fa67816, fabb797097,
807f8a43c2, 1d8e2b4da8, bba5e5e0d4, 8f10d3bf05, 468ee8a75c, 0b53aed2d0,
649badd102, a8e0fe8bb9, e139ef3e67, db1423ae8b, 1d678c8187, 181ff89cbd,
5678213058, 279283fe02, 0c1658d0ec, 19525efb22, 90f37e8549, ee9d441777,
1b5095b5d1, c673e7adb6, 6846bc1115, efd2d76484, 82fcc087b5, 3cfa4bc8be,
ab753387cc, 2de731490e, 7078a0b676, 79ce79a62e, 66b7c673e9, 13c42a077c,
a48904de72, 4313e0a710, ce3ff29932, 810ebe0d2b, bf732600ea, 38de0a1de4,
e61bae12e3, 96a65ff0ed, 40de81e7af, 972b5853a0, fb24c47bea, f2b740c95d,
0130a2fa40, 53eb2e9f27, 2828549323, cbc5fb9acf, 310f2114ae, e27d9bd88f,
414fbe0da9, 277babba79, 14dbd0610a, f6275a5142, 7d0c5c7bd5, 4673090f73,
8e900c210c, 6b2ef216f1, d65fe38b2c, 7809e36a92, 0237459f71, 314937fb11,
8ff16a7714, 96fdbdd434, 33e1d0844a
BIN .github/codex-cli-login.png (vendored): binary file not shown. Before: 2.9 MiB.
BIN .github/codex-cli-permissions.png (vendored): binary file not shown. Before: 408 KiB.
BIN .github/codex-cli-splash.png (vendored): binary file not shown. Before: 3.1 MiB. After: 818 KiB.
BIN .github/demo.gif (vendored): binary file not shown. Before: 19 MiB.
@@ -12,6 +12,8 @@ permissions:

jobs:
  close-stale-contributor-prs:
    # Prevent scheduled runs on forks
    if: github.repository == 'openai/codex'
    runs-on: ubuntu-latest
    steps:
      - name: Close inactive PRs from contributors
3 .github/workflows/issue-deduplicator.yml (vendored)

@@ -9,7 +9,8 @@ on:
jobs:
  gather-duplicates:
    name: Identify potential duplicates
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
    runs-on: ubuntu-latest
    permissions:
      contents: read
3 .github/workflows/issue-labeler.yml (vendored)

@@ -9,7 +9,8 @@ on:
jobs:
  gather-labels:
    name: Generate label suggestions
    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
    runs-on: ubuntu-latest
    permissions:
      contents: read
2 .github/workflows/rust-release-prepare.yml (vendored)

@@ -14,6 +14,8 @@ permissions:

jobs:
  prepare:
    # Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
    if: github.repository == 'openai/codex'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
@@ -77,6 +77,12 @@ If you don’t have the tool:
- Prefer deep-equals comparisons whenever possible. Perform `assert_eq!()` on entire objects rather than on individual fields.
- Avoid mutating the process environment in tests; prefer passing environment-derived flags or dependencies from above.

### Spawning workspace binaries in tests (Cargo vs Buck2)

- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
- Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first (see the sketch after this list).
- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed.
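A minimal sketch of that pattern, assuming `cargo_bin` yields a path directly; the binary name, test body, and assertion here are illustrative rather than taken from the repository:

```rust
use std::process::Command;

#[test]
fn spawns_first_party_binary_from_any_cwd() {
    // Resolve to an absolute path up front, so a later `current_dir(...)`
    // cannot break a project-relative `CARGO_BIN_EXE_*` value under Buck2.
    let bin = codex_utils_cargo_bin::cargo_bin("codex");

    let output = Command::new(bin)
        .arg("--version")
        // Deliberately run from elsewhere to exercise the absolute path.
        .current_dir(std::env::temp_dir())
        .output()
        .expect("binary should spawn");

    assert!(output.status.success());
}
```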
### Integration tests (core)

- Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.
77 README.md

@@ -1,13 +1,11 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>

<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
</br>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>

<p align="center">
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
</p>
</p>
</br>
If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>

---
@@ -15,25 +13,19 @@

### Installing and running Codex CLI

Install globally with your preferred package manager. If you use npm:
Install globally with your preferred package manager:

```shell
# Install using npm
npm install -g @openai/codex
```

Alternatively, if you use Homebrew:

```shell
# Install using Homebrew
brew install --cask codex
```

Then simply run `codex` to get started:

```shell
codex
```

If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).
Then simply run `codex` to get started.

<details>
<summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>
@@ -53,60 +45,15 @@ Each archive contains a single entry with the platform baked into the name (e.g.

### Using Codex with your ChatGPT plan

<p align="center">
<img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
</p>

Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).

You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).
You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).

### Model Context Protocol (MCP)
## Docs

Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).

### Configuration

Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).

### Execpolicy

See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.

### Docs & FAQ

- [**Getting started**](./docs/getting-started.md)
  - [CLI usage](./docs/getting-started.md#cli-usage)
  - [Slash Commands](./docs/slash_commands.md)
  - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
  - [Example prompts](./docs/getting-started.md#example-prompts)
  - [Custom prompts](./docs/prompts.md)
  - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
- [**Configuration**](./docs/config.md)
  - [Example config](./docs/example-config.md)
- [**Sandbox & approvals**](./docs/sandbox.md)
- [**Execpolicy quickstart**](./docs/execpolicy.md)
- [**Authentication**](./docs/authentication.md)
  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
- **Automating Codex**
  - [GitHub Action](https://github.com/openai/codex-action)
  - [TypeScript SDK](./sdk/typescript/README.md)
  - [Non-interactive mode (`codex exec`)](./docs/exec.md)
- [**Advanced**](./docs/advanced.md)
  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
  - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
- [**Zero data retention (ZDR)**](./docs/zdr.md)
- [**Codex Documentation**](https://developers.openai.com/codex)
- [**Contributing**](./docs/contributing.md)
- [**Install & build**](./docs/install.md)
  - [System Requirements](./docs/install.md#system-requirements)
  - [DotSlash](./docs/install.md#dotslash)
  - [Build from source](./docs/install.md#build-from-source)
- [**FAQ**](./docs/faq.md)
- [**Installing & building**](./docs/install.md)
- [**Open source fund**](./docs/open-source-fund.md)

---

## License

This repository is licensed under the [Apache-2.0 License](LICENSE).

16 announcement_tip.toml (new file)
@@ -0,0 +1,16 @@
# Example announcement tips for Codex TUI.
# Each [[announcements]] entry is evaluated in order; the last matching one is shown.
# Dates are UTC, formatted as YYYY-MM-DD. The from_date is inclusive and the to_date is exclusive.
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specifies which app should display the announcement (cli, vsce, ...).

[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"

[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2026-01-10"
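A rough sketch of the selection rule those comments describe; the struct, the crates used, and the handling of omitted fields are assumptions for illustration, not the actual implementation:

```rust
use chrono::NaiveDate;
use regex_lite::Regex;

// Hypothetical, simplified view of one [[announcements]] entry.
struct Announcement {
    content: String,
    from_date: Option<NaiveDate>, // inclusive
    to_date: Option<NaiveDate>,   // exclusive
    version_regex: Option<Regex>, // omitted => applies to all versions
    target_app: Option<String>,   // omitted => applies to all apps
}

// Entries are evaluated in order; the last matching one is shown.
fn pick<'a>(
    entries: &'a [Announcement],
    today: NaiveDate,
    version: &str,
    app: &str,
) -> Option<&'a Announcement> {
    entries
        .iter()
        .filter(|a| a.from_date.map_or(true, |d| today >= d))
        .filter(|a| a.to_date.map_or(true, |d| today < d))
        .filter(|a| a.version_regex.as_ref().map_or(true, |r| r.is_match(version)))
        .filter(|a| a.target_app.as_ref().map_or(true, |t| t == app))
        .last()
}
```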
@@ -2,6 +2,7 @@
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

import argparse
from contextlib import contextmanager
import json
import os
import shutil

@@ -12,6 +13,7 @@ import zipfile
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import sys
from typing import Iterable, Sequence
from urllib.parse import urlparse
from urllib.request import urlopen

@@ -77,6 +79,45 @@ RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]

# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI.
DOWNLOAD_TIMEOUT_SECS = 60


def _gha_enabled() -> bool:
    # GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs
    # much easier to scan: groups collapse noisy sections and error annotations surface the
    # failure in the UI without changing the actual exception/traceback output.
    return os.environ.get("GITHUB_ACTIONS") == "true"


def _gha_escape(value: str) -> str:
    # Workflow commands require percent/newline escaping.
    return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")


def _gha_error(*, title: str, message: str) -> None:
    # Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just
    # adds a prominent summary line to the job UI so the root cause is easier to spot.
    if not _gha_enabled():
        return
    print(
        f"::error title={_gha_escape(title)}::{_gha_escape(message)}",
        flush=True,
    )


@contextmanager
def _gha_group(title: str):
    # Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op
    # so local output remains unchanged.
    if _gha_enabled():
        print(f"::group::{_gha_escape(title)}", flush=True)
    try:
        yield
    finally:
        if _gha_enabled():
            print("::endgroup::", flush=True)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Install native Codex binaries.")

@@ -131,18 +172,20 @@ def main() -> int:
    workflow_id = workflow_url.rstrip("/").split("/")[-1]
    print(f"Downloading native artifacts from workflow {workflow_id}...")

    with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
        artifacts_dir = Path(artifacts_dir_str)
        _download_artifacts(workflow_id, artifacts_dir)
        install_binary_components(
            artifacts_dir,
            vendor_dir,
            [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
        )
    with _gha_group(f"Download native artifacts from workflow {workflow_id}"):
        with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
            artifacts_dir = Path(artifacts_dir_str)
            _download_artifacts(workflow_id, artifacts_dir)
            install_binary_components(
                artifacts_dir,
                vendor_dir,
                [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
            )

    if "rg" in components:
        print("Fetching ripgrep binaries...")
        fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
        with _gha_group("Fetch ripgrep binaries"):
            print("Fetching ripgrep binaries...")
            fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)

    print(f"Installed native dependencies into {vendor_dir}")
    return 0

@@ -203,7 +246,14 @@ def fetch_rg(

    for future in as_completed(future_map):
        target = future_map[future]
        results[target] = future.result()
        try:
            results[target] = future.result()
        except Exception as exc:
            _gha_error(
                title="ripgrep install failed",
                message=f"target={target} error={exc!r}",
            )
            raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc
        print(f" installed ripgrep for {target}")

    return [results[target] for target in targets]

@@ -301,6 +351,8 @@ def _fetch_single_rg(
    url = providers[0]["url"]
    archive_format = platform_info.get("format", "zst")
    archive_member = platform_info.get("path")
    digest = platform_info.get("digest")
    expected_size = platform_info.get("size")

    dest_dir = vendor_dir / target / "path"
    dest_dir.mkdir(parents=True, exist_ok=True)

@@ -313,10 +365,32 @@ def _fetch_single_rg(
        tmp_dir = Path(tmp_dir_str)
        archive_filename = os.path.basename(urlparse(url).path)
        download_path = tmp_dir / archive_filename
        _download_file(url, download_path)
        print(
            f" downloading ripgrep for {target} ({platform_key}) from {url}",
            flush=True,
        )
        try:
            _download_file(url, download_path)
        except Exception as exc:
            _gha_error(
                title="ripgrep download failed",
                message=f"target={target} platform={platform_key} url={url} error={exc!r}",
            )
            raise RuntimeError(
                "Failed to download ripgrep "
                f"(target={target}, platform={platform_key}, format={archive_format}, "
                f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})."
            ) from exc

        dest.unlink(missing_ok=True)
        extract_archive(download_path, archive_format, archive_member, dest)
        try:
            extract_archive(download_path, archive_format, archive_member, dest)
        except Exception as exc:
            raise RuntimeError(
                "Failed to extract ripgrep "
                f"(target={target}, platform={platform_key}, format={archive_format}, "
                f"member={archive_member!r}, url={url}, archive={download_path})."
            ) from exc

        if not is_windows:
            dest.chmod(0o755)

@@ -326,7 +400,9 @@ def _fetch_single_rg(

def _download_file(url: str, dest: Path) -> None:
    dest.parent.mkdir(parents=True, exist_ok=True)
    with urlopen(url) as response, open(dest, "wb") as out:
    dest.unlink(missing_ok=True)

    with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out:
        shutil.copyfileobj(response, out)
350 codex-rs/Cargo.lock (generated)

@@ -42,7 +42,7 @@ dependencies = [
 "bitflags 2.10.0",
 "bytes",
 "bytestring",
 "derive_more 2.0.1",
 "derive_more 2.1.1",
 "encoding_rs",
 "foldhash 0.1.5",
 "futures-core",

@@ -137,7 +137,7 @@ dependencies = [
 "bytes",
 "bytestring",
 "cfg-if",
 "derive_more 2.0.1",
 "derive_more 2.1.1",
 "encoding_rs",
 "foldhash 0.1.5",
 "futures-core",

@@ -329,12 +329,12 @@ name = "app_test_support"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "base64",
 "chrono",
 "codex-app-server-protocol",
 "codex-core",
 "codex-protocol",
 "codex-utils-cargo-bin",
 "core_test_support",
 "serde",
 "serde_json",

@@ -365,6 +365,12 @@ dependencies = [
 "x11rb",
]

[[package]]
name = "arc-swap"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"

[[package]]
name = "arrayvec"
version = "0.7.6"

@@ -906,9 +912,9 @@ dependencies = [

[[package]]
name = "clap_complete"
version = "4.5.57"
version = "4.5.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad"
checksum = "4c0da80818b2d95eca9aa614a30783e42f62bf5fdfee24e68cfb960b071ba8d1"
dependencies = [
 "clap",
]

@@ -987,7 +993,6 @@ version = "0.0.0"
dependencies = [
 "anyhow",
 "app_test_support",
 "assert_cmd",
 "base64",
 "chrono",
 "codex-app-server-protocol",

@@ -1058,6 +1063,7 @@ dependencies = [
 "anyhow",
 "assert_cmd",
 "assert_matches",
 "codex-utils-cargo-bin",
 "pretty_assertions",
 "similar",
 "tempfile",

@@ -1155,6 +1161,7 @@ dependencies = [
 "codex-tui",
 "codex-tui2",
 "codex-utils-absolute-path",
 "codex-utils-cargo-bin",
 "codex-windows-sandbox",
 "ctor 0.5.0",
 "libc",

@@ -1256,6 +1263,7 @@ name = "codex-core"
version = "0.0.0"
dependencies = [
 "anyhow",
 "arc-swap",
 "assert_cmd",
 "assert_matches",
 "async-channel",

@@ -1278,6 +1286,7 @@ dependencies = [
 "codex-protocol",
 "codex-rmcp-client",
 "codex-utils-absolute-path",
 "codex-utils-cargo-bin",
 "codex-utils-pty",
 "codex-utils-readiness",
 "codex-utils-string",

@@ -1327,7 +1336,7 @@ dependencies = [
 "tokio",
 "tokio-util",
 "toml 0.9.5",
 "toml_edit",
 "toml_edit 0.24.0+spec-1.1.0",
 "tracing",
 "tracing-subscriber",
 "tracing-test",

@@ -1353,6 +1362,7 @@ dependencies = [
 "codex-core",
 "codex-protocol",
 "codex-utils-absolute-path",
 "codex-utils-cargo-bin",
 "core_test_support",
 "libc",
 "mcp-types",

@@ -1378,11 +1388,11 @@ name = "codex-exec-server"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "async-trait",
 "clap",
 "codex-core",
 "codex-execpolicy",
 "codex-utils-cargo-bin",
 "exec_server_test_support",
 "libc",
 "maplit",

@@ -1423,7 +1433,7 @@ dependencies = [
 "allocative",
 "anyhow",
 "clap",
 "derive_more 2.0.1",
 "derive_more 2.1.1",
 "env_logger",
 "log",
 "multimap",

@@ -1444,6 +1454,7 @@ dependencies = [
 "codex-protocol",
 "pretty_assertions",
 "sentry",
 "tracing",
 "tracing-subscriber",
]

@@ -1541,7 +1552,6 @@ name = "codex-mcp-server"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "codex-arg0",
 "codex-common",
 "codex-core",

@@ -1600,7 +1610,6 @@ dependencies = [
 "serde_json",
 "strum_macros 0.27.2",
 "tokio",
 "tonic",
 "tracing",
 "tracing-opentelemetry",
 "tracing-subscriber",

@@ -1665,8 +1674,8 @@ dependencies = [
 "axum",
 "codex-keyring-store",
 "codex-protocol",
 "codex-utils-cargo-bin",
 "dirs",
 "escargot",
 "futures",
 "keyring",
 "mcp-types",

@@ -1693,6 +1702,7 @@ version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "codex-utils-cargo-bin",
 "pretty_assertions",
 "tempfile",
 "uds_windows",

@@ -1722,7 +1732,7 @@ dependencies = [
 "codex-windows-sandbox",
 "color-eyre",
 "crossterm",
 "derive_more 2.0.1",
 "derive_more 2.1.1",
 "diffy",
 "dirs",
 "dunce",

@@ -1749,6 +1759,7 @@ dependencies = [
 "supports-color 3.0.2",
 "tempfile",
 "textwrap 0.16.2",
 "thiserror 2.0.17",
 "tokio",
 "tokio-stream",
 "tokio-util",

@@ -1763,6 +1774,9 @@ dependencies = [
 "url",
 "uuid",
 "vt100",
 "which",
 "windows-sys 0.52.0",
 "winsplit",
]

[[package]]

@@ -1791,7 +1805,7 @@ dependencies = [
 "codex-windows-sandbox",
 "color-eyre",
 "crossterm",
 "derive_more 2.0.1",
 "derive_more 2.1.1",
 "diffy",
 "dirs",
 "dunce",

@@ -1806,6 +1820,7 @@ dependencies = [
 "pulldown-cmark",
 "rand 0.9.2",
 "ratatui",
 "ratatui-core",
 "ratatui-macros",
 "regex-lite",
 "reqwest",

@@ -1827,6 +1842,7 @@ dependencies = [
 "tracing-subscriber",
 "tree-sitter-bash",
 "tree-sitter-highlight",
 "tui-scrollbar",
 "unicode-segmentation",
 "unicode-width 0.2.1",
 "url",

@@ -1855,6 +1871,14 @@ dependencies = [
 "tokio",
]

[[package]]
name = "codex-utils-cargo-bin"
version = "0.0.0"
dependencies = [
 "assert_cmd",
 "thiserror 2.0.17",
]

[[package]]
name = "codex-utils-image"
version = "0.0.0"

@@ -1983,6 +2007,20 @@ dependencies = [
 "static_assertions",
]

[[package]]
name = "compact_str"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
dependencies = [
 "castaway",
 "cfg-if",
 "itoa",
 "rustversion",
 "ryu",
 "static_assertions",
]

[[package]]
name = "concurrent-queue"
version = "2.5.0"

@@ -2004,6 +2042,18 @@ dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "const-hex"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735"
dependencies = [
 "cfg-if",
 "cpufeatures",
 "proptest",
 "serde_core",
]

[[package]]
name = "convert_case"
version = "0.6.0"

@@ -2015,9 +2065,9 @@ dependencies = [

[[package]]
name = "convert_case"
version = "0.7.1"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7"
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
dependencies = [
 "unicode-segmentation",
]

@@ -2058,6 +2108,7 @@ dependencies = [
 "codex-core",
 "codex-protocol",
 "codex-utils-absolute-path",
 "codex-utils-cargo-bin",
 "notify",
 "pretty_assertions",
 "regex-lite",

@@ -2403,11 +2454,11 @@ dependencies = [

[[package]]
name = "derive_more"
version = "2.0.1"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
dependencies = [
 "derive_more-impl 2.0.1",
 "derive_more-impl 2.1.1",
]

[[package]]

@@ -2425,13 +2476,14 @@ dependencies = [

[[package]]
name = "derive_more-impl"
version = "2.0.1"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
dependencies = [
 "convert_case 0.7.1",
 "convert_case 0.10.0",
 "proc-macro2",
 "quote",
 "rustc_version",
 "syn 2.0.104",
 "unicode-xid",
]

@@ -2547,6 +2599,15 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"

[[package]]
name = "document-features"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61"
dependencies = [
 "litrs",
]

[[package]]
name = "dotenvy"
version = "0.15.7"

@@ -2777,8 +2838,8 @@ name = "exec_server_test_support"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "codex-core",
 "codex-utils-cargo-bin",
 "rmcp",
 "serde_json",
 "tokio",

@@ -3713,13 +3774,14 @@ dependencies = [

[[package]]
name = "insta"
version = "1.44.3"
version = "1.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5c943d4415edd8153251b6f197de5eb1640e56d84e8d9159bea190421c73698"
checksum = "1b66886d14d18d420ab5052cbff544fc5d34d0b2cdd35eb5976aaa10a4a472e5"
dependencies = [
 "console",
 "once_cell",
 "similar",
 "tempfile",
]

[[package]]

@@ -3744,17 +3806,6 @@ dependencies = [
 "rustversion",
]

[[package]]
name = "io-uring"
version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
dependencies = [
 "bitflags 2.10.0",
 "cfg-if",
 "libc",
]

[[package]]
name = "ipnet"
version = "2.11.0"

@@ -3883,6 +3934,16 @@ dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "kasuari"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe90c1150662e858c7d5f945089b7517b0a80d8bf7ba4b1b5ffc984e7230a5b"
dependencies = [
 "hashbrown 0.16.0",
 "thiserror 2.0.17",
]

[[package]]
name = "keyring"
version = "3.6.3"

@@ -4028,6 +4089,12 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"

[[package]]
name = "litrs"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092"

[[package]]
name = "local-waker"
version = "0.1.4"

@@ -4146,9 +4213,9 @@ name = "mcp_test_support"
version = "0.0.0"
dependencies = [
 "anyhow",
 "assert_cmd",
 "codex-core",
 "codex-mcp-server",
 "codex-utils-cargo-bin",
 "core_test_support",
 "mcp-types",
 "os_info",

@@ -4649,9 +4716,9 @@ dependencies = [

[[package]]
name = "opentelemetry"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6"
checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0"
dependencies = [
 "futures-core",
 "futures-sink",

@@ -4663,9 +4730,9 @@ dependencies = [

[[package]]
name = "opentelemetry-appender-tracing"
version = "0.30.1"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e68f63eca5fad47e570e00e893094fc17be959c80c79a7d6ec1abdd5ae6ffc16"
checksum = "ef6a1ac5ca3accf562b8c306fa8483c85f4390f768185ab775f242f7fe8fdcc2"
dependencies = [
 "opentelemetry",
 "tracing",

@@ -4675,9 +4742,9 @@ dependencies = [

[[package]]
name = "opentelemetry-http"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d"
checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d"
dependencies = [
 "async-trait",
 "bytes",

@@ -4688,9 +4755,9 @@ dependencies = [

[[package]]
name = "opentelemetry-otlp"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b"
checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf"
dependencies = [
 "http 1.3.1",
 "opentelemetry",

@@ -4708,30 +4775,32 @@ dependencies = [

[[package]]
name = "opentelemetry-proto"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc"
checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f"
dependencies = [
 "base64",
 "hex",
 "const-hex",
 "opentelemetry",
 "opentelemetry_sdk",
 "prost",
 "serde",
 "serde_json",
 "tonic",
 "tonic-prost",
]

[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2"
checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846"

[[package]]
name = "opentelemetry_sdk"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b"
checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd"
dependencies = [
 "futures-channel",
 "futures-executor",

@@ -4739,7 +4808,6 @@ dependencies = [
 "opentelemetry",
 "percent-encoding",
 "rand 0.9.2",
 "serde_json",
 "thiserror 2.0.17",
 "tokio",
 "tokio-stream",

@@ -5095,7 +5163,7 @@ version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983"
dependencies = [
 "toml_edit",
 "toml_edit 0.23.10+spec-1.0.0",
]

[[package]]

@@ -5122,10 +5190,25 @@ dependencies = [
]

[[package]]
name = "prost"
version = "0.13.5"
name = "proptest"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5"
checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40"
dependencies = [
 "bitflags 2.10.0",
 "num-traits",
 "rand 0.9.2",
 "rand_chacha 0.9.0",
 "rand_xorshift",
 "regex-syntax 0.8.5",
 "unarray",
]

[[package]]
name = "prost"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d"
dependencies = [
 "bytes",
 "prost-derive",

@@ -5133,9 +5216,9 @@ dependencies = [

[[package]]
name = "prost-derive"
version = "0.13.5"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425"
dependencies = [
 "anyhow",
 "itertools 0.14.0",

@@ -5335,6 +5418,15 @@ dependencies = [
 "getrandom 0.3.3",
]

[[package]]
name = "rand_xorshift"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
dependencies = [
 "rand_core 0.9.3",
]

[[package]]
name = "ratatui"
version = "0.29.0"

@@ -5342,7 +5434,7 @@ source = "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#
dependencies = [
 "bitflags 2.10.0",
 "cassowary",
 "compact_str",
 "compact_str 0.8.1",
 "crossterm",
 "indoc",
 "instability",

@@ -5351,7 +5443,27 @@ dependencies = [
 "paste",
 "strum 0.26.3",
 "unicode-segmentation",
 "unicode-truncate",
 "unicode-truncate 1.1.0",
 "unicode-width 0.2.1",
]

[[package]]
name = "ratatui-core"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ef8dea09a92caaf73bff7adb70b76162e5937524058a7e5bff37869cbbec293"
dependencies = [
 "bitflags 2.10.0",
 "compact_str 0.9.0",
 "hashbrown 0.16.0",
 "indoc",
 "itertools 0.14.0",
 "kasuari",
 "lru 0.16.2",
 "strum 0.27.2",
 "thiserror 2.0.17",
 "unicode-segmentation",
 "unicode-truncate 2.0.0",
 "unicode-width 0.2.1",
]

@@ -5440,9 +5552,9 @@ dependencies = [

[[package]]
name = "regex-lite"
version = "0.1.7"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30"
checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da"

[[package]]
name = "regex-syntax"

@@ -6518,6 +6630,9 @@ name = "strum"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [
 "strum_macros 0.27.2",
]

[[package]]
name = "strum_macros"

@@ -6909,29 +7024,26 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

[[package]]
name = "tokio"
version = "1.47.1"
version = "1.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
dependencies = [
 "backtrace",
 "bytes",
 "io-uring",
 "libc",
 "mio",
 "parking_lot",
 "pin-project-lite",
 "signal-hook-registry",
 "slab",
 "socket2 0.6.1",
 "tokio-macros",
 "windows-sys 0.59.0",
 "windows-sys 0.61.1",
]

[[package]]
name = "tokio-macros"
version = "2.5.0"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
dependencies = [
 "proc-macro2",
 "quote",

@@ -6960,9 +7072,9 @@ dependencies = [

[[package]]
name = "tokio-stream"
version = "0.1.17"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
dependencies = [
 "futures-core",
 "pin-project-lite",

@@ -7024,18 +7136,30 @@ dependencies = [

[[package]]
name = "toml_datetime"
version = "0.7.3"
version = "0.7.5+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533"
checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347"
dependencies = [
 "serde_core",
]

[[package]]
name = "toml_edit"
version = "0.23.7"
version = "0.23.10+spec-1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d"
checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269"
dependencies = [
 "indexmap 2.12.0",
 "toml_datetime",
 "toml_parser",
 "winnow",
]

[[package]]
name = "toml_edit"
version = "0.24.0+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c740b185920170a6d9191122cafef7010bd6270a3824594bff6784c04d7f09e"
dependencies = [
 "indexmap 2.12.0",
 "toml_datetime",

@@ -7046,30 +7170,28 @@ dependencies = [

[[package]]
name = "toml_parser"
version = "1.0.4"
version = "1.0.6+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e"
checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44"
dependencies = [
 "winnow",
]

[[package]]
name = "toml_writer"
version = "1.0.4"
version = "1.0.6+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2"
checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607"

[[package]]
name = "tonic"
version = "0.13.1"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9"
checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
dependencies = [
 "async-trait",
 "axum",
 "base64",
 "bytes",
 "h2",
 "http 1.3.1",
 "http-body",
 "http-body-util",

@@ -7078,9 +7200,8 @@ dependencies = [
 "hyper-util",
 "percent-encoding",
 "pin-project",
 "prost",
 "rustls-native-certs",
 "socket2 0.5.10",
 "sync_wrapper",
 "tokio",
 "tokio-rustls",
 "tokio-stream",

@@ -7090,6 +7211,17 @@ dependencies = [
 "tracing",
]

[[package]]
name = "tonic-prost"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67"
dependencies = [
 "bytes",
 "prost",
 "tonic",
]

[[package]]
name = "tower"
version = "0.5.2"

@@ -7207,15 +7339,16 @@ dependencies = [

[[package]]
name = "tracing-opentelemetry"
version = "0.31.0"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c"
checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e"
dependencies = [
 "js-sys",
 "once_cell",
 "opentelemetry",
 "opentelemetry_sdk",
 "rustversion",
 "smallvec",
 "thiserror 2.0.17",
 "tracing",
 "tracing-core",
 "tracing-log",

@@ -7225,9 +7358,9 @@ dependencies = [

[[package]]
name = "tracing-subscriber"
version = "0.3.20"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
dependencies = [
 "matchers",
 "nu-ansi-term",

@@ -7346,6 +7479,16 @@ dependencies = [
 "termcolor",
]

[[package]]
name = "tui-scrollbar"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c42613099915b2e30e9f144670666e858e2538366f77742e1cf1c2f230efcacd"
dependencies = [
 "document-features",
 "ratatui-core",
]

[[package]]
name = "typenum"
version = "1.18.0"

@@ -7372,6 +7515,12 @@ dependencies = [
 "libc",
]

[[package]]
name = "unarray"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"

[[package]]
name = "unicase"
version = "2.8.1"

@@ -7407,6 +7556,17 @@ dependencies = [
 "unicode-width 0.1.14",
]

[[package]]
name = "unicode-truncate"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fbf03860ff438702f3910ca5f28f8dac63c1c11e7efb5012b8b175493606330"
dependencies = [
 "itertools 0.13.0",
 "unicode-segmentation",
 "unicode-width 0.2.1",
]

[[package]]
name = "unicode-width"
version = "0.1.14"

@@ -8372,6 +8532,12 @@ version = "0.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"

[[package]]
name = "winsplit"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ab703352da6a72f35c39a533526393725640575bb211f61987a2748323ad956"

[[package]]
name = "wiremock"
version = "0.6.5"
@@ -36,6 +36,7 @@ members = [
    "tui",
    "tui2",
    "utils/absolute-path",
    "utils/cargo-bin",
    "utils/git",
    "utils/cache",
    "utils/image",

@@ -49,7 +50,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.0.0"
version = "0.79.0-alpha.1"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024

@@ -93,6 +94,7 @@ codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-pty = { path = "utils/pty" }

@@ -143,7 +145,7 @@ ignore = "0.4.23"
image = { version = "^0.25.9", default-features = false }
include_dir = "0.7.4"
indexmap = "2.12.0"
insta = "1.44.3"
insta = "1.46.0"
itertools = "0.14.0"
keyring = { version = "3.6", default-features = false }
landlock = "0.4.4"

@@ -158,12 +160,12 @@ notify = "8.2.0"
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.30.0"
opentelemetry-appender-tracing = "0.30.0"
opentelemetry-otlp = "0.30.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry_sdk = "0.30.0"
tracing-opentelemetry = "0.31.0"
opentelemetry = "0.31.0"
opentelemetry-appender-tracing = "0.31.0"
opentelemetry-otlp = "0.31.0"
opentelemetry-semantic-conventions = "0.31.0"
opentelemetry_sdk = "0.31.0"
tracing-opentelemetry = "0.32.0"
os_info = "3.12.0"
owo-colors = "4.2.0"
path-absolutize = "3.1.1"

@@ -174,9 +176,10 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-core = "0.1.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.7"
regex-lite = "0.1.8"
reqwest = "0.12"
rmcp = { version = "0.12.0", default-features = false }
schemars = "0.8.22"

@@ -204,20 +207,20 @@ thiserror = "2.0.17"
time = "0.3"
tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.17"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.23.5"
tonic = "0.13.1"
toml_edit = "0.24.0"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.20"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tui-scrollbar = "0.2.1"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -15,8 +15,8 @@ You can also install via Homebrew (`brew install --cask codex`) or download a pl

## Documentation quickstart

- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).
- First run with Codex? Start with [`docs/getting-started.md`](../docs/getting-started.md) (links to the walkthrough for prompts, keyboard shortcuts, and session management).
- Want deeper control? See [`docs/config.md`](../docs/config.md) and [`docs/install.md`](../docs/install.md).

## What's new in the Rust CLI

@@ -30,7 +30,7 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses

#### MCP client

Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.
Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#connecting-to-mcp-servers) for details.

#### MCP server (experimental)
@@ -384,6 +384,8 @@ pub struct SendUserTurnParams {
    pub model: String,
    pub effort: Option<ReasoningEffort>,
    pub summary: ReasoningSummary,
    /// Optional JSON Schema used to constrain the final assistant message for this turn.
    pub output_schema: Option<serde_json::Value>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -227,6 +227,8 @@ pub enum ConfigLayerSource {
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    System {
        /// This is the path to the system config.toml file, though it is not
        /// guaranteed to exist.
        file: AbsolutePathBuf,
    },

@@ -237,9 +239,19 @@ pub enum ConfigLayerSource {
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    User {
        /// This is the path to the user's config.toml file, though it is not
        /// guaranteed to exist.
        file: AbsolutePathBuf,
    },

    /// Path to a .codex/ folder within a project. There could be multiple of
    /// these between `cwd` and the project/repo root.
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    Project {
        dot_codex_folder: AbsolutePathBuf,
    },

    /// Session-layer overrides supplied via `-c`/`--config`.
    SessionFlags,

@@ -247,6 +259,8 @@
    /// as the last layer on top of everything else. This scheme did not quite
    /// work out as intended, but we keep this variant as a "best effort" while
    /// we phase out `managed_config.toml` in favor of `requirements.toml`.
    #[serde(rename_all = "camelCase")]
    #[ts(rename_all = "camelCase")]
    LegacyManagedConfigTomlFromFile {
        file: AbsolutePathBuf,
    },

@@ -262,6 +276,7 @@ impl ConfigLayerSource {
            ConfigLayerSource::Mdm { .. } => 0,
            ConfigLayerSource::System { .. } => 10,
            ConfigLayerSource::User { .. } => 20,
            ConfigLayerSource::Project { .. } => 25,
            ConfigLayerSource::SessionFlags => 30,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => 40,
            ConfigLayerSource::LegacyManagedConfigTomlFromMdm => 50,
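Read as a merge order: layers with lower numbers are applied first, and higher-precedence layers override them key by key. A minimal sketch of that idea with hypothetical stand-in types (the real merge logic is not part of this diff):

```rust
use std::collections::HashMap;

// Hypothetical stand-in: a config layer tagged with the precedence value
// from the match above, carrying flat key/value settings.
struct Layer {
    precedence: u8,
    values: HashMap<String, String>,
}

// Apply lowest-precedence layers first so that higher layers win on conflicts.
fn effective_config(mut layers: Vec<Layer>) -> HashMap<String, String> {
    layers.sort_by_key(|layer| layer.precedence);
    let mut merged = HashMap::new();
    for layer in layers {
        merged.extend(layer.values); // later layers overwrite earlier keys
    }
    merged
}
```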
@@ -315,6 +330,15 @@ pub struct ProfileV2 {
    pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub struct AnalyticsConfig {
    pub enabled: Option<bool>,
    #[serde(default, flatten)]
    pub additional: HashMap<String, JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]

@@ -339,6 +363,7 @@ pub struct Config {
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub analytics: Option<AnalyticsConfig>,
    #[serde(default, flatten)]
    pub additional: HashMap<String, JsonValue>,
}

@@ -1259,6 +1284,8 @@ pub struct Turn {
pub struct TurnError {
    pub message: String,
    pub codex_error_info: Option<CodexErrorInfo>,
    #[serde(default)]
    pub additional_details: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]

@@ -1302,6 +1329,8 @@ pub struct TurnStartParams {
    pub effort: Option<ReasoningEffort>,
    /// Override the reasoning summary for this turn and subsequent turns.
    pub summary: Option<ReasoningSummary>,
    /// Optional JSON Schema used to constrain the final assistant message for this turn.
    pub output_schema: Option<JsonValue>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -35,6 +35,8 @@ use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;

@@ -113,6 +115,9 @@ enum CliCommand {
    TestLogin,
    /// Fetch the current account rate limits from the Codex app-server.
    GetAccountRateLimits,
    /// List the available models from the Codex app-server.
    #[command(name = "model-list")]
    ModelList,
}

fn main() -> Result<()> {

@@ -134,6 +139,7 @@ fn main() -> Result<()> {
        } => send_follow_up_v2(codex_bin, first_message, follow_up_message),
        CliCommand::TestLogin => test_login(codex_bin),
        CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
        CliCommand::ModelList => model_list(codex_bin),
    }
}

@@ -301,6 +307,18 @@ fn get_account_rate_limits(codex_bin: String) -> Result<()> {
    Ok(())
}

fn model_list(codex_bin: String) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;

    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");

    let response = client.model_list(ModelListParams::default())?;
    println!("< model/list response: {response:?}");

    Ok(())
}

struct CodexClient {
    child: Child,
    stdin: Option<ChildStdin>,

@@ -452,6 +470,16 @@ impl CodexClient {
        self.send_request(request, request_id, "account/rateLimits/read")
    }

    fn model_list(&mut self, params: ModelListParams) -> Result<ModelListResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::ModelList {
            request_id: request_id.clone(),
            params,
        };

        self.send_request(request, request_id, "model/list")
    }

    fn stream_conversation(&mut self, conversation_id: &ConversationId) -> Result<()> {
        loop {
            let notification = self.next_notification()?;
@@ -48,7 +48,6 @@ uuid = { workspace = true, features = ["serde", "v7"] }
|
||||
|
||||
[dev-dependencies]
|
||||
app_test_support = { workspace = true }
|
||||
assert_cmd = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
core_test_support = { workspace = true }
|
||||
mcp-types = { workspace = true }
|
||||
|
||||
@@ -162,7 +162,7 @@ Turns attach user input (text or images) to a thread and trigger Codex generatio
|
||||
- `{"type":"image","url":"https://…png"}`
|
||||
- `{"type":"localImage","path":"/tmp/screenshot.png"}`
|
||||
|
||||
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.
|
||||
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread. `outputSchema` applies only to the current turn.
|
||||
|
||||
```json
|
||||
{ "method": "turn/start", "id": 30, "params": {
|
||||
@@ -178,7 +178,14 @@ You can optionally specify config overrides on the new turn. If specified, these
|
||||
},
|
||||
"model": "gpt-5.1-codex",
|
||||
"effort": "medium",
|
||||
"summary": "concise"
|
||||
"summary": "concise",
|
||||
// Optional JSON Schema to constrain the final assistant message for this turn.
|
||||
"outputSchema": {
|
||||
"type": "object",
|
||||
"properties": { "answer": { "type": "string" } },
|
||||
"required": ["answer"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
} }
|
||||
{ "id": 30, "result": { "turn": {
|
||||
"id": "turn_456",
|
||||
@@ -302,7 +309,7 @@ Event notifications are the server-initiated event stream for thread lifecycles,
|
||||
The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.
|
||||
|
||||
- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
|
||||
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
|
||||
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo?, additionalDetails? } }`.
|
||||
- `turn/diff/updated` — `{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
|
||||
- `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
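
To make the per-item lifecycle above concrete, here is a minimal sketch of one notification sequence as a client might observe it. The ids and the item payload shape are illustrative assumptions, not output captured from a real session:

```json
// Sketch only: ids, the item fields, and the delta step are invented for illustration.
{ "method": "turn/started", "params": { "turn": { "id": "turn_456", "items": [], "status": "inProgress" } } }
{ "method": "item/started", "params": { "item": { "id": "item_1" } } }
// ...zero or more item-specific delta notifications for item_1 stream here...
{ "method": "item/completed", "params": { "item": { "id": "item_1" } } }
{ "method": "turn/completed", "params": { "turn": { "id": "turn_456", "status": "completed" } } }
```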

@@ -352,7 +359,7 @@ There are additional item-specific events:

### Errors

`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo?, additionalDetails? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
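
A rough sketch of the payload shape (the notification envelope and the camelCase enum spelling are assumptions for illustration, not copied from the schema):

```json
// Illustrative only; field values and the "internalServerError" spelling are assumed.
{ "method": "error", "params": {
  "error": {
    "message": "upstream model returned an internal error",
    "codexErrorInfo": "internalServerError",
    "additionalDetails": "optional human-readable context, added in this change"
  }
} }
```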

`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:

@@ -340,6 +340,7 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: None,
};
handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
outgoing
@@ -357,6 +358,7 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: ev.additional_details,
};
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
@@ -1340,6 +1342,7 @@ mod tests {
TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
},
&turn_summary_store,
)
@@ -1351,6 +1354,7 @@ mod tests {
Some(TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
})
);
Ok(())
@@ -1398,6 +1402,7 @@ mod tests {
TurnError {
message: "oops".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1439,6 +1444,7 @@ mod tests {
TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
},
&turn_summary_store,
)
@@ -1467,6 +1473,7 @@ mod tests {
Some(TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
})
);
}
@@ -1691,6 +1698,7 @@ mod tests {
TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
},
&turn_summary_store,
)
@@ -1710,6 +1718,7 @@ mod tests {
TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1746,6 +1755,7 @@ mod tests {
Some(TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
})
);
}
@@ -1766,6 +1776,7 @@ mod tests {
Some(TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
})
);
}

@@ -1103,7 +1103,7 @@ impl CodexMessageProcessor {
}

async fn get_user_saved_config(&self, request_id: RequestId) {
let service = ConfigService::new(self.config.codex_home.clone(), Vec::new());
let service = ConfigService::new_with_defaults(self.config.codex_home.clone());
let user_saved_config: UserSavedConfig = match service.load_user_saved_config().await {
Ok(config) => config,
Err(err) => {
@@ -2579,6 +2579,7 @@ impl CodexMessageProcessor {
let _ = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: None,
})
.await;

@@ -2598,6 +2599,7 @@ impl CodexMessageProcessor {
model,
effort,
summary,
output_schema,
} = params;

let Ok(conversation) = self
@@ -2632,7 +2634,7 @@ impl CodexMessageProcessor {
model,
effort,
summary,
final_output_json_schema: None,
final_output_json_schema: output_schema,
})
.await;

@@ -2650,19 +2652,17 @@ impl CodexMessageProcessor {
};

let skills_manager = self.conversation_manager.skills_manager();
let data = cwds
.into_iter()
.map(|cwd| {
let outcome = skills_manager.skills_for_cwd_with_options(&cwd, force_reload);
let errors = errors_to_info(&outcome.errors);
let skills = skills_to_info(&outcome.skills);
codex_app_server_protocol::SkillsListEntry {
cwd,
skills,
errors,
}
})
.collect();
let mut data = Vec::new();
for cwd in cwds {
let outcome = skills_manager.skills_for_cwd(&cwd, force_reload).await;
let errors = errors_to_info(&outcome.errors);
let skills = skills_to_info(&outcome.skills);
data.push(codex_app_server_protocol::SkillsListEntry {
cwd,
skills,
errors,
});
}
self.outgoing
.send_response(request_id, SkillsListResponse { data })
.await;
@@ -2741,6 +2741,7 @@ impl CodexMessageProcessor {
let turn_id = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: params.output_schema,
})
.await;

@@ -9,6 +9,7 @@ use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_core::config_loader::LoaderOverrides;
use serde_json::json;
use std::path::PathBuf;
use toml::Value as TomlValue;
@@ -19,9 +20,13 @@ pub(crate) struct ConfigApi {
}

impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
pub(crate) fn new(
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
) -> Self {
Self {
service: ConfigService::new(codex_home, cli_overrides),
service: ConfigService::new(codex_home, cli_overrides, loader_overrides),
}
}

@@ -1,7 +1,8 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]

use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
@@ -17,13 +18,11 @@ use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

@@ -44,6 +43,7 @@ const CHANNEL_CAPACITY: usize = 128;
pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
loader_overrides: LoaderOverrides,
) -> IoResult<()> {
// Set up channels.
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
@@ -80,7 +80,11 @@ pub async fn run_main(
format!("error parsing -c overrides: {e}"),
)
})?;
let config = Config::load_with_cli_overrides(cli_kv_overrides.clone())
let loader_overrides_for_config_api = loader_overrides.clone();
let config = ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides)
.build()
.await
.map_err(|e| {
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
@@ -103,11 +107,8 @@ pub async fn run_main(
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
.with_filter(EnvFilter::from_default_env());

let feedback_layer = tracing_subscriber::fmt::layer()
.with_writer(feedback.make_writer())
.with_ansi(false)
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));
let feedback_layer = feedback.logger_layer();
let feedback_metadata_layer = feedback.metadata_layer();

let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());

@@ -116,6 +117,7 @@ pub async fn run_main(
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(feedback_metadata_layer)
.with(otel_logger_layer)
.with(otel_tracing_layer)
.try_init();
@@ -124,11 +126,13 @@ pub async fn run_main(
let processor_handle = tokio::spawn({
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
let loader_overrides = loader_overrides_for_config_api;
let mut processor = MessageProcessor::new(
outgoing_message_sender,
codex_linux_sandbox_exe,
std::sync::Arc::new(config),
cli_overrides,
loader_overrides,
feedback.clone(),
);
async move {

@@ -1,10 +1,42 @@
use codex_app_server::run_main;
use codex_arg0::arg0_dispatch_or_else;
use codex_common::CliConfigOverrides;
use codex_core::config_loader::LoaderOverrides;
use std::path::PathBuf;

// Debug-only test hook: lets integration tests point the server at a temporary
// managed config file without writing to /etc.
const MANAGED_CONFIG_PATH_ENV_VAR: &str = "CODEX_APP_SERVER_MANAGED_CONFIG_PATH";

fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
run_main(codex_linux_sandbox_exe, CliConfigOverrides::default()).await?;
let managed_config_path = managed_config_path_from_debug_env();
let loader_overrides = LoaderOverrides {
managed_config_path,
..Default::default()
};

run_main(
codex_linux_sandbox_exe,
CliConfigOverrides::default(),
loader_overrides,
)
.await?;
Ok(())
})
}

fn managed_config_path_from_debug_env() -> Option<PathBuf> {
#[cfg(debug_assertions)]
{
if let Ok(value) = std::env::var(MANAGED_CONFIG_PATH_ENV_VAR) {
return if value.is_empty() {
None
} else {
Some(PathBuf::from(value))
};
}
}

None
}

@@ -20,6 +20,7 @@ use codex_app_server_protocol::RequestId;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_feedback::CodexFeedback;
@@ -41,6 +42,7 @@ impl MessageProcessor {
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
feedback: CodexFeedback,
) -> Self {
let outgoing = Arc::new(outgoing);
@@ -62,7 +64,7 @@ impl MessageProcessor {
cli_overrides.clone(),
feedback,
);
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides);
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides, loader_overrides);

Self {
outgoing,

@@ -9,12 +9,12 @@ path = "lib.rs"

[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [

@@ -11,7 +11,6 @@ use tokio::process::ChildStdin;
use tokio::process::ChildStdout;

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -49,7 +48,6 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use std::process::Command as StdCommand;
use tokio::process::Command;

pub struct McpProcess {
@@ -78,12 +76,8 @@ impl McpProcess {
codex_home: &Path,
env_overrides: &[(&str, Option<&str>)],
) -> anyhow::Result<Self> {
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
let std_cmd = StdCommand::cargo_bin("codex-app-server")
.context("should find binary for codex-mcp-server")?;

let program = std_cmd.get_program().to_owned();

let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
.context("should find binary for codex-app-server")?;
let mut cmd = Command::new(program);

cmd.stdin(Stdio::piped());

@@ -1,12 +1,10 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::models_manager::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
@@ -25,7 +23,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
} else {
ModelVisibility::Hide
},
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
@@ -37,7 +34,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}

@@ -305,6 +305,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
// Acknowledge sendUserTurn
@@ -418,6 +419,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<((
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(
@@ -443,6 +445,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<((
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(

@@ -7,6 +7,7 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod output_schema;
mod send_message;
mod set_default_model;
mod user_agent;

282
codex-rs/app-server/tests/suite/output_schema.rs
Normal file
@@ -0,0 +1,282 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn send_user_turn_accepts_output_schema_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload1 = response_mock1.single_request().body_json();
assert_eq!(
payload1.pointer("/text/format"),
Some(&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
}))
);

let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let response_mock2 = responses::mount_sse_once(&server, body2).await;

let send_turn_id_2 = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello again".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
let _send_turn_resp_2: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id_2)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload2 = response_mock2.single_request().body_json();
assert_eq!(payload2.pointer("/text/format"), None);

Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

@@ -18,6 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -73,8 +74,7 @@ sandbox_mode = "workspace-write"
}
);
let layers = layers.expect("layers present");
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert_layers_user_then_optional_system(&layers, user_file)?;

Ok(())
}
@@ -136,8 +136,7 @@ view_image = false
);

let layers = layers.expect("layers present");
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert_layers_user_then_optional_system(&layers, user_file)?;

Ok(())
}
@@ -185,7 +184,10 @@ writable_roots = [{}]

let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))],
&[(
"CODEX_APP_SERVER_MANAGED_CONFIG_PATH",
Some(&managed_path_str),
)],
)
.await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -257,12 +259,7 @@ writable_roots = [{}]
);

let layers = layers.expect("layers present");
assert_eq!(layers.len(), 2);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?;

Ok(())
}
@@ -433,3 +430,50 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {

Ok(())
}

fn assert_layers_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[1].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}

fn assert_layers_managed_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
managed_file: AbsolutePathBuf,
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 3);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[2].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 2);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}

@@ -1,6 +1,7 @@
mod account;
mod config_rpc;
mod model_list;
mod output_schema;
mod rate_limits;
mod review;
mod thread_archive;

231
codex-rs/app-server/tests/suite/v2/output_schema.rs
Normal file
@@ -0,0 +1,231 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn turn_start_accepts_output_schema_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let turn_req_1 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
})
.await?;
let turn_resp_1: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_1)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_1)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let payload1 = response_mock1.single_request().body_json();
assert_eq!(
payload1.pointer("/text/format"),
Some(&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
}))
);

let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let response_mock2 = responses::mount_sse_once(&server, body2).await;

let turn_req_2 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello again".to_string(),
}],
output_schema: None,
..Default::default()
})
.await?;
let turn_resp_2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_2)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_2)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let payload2 = response_mock2.single_request().body_json();
assert_eq!(payload2.pointer("/text/format"), None);

Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

@@ -540,6 +540,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
output_schema: None,
})
.await?;
timeout(
@@ -566,6 +567,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
output_schema: None,
})
.await?;
timeout(

@@ -25,5 +25,6 @@ tree-sitter-bash = { workspace = true }
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

@@ -227,11 +227,14 @@ fn check_start_and_end_lines_strict(
first_line: Option<&&str>,
last_line: Option<&&str>,
) -> Result<(), ParseError> {
let first_line = first_line.map(|line| line.trim());
let last_line = last_line.map(|line| line.trim());

match (first_line, last_line) {
(Some(&first), Some(&last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => {
(Some(first), Some(last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => {
Ok(())
}
(Some(&first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from(
(Some(first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from(
"The first line of the patch must be '*** Begin Patch'",
))),
_ => Err(InvalidPatchError(String::from(
@@ -444,6 +447,25 @@ fn test_parse_patch() {
"The last line of the patch must be '*** End Patch'".to_string()
))
);

assert_eq!(
parse_patch_text(
concat!(
"*** Begin Patch",
" ",
"\n*** Add File: foo\n+hi\n",
" ",
"*** End Patch"
),
ParseMode::Strict
)
.unwrap()
.hunks,
vec![AddFile {
path: PathBuf::from("foo"),
contents: "hi\n".to_string()
}]
);
assert_eq!(
parse_patch_text(
"*** Begin Patch\n\

1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/expected/keep.txt
vendored
Normal file
@@ -0,0 +1 @@
keep
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/keep.txt
vendored
Normal file
@@ -0,0 +1 @@
keep
1
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/input/obsolete.txt
vendored
Normal file
@@ -0,0 +1 @@
obsolete
3
codex-rs/apply-patch/tests/fixtures/scenarios/020_delete_file_success/patch.txt
vendored
Normal file
@@ -0,0 +1,3 @@
*** Begin Patch
*** Delete File: obsolete.txt
*** End Patch
@@ -0,0 +1 @@
two
@@ -0,0 +1 @@
one
@@ -0,0 +1,6 @@
*** Begin Patch
*** Update File: file.txt
@@
-one
+two
*** End Patch
@@ -0,0 +1,2 @@
line1
line3
3
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/input/lines.txt
vendored
Normal file
@@ -0,0 +1,3 @@
line1
line2
line3
7
codex-rs/apply-patch/tests/fixtures/scenarios/021_update_file_deletion_only/patch.txt
vendored
Normal file
@@ -0,0 +1,7 @@
*** Begin Patch
*** Update File: lines.txt
@@
line1
-line2
line3
*** End Patch
@@ -0,0 +1,2 @@
first
second updated
@@ -0,0 +1,2 @@
first
second
8
codex-rs/apply-patch/tests/fixtures/scenarios/022_update_file_end_of_file_marker/patch.txt
vendored
Normal file
@@ -0,0 +1,8 @@
*** Begin Patch
*** Update File: tail.txt
@@
first
-second
+second updated
*** End of File
*** End Patch

@@ -1,8 +1,13 @@
use assert_cmd::prelude::*;
use assert_cmd::Command;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

fn apply_patch_command() -> anyhow::Result<Command> {
Ok(Command::new(codex_utils_cargo_bin::cargo_bin(
"apply_patch",
)?))
}

#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
let tmp = tempdir()?;
@@ -16,8 +21,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
);
Command::cargo_bin("apply_patch")
.expect("should find apply_patch binary")
apply_patch_command()?
.arg(add_patch)
.current_dir(tmp.path())
.assert()
@@ -34,8 +38,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
);
Command::cargo_bin("apply_patch")
.expect("should find apply_patch binary")
apply_patch_command()?
.arg(update_patch)
.current_dir(tmp.path())
.assert()
@@ -59,10 +62,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
);
let mut cmd =
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
cmd.current_dir(tmp.path());
cmd.write_stdin(add_patch)
apply_patch_command()?
.current_dir(tmp.path())
.write_stdin(add_patch)
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nA {file}\n"));
@@ -77,10 +79,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
);
let mut cmd =
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
cmd.current_dir(tmp.path());
cmd.write_stdin(update_patch)
apply_patch_command()?
.current_dir(tmp.path())
.write_stdin(update_patch)
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nM {file}\n"));

@@ -1,4 +1,3 @@
use assert_cmd::prelude::*;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
@@ -9,7 +8,8 @@ use tempfile::tempdir;

#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
for scenario in fs::read_dir("tests/fixtures/scenarios")? {
let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios");
for scenario in fs::read_dir(scenarios_dir)? {
let scenario = scenario?;
let path = scenario.path();
if path.is_dir() {
@@ -36,7 +36,7 @@ fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
// Run apply_patch in the temporary directory. We intentionally do not assert
// on the exit status here; the scenarios are specified purely in terms of
// final filesystem state, which we compare below.
Command::cargo_bin("apply_patch")?
Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?)
.arg(patch)
.current_dir(tmp.path())
.output()?;
@@ -82,11 +82,15 @@ fn snapshot_dir_recursive(
continue;
};
let rel = stripped.to_path_buf();
let file_type = entry.file_type()?;
if file_type.is_dir() {

// Under Buck2, files in `__srcs` are often materialized as symlinks.
// Use `metadata()` (follows symlinks) so our fixture snapshots work
// under both Cargo and Buck2.
let metadata = fs::metadata(&path)?;
if metadata.is_dir() {
entries.insert(rel.clone(), Entry::Dir);
snapshot_dir_recursive(base, &path, entries)?;
} else if file_type.is_file() {
} else if metadata.is_file() {
let contents = fs::read(&path)?;
entries.insert(rel, Entry::File(contents));
}
@@ -98,12 +102,14 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
for entry in fs::read_dir(src)? {
let entry = entry?;
let path = entry.path();
let file_type = entry.file_type()?;
let dest_path = dst.join(entry.file_name());
if file_type.is_dir() {

// See note in `snapshot_dir_recursive` about Buck2 symlink trees.
let metadata = fs::metadata(&path)?;
if metadata.is_dir() {
fs::create_dir_all(&dest_path)?;
copy_dir_recursive(&path, &dest_path)?;
} else if file_type.is_file() {
} else if metadata.is_file() {
if let Some(parent) = dest_path.parent() {
fs::create_dir_all(parent)?;
}

@@ -5,13 +5,13 @@ use std::path::Path;
use tempfile::tempdir;

fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result<assert_cmd::assert::Assert> {
let mut cmd = Command::cargo_bin("apply_patch")?;
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
cmd.current_dir(dir);
Ok(cmd.arg(patch).assert())
}

fn apply_patch_command(dir: &Path) -> anyhow::Result<Command> {
let mut cmd = Command::cargo_bin("apply_patch")?;
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
cmd.current_dir(dir);
Ok(cmd)
}

@@ -60,6 +60,7 @@ codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-
[dev-dependencies]
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
predicates = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

@@ -480,7 +480,12 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
}
Some(Subcommand::AppServer(app_server_cli)) => match app_server_cli.subcommand {
None => {
codex_app_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
codex_app_server::run_main(
codex_linux_sandbox_exe,
root_config_overrides,
codex_core::config_loader::LoaderOverrides::default(),
)
.await?;
}
Some(AppServerSubcommand::GenerateTs(gen_cli)) => {
codex_app_server_protocol::generate_ts(

@@ -24,7 +24,7 @@ prefix_rule(
"#,
)?;

let output = Command::cargo_bin("codex")?
let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
.env("CODEX_HOME", codex_home.path())
.args([
"execpolicy",
@@ -59,3 +59,61 @@ prefix_rule(

Ok(())
}

#[test]
fn execpolicy_check_includes_justification_when_present() -> Result<(), Box<dyn std::error::Error>>
{
let codex_home = TempDir::new()?;
let policy_path = codex_home.path().join("rules").join("policy.rules");
fs::create_dir_all(
policy_path
.parent()
.expect("policy path should have a parent"),
)?;
fs::write(
&policy_path,
r#"
prefix_rule(
pattern = ["git", "push"],
decision = "forbidden",
justification = "pushing is blocked in this repo",
)
"#,
)?;

let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
.env("CODEX_HOME", codex_home.path())
.args([
"execpolicy",
"check",
"--rules",
policy_path
.to_str()
.expect("policy path should be valid UTF-8"),
"git",
"push",
"origin",
"main",
])
.output()?;

assert!(output.status.success());
let result: serde_json::Value = serde_json::from_slice(&output.stdout)?;
assert_eq!(
result,
json!({
"decision": "forbidden",
"matchedRules": [
{
"prefixRuleMatch": {
"matchedPrefix": ["git", "push"],
"decision": "forbidden",
"justification": "pushing is blocked in this repo"
}
}
]
})
);

Ok(())
}

@@ -8,7 +8,7 @@ use pretty_assertions::assert_eq;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
cmd.env("CODEX_HOME", codex_home);
Ok(cmd)
}

@@ -12,7 +12,7 @@ use serde_json::json;
use tempfile::TempDir;

fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
cmd.env("CODEX_HOME", codex_home);
Ok(cmd)
}

@@ -59,6 +59,7 @@ pub enum ResponseEvent {
summary_index: i64,
},
RateLimits(RateLimitSnapshot),
ModelsEtag(String),
}

#[derive(Debug, Serialize, Clone)]

@@ -152,6 +152,9 @@ impl Stream for AggregatedStream {
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
}
Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag))));
}
Poll::Ready(Some(Ok(ResponseEvent::Completed {
response_id,
token_usage,

@@ -5,6 +5,7 @@ use crate::provider::Provider;
use crate::telemetry::run_with_request_telemetry;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelsResponse;
use http::HeaderMap;
use http::Method;
@@ -41,7 +42,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
&self,
client_version: &str,
extra_headers: HeaderMap,
) -> Result<ModelsResponse, ApiError> {
) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
let builder = || {
let mut req = self.provider.build_request(Method::GET, self.path());
req.headers.extend(extra_headers.clone());
@@ -66,7 +67,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
.and_then(|value| value.to_str().ok())
.map(ToString::to_string);

let ModelsResponse { models, etag } = serde_json::from_slice::<ModelsResponse>(&resp.body)
let ModelsResponse { models } = serde_json::from_slice::<ModelsResponse>(&resp.body)
.map_err(|e| {
ApiError::Stream(format!(
"failed to decode models response: {e}; body: {}",
@@ -74,9 +75,7 @@ impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
))
})?;

let etag = header_etag.unwrap_or(etag);

Ok(ModelsResponse { models, etag })
Ok((models, header_etag))
}
}

@@ -102,16 +101,15 @@ mod tests {
struct CapturingTransport {
last_request: Arc<Mutex<Option<Request>>>,
body: Arc<ModelsResponse>,
etag: Option<String>,
}

impl Default for CapturingTransport {
fn default() -> Self {
Self {
last_request: Arc::new(Mutex::new(None)),
body: Arc::new(ModelsResponse {
models: Vec::new(),
etag: String::new(),
}),
body: Arc::new(ModelsResponse { models: Vec::new() }),
etag: None,
}
}
}
@@ -122,8 +120,8 @@ mod tests {
*self.last_request.lock().unwrap() = Some(req);
let body = serde_json::to_vec(&*self.body).unwrap();
let mut headers = HeaderMap::new();
if !self.body.etag.is_empty() {
headers.insert(ETAG, self.body.etag.parse().unwrap());
if let Some(etag) = &self.etag {
headers.insert(ETAG, etag.parse().unwrap());
}
Ok(Response {
status: StatusCode::OK,
@@ -166,14 +164,12 @@ mod tests {

#[tokio::test]
async fn appends_client_version_query() {
let response = ModelsResponse {
models: Vec::new(),
etag: String::new(),
};
let response = ModelsResponse { models: Vec::new() };

let transport = CapturingTransport {
last_request: Arc::new(Mutex::new(None)),
body: Arc::new(response),
|
||||
etag: None,
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -182,12 +178,12 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, _) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 0);
|
||||
assert_eq!(models.len(), 0);
|
||||
|
||||
let url = transport
|
||||
.last_request
|
||||
@@ -227,17 +223,16 @@ mod tests {
|
||||
"truncation_policy": {"mode": "bytes", "limit": 10_000},
|
||||
"supports_parallel_tool_calls": false,
|
||||
"context_window": null,
|
||||
"reasoning_summary_format": "none",
|
||||
"experimental_supported_tools": [],
|
||||
}))
|
||||
.unwrap(),
|
||||
],
|
||||
etag: String::new(),
|
||||
};
|
||||
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: None,
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -246,27 +241,25 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, _) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 1);
|
||||
assert_eq!(result.models[0].slug, "gpt-test");
|
||||
assert_eq!(result.models[0].supported_in_api, true);
|
||||
assert_eq!(result.models[0].priority, 1);
|
||||
assert_eq!(models.len(), 1);
|
||||
assert_eq!(models[0].slug, "gpt-test");
|
||||
assert_eq!(models[0].supported_in_api, true);
|
||||
assert_eq!(models[0].priority, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_models_includes_etag() {
|
||||
let response = ModelsResponse {
|
||||
models: Vec::new(),
|
||||
etag: "\"abc\"".to_string(),
|
||||
};
|
||||
let response = ModelsResponse { models: Vec::new() };
|
||||
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: Some("\"abc\"".to_string()),
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -275,12 +268,12 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let result = client
|
||||
let (models, etag) = client
|
||||
.list_models("0.1.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(result.models.len(), 0);
|
||||
assert_eq!(result.etag, "\"abc\"");
|
||||
assert_eq!(models.len(), 0);
|
||||
assert_eq!(etag, Some("\"abc\"".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,24 +204,16 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
call_id,
|
||||
..
|
||||
} => {
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
}]
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
});
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::LocalShellCall {
|
||||
id,
|
||||
@@ -229,22 +221,14 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
status,
|
||||
action,
|
||||
} => {
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
}]
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
});
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::FunctionCallOutput { call_id, output } => {
|
||||
let content_value = if let Some(items) = &output.content_items {
|
||||
@@ -277,18 +261,16 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
input,
|
||||
status: _,
|
||||
} => {
|
||||
messages.push(json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
}]
|
||||
}));
|
||||
let tool_call = json!({
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
});
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
}
|
||||
ResponseItem::CustomToolCallOutput { call_id, output } => {
|
||||
messages.push(json!({
|
||||
@@ -328,11 +310,50 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn push_tool_call_message(messages: &mut Vec<Value>, tool_call: Value, reasoning: Option<&str>) {
|
||||
// Chat Completions requires that tool calls are grouped into a single assistant message
|
||||
// (with `tool_calls: [...]`) followed by tool role responses.
|
||||
if let Some(Value::Object(obj)) = messages.last_mut()
|
||||
&& obj.get("role").and_then(Value::as_str) == Some("assistant")
|
||||
&& obj.get("content").is_some_and(Value::is_null)
|
||||
&& let Some(tool_calls) = obj.get_mut("tool_calls").and_then(Value::as_array_mut)
|
||||
{
|
||||
tool_calls.push(tool_call);
|
||||
if let Some(reasoning) = reasoning {
|
||||
if let Some(Value::String(existing)) = obj.get_mut("reasoning") {
|
||||
if !existing.is_empty() {
|
||||
existing.push('\n');
|
||||
}
|
||||
existing.push_str(reasoning);
|
||||
} else {
|
||||
obj.insert(
|
||||
"reasoning".to_string(),
|
||||
Value::String(reasoning.to_string()),
|
||||
);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [tool_call],
|
||||
});
|
||||
if let Some(reasoning) = reasoning
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
}
|
||||
|
||||
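For readers skimming the diff: the net effect of `push_tool_call_message` is that a run of consecutive tool calls collapses into one assistant message with a growing `tool_calls` array. A minimal standalone sketch of that grouping rule (simplified, depending only on `serde_json`; not the crate's exact code):

use serde_json::Value;
use serde_json::json;

// Append a tool call to the trailing assistant tool-call message if one is
// open; otherwise start a new assistant message with a one-element array.
fn push_tool_call(messages: &mut Vec<Value>, tool_call: Value) {
    if let Some(Value::Object(obj)) = messages.last_mut()
        && obj.get("role").and_then(Value::as_str) == Some("assistant")
        && obj.get("content").is_some_and(Value::is_null)
        && let Some(Value::Array(calls)) = obj.get_mut("tool_calls")
    {
        calls.push(tool_call);
        return;
    }
    messages.push(json!({ "role": "assistant", "content": null, "tool_calls": [tool_call] }));
}

fn main() {
    let mut messages = Vec::new();
    for id in ["call-a", "call-b", "call-c"] {
        push_tool_call(&mut messages, json!({ "id": id, "type": "function" }));
    }
    // All three calls land in a single assistant message.
    assert_eq!(messages.len(), 1);
    assert_eq!(messages[0]["tool_calls"].as_array().map(Vec::len), Some(3));
}

The test added in the hunk below exercises exactly this shape end to end.
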
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::RetryConfig;
use crate::provider::WireApi;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use http::HeaderValue;
@@ -385,4 +406,89 @@ mod tests {
Some(&HeaderValue::from_static("review"))
);
}

#[test]
fn groups_consecutive_tool_calls_into_a_single_assistant_message() {
let prompt_input = vec![
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "read these".to_string(),
}],
},
ResponseItem::FunctionCall {
id: None,
name: "read_file".to_string(),
arguments: r#"{"path":"a.txt"}"#.to_string(),
call_id: "call-a".to_string(),
},
ResponseItem::FunctionCall {
id: None,
name: "read_file".to_string(),
arguments: r#"{"path":"b.txt"}"#.to_string(),
call_id: "call-b".to_string(),
},
ResponseItem::FunctionCall {
id: None,
name: "read_file".to_string(),
arguments: r#"{"path":"c.txt"}"#.to_string(),
call_id: "call-c".to_string(),
},
ResponseItem::FunctionCallOutput {
call_id: "call-a".to_string(),
output: FunctionCallOutputPayload {
content: "A".to_string(),
..Default::default()
},
},
ResponseItem::FunctionCallOutput {
call_id: "call-b".to_string(),
output: FunctionCallOutputPayload {
content: "B".to_string(),
..Default::default()
},
},
ResponseItem::FunctionCallOutput {
call_id: "call-c".to_string(),
output: FunctionCallOutputPayload {
content: "C".to_string(),
..Default::default()
},
},
];

let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
.build(&provider())
.expect("request");

let messages = req
.body
.get("messages")
.and_then(|v| v.as_array())
.expect("messages array");
// system + user + assistant(tool_calls=[...]) + 3 tool outputs
assert_eq!(messages.len(), 6);

assert_eq!(messages[0]["role"], "system");
assert_eq!(messages[1]["role"], "user");

let tool_calls_msg = &messages[2];
assert_eq!(tool_calls_msg["role"], "assistant");
assert_eq!(tool_calls_msg["content"], serde_json::Value::Null);
let tool_calls = tool_calls_msg["tool_calls"]
.as_array()
.expect("tool_calls array");
assert_eq!(tool_calls.len(), 3);
assert_eq!(tool_calls[0]["id"], "call-a");
assert_eq!(tool_calls[1]["id"], "call-b");
assert_eq!(tool_calls[2]["id"], "call-c");

assert_eq!(messages[3]["role"], "tool");
assert_eq!(messages[3]["tool_call_id"], "call-a");
assert_eq!(messages[4]["role"], "tool");
assert_eq!(messages[4]["tool_call_id"], "call-b");
assert_eq!(messages[5]["role"], "tool");
assert_eq!(messages[5]["tool_call_id"], "call-c");
}
}

@@ -30,6 +30,21 @@ pub(crate) fn spawn_chat_stream(
ResponseStream { rx_event }
}

/// Processes Server-Sent Events from the legacy Chat Completions streaming API.
///
/// The upstream protocol terminates a streaming response with a final sentinel event
/// (`data: [DONE]`). Historically, some of our test stubs have emitted `data: DONE`
/// (without brackets) instead.
///
/// `eventsource_stream` delivers these sentinels as regular events rather than signaling
/// end-of-stream. If we try to parse them as JSON, we log and skip them, then keep
/// polling for more events.
///
/// On servers that keep the HTTP connection open after emitting the sentinel (notably
/// wiremock on Windows), skipping the sentinel means we never emit `ResponseEvent::Completed`.
/// Higher-level workflows/tests that wait for completion before issuing subsequent model
/// calls will then stall, which shows up as "expected N requests, got 1" verification
/// failures in the mock server.
pub async fn process_chat_sse<S>(
stream: S,
tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>,
@@ -57,6 +72,31 @@ pub async fn process_chat_sse<S>(
let mut reasoning_item: Option<ResponseItem> = None;
let mut completed_sent = false;

async fn flush_and_complete(
tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>,
reasoning_item: &mut Option<ResponseItem>,
assistant_item: &mut Option<ResponseItem>,
) {
if let Some(reasoning) = reasoning_item.take() {
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
.await;
}

if let Some(assistant) = assistant_item.take() {
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
.await;
}

let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
token_usage: None,
}))
.await;
}

loop {
let start = Instant::now();
let response = timeout(idle_timeout, stream.next()).await;
@@ -70,24 +110,8 @@ pub async fn process_chat_sse<S>(
return;
}
Ok(None) => {
if let Some(reasoning) = reasoning_item {
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
.await;
}

if let Some(assistant) = assistant_item {
let _ = tx_event
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
.await;
}
if !completed_sent {
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
token_usage: None,
}))
.await;
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
}
return;
}
@@ -101,16 +125,25 @@ pub async fn process_chat_sse<S>(

trace!("SSE event: {}", sse.data);

if sse.data.trim().is_empty() {
let data = sse.data.trim();

if data.is_empty() {
continue;
}

let value: serde_json::Value = match serde_json::from_str(&sse.data) {
if data == "[DONE]" || data == "DONE" {
if !completed_sent {
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
}
return;
}

let value: serde_json::Value = match serde_json::from_str(data) {
Ok(val) => val,
Err(err) => {
debug!(
"Failed to parse ChatCompletions SSE event: {err}, data: {}",
&sse.data
data
);
continue;
}
@@ -362,6 +395,16 @@ mod tests {
body
}

/// Regression test: the stream should complete when we see a `[DONE]` sentinel.
///
/// This is important for tests/mocks that don't immediately close the underlying
/// connection after emitting the sentinel.
#[tokio::test]
async fn completes_on_done_sentinel_without_json() {
    let events = collect_events("event: message\ndata: [DONE]\n\n").await;
    assert_matches!(&events[..], [ResponseEvent::Completed { .. }]);
}

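A companion case for the bare sentinel spelling (which the same `data == "[DONE]" || data == "DONE"` branch accepts) could look like this — a sketch reusing the `collect_events` helper below, not part of the diff:

#[tokio::test]
async fn completes_on_bare_done_sentinel() {
    // `data: DONE` (no brackets) is the legacy test-stub spelling called out
    // in the doc comment above; it should terminate the stream the same way.
    let events = collect_events("event: message\ndata: DONE\n\n").await;
    assert_matches!(&events[..], [ResponseEvent::Completed { .. }]);
}
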
async fn collect_events(body: &str) -> Vec<ResponseEvent> {
let reader = ReaderStream::new(std::io::Cursor::new(body.to_string()))
.map_err(|err| codex_client::TransportError::Network(err.to_string()));

@@ -51,11 +51,19 @@ pub fn spawn_response_stream(
telemetry: Option<Arc<dyn SseTelemetry>>,
) -> ResponseStream {
let rate_limits = parse_rate_limit(&stream_response.headers);
let models_etag = stream_response
.headers
.get("X-Models-Etag")
.and_then(|v| v.to_str().ok())
.map(ToString::to_string);
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
tokio::spawn(async move {
if let Some(snapshot) = rate_limits {
let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await;
}
if let Some(etag) = models_etag {
let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
}
process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
});

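The `X-Models-Etag` header forwarded here ultimately feeds `refresh_if_new_etag` on the models manager (see the codex.rs hunk further down). A minimal sketch of that cache-validation idea, with assumed names and none of the real crate's types:

// Sketch: re-fetch the model list only when the server reports an ETag that
// differs from the one recorded on the last successful fetch.
struct ModelsCache {
    etag: Option<String>,
}

impl ModelsCache {
    fn refresh_if_new_etag(&mut self, etag: String) -> bool {
        if self.etag.as_deref() == Some(etag.as_str()) {
            return false; // cached list is still current; skip the round trip
        }
        self.etag = Some(etag);
        true // caller should hit the /models endpoint again
    }
}

fn main() {
    let mut cache = ModelsCache { etag: None };
    assert!(cache.refresh_if_new_etag("\"abc\"".to_string()));
    assert!(!cache.refresh_if_new_etag("\"abc\"".to_string()));
}
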
@@ -4,14 +4,12 @@ use codex_api::provider::Provider;
use codex_api::provider::RetryConfig;
use codex_api::provider::WireApi;
use codex_client::ReqwestTransport;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use http::HeaderMap;
use http::Method;
@@ -75,7 +73,6 @@ async fn models_client_hits_models_endpoint() {
],
shell_type: ConfigShellToolType::ShellCommand,
visibility: ModelVisibility::List,
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority: 1,
upgrade: None,
@@ -87,10 +84,8 @@ async fn models_client_hits_models_endpoint() {
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}],
etag: String::new(),
};

Mock::given(method("GET"))
@@ -106,13 +101,13 @@ async fn models_client_hits_models_endpoint() {
let transport = ReqwestTransport::new(reqwest::Client::new());
let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);

let result = client
let (models, _) = client
.list_models("0.1.0", HeaderMap::new())
.await
.expect("models request should succeed");

assert_eq!(result.models.len(), 1);
assert_eq!(result.models[0].slug, "gpt-test");
assert_eq!(models.len(), 1);
assert_eq!(models[0].slug, "gpt-test");

let received = server
.received_requests()

@@ -69,6 +69,15 @@ impl ReqwestTransport {
#[async_trait]
impl HttpTransport for ReqwestTransport {
async fn execute(&self, req: Request) -> Result<Response, TransportError> {
if enabled!(Level::TRACE) {
trace!(
"{} to {}: {}",
req.method,
req.url,
req.body.as_ref().unwrap_or_default()
);
}

let builder = self.build(req)?;
let resp = builder.send().await.map_err(Self::map_error)?;
let status = resp.status();

@@ -16,6 +16,7 @@ workspace = true
anyhow = { workspace = true }
async-channel = { workspace = true }
async-trait = { workspace = true }
arc-swap = "1.7.1"
base64 = { workspace = true }
chardetng = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
@@ -122,6 +123,7 @@ assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-arg0 = { workspace = true }
codex-core = { path = ".", features = ["deterministic_process_ids"] }
codex-utils-cargo-bin = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
escargot = { workspace = true }

@@ -36,11 +36,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
62,
0
],
"minimal_client_version": "0.62.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 1,
@@ -79,11 +75,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 2,
@@ -118,11 +110,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 3,
@@ -165,11 +153,7 @@
],
"shell_type": "shell_command",
"visibility": "list",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 4,
@@ -208,11 +192,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 5,
@@ -251,11 +231,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 6,
@@ -298,11 +274,7 @@
],
"shell_type": "default",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 7,
@@ -337,11 +309,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": "gpt-5.2-codex",
"priority": 8,
@@ -380,11 +348,7 @@
],
"shell_type": "local",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 9,
@@ -427,11 +391,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 10,
@@ -474,11 +434,7 @@
],
"shell_type": "shell_command",
"visibility": "hide",
"minimal_client_version": [
0,
60,
0
],
"minimal_client_version": "0.60.0",
"supported_in_api": true,
"upgrade": null,
"priority": 11,

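The fixture churn above (every `[0, 60, 0]` array becoming "0.60.0") indicates that `minimal_client_version` now serializes as a dotted string. A hedged sketch of one way to get that shape with serde — the real impl lives in codex_protocol::openai_models and may differ:

use serde::Serialize;
use serde::Serializer;

// Assumed shape: a (major, minor, patch) triple rendered as "M.m.p".
pub struct ClientVersion(pub u32, pub u32, pub u32);

impl Serialize for ClientVersion {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.collect_str(&format_args!("{}.{}.{}", self.0, self.1, self.2))
    }
}

fn main() {
    let json = serde_json::to_string(&ClientVersion(0, 62, 0)).expect("serialize");
    assert_eq!(json, "\"0.62.0\"");
}
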
@@ -46,6 +46,7 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
"string_content",
"raw_string",
"number",
"concatenation",
];
// Allow only safe punctuation / operator tokens; anything else causes reject.
const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"];
@@ -158,6 +159,48 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
return None;
}
}
"concatenation" => {
// Handle concatenated arguments like -g"*.py"
let mut concatenated = String::new();
let mut concat_cursor = child.walk();
for part in child.named_children(&mut concat_cursor) {
match part.kind() {
"word" | "number" => {
concatenated
.push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str());
}
"string" => {
if part.child_count() == 3
&& part.child(0)?.kind() == "\""
&& part.child(1)?.kind() == "string_content"
&& part.child(2)?.kind() == "\""
{
concatenated.push_str(
part.child(1)?
.utf8_text(src.as_bytes())
.ok()?
.to_owned()
.as_str(),
);
} else {
return None;
}
}
"raw_string" => {
let raw_string = part.utf8_text(src.as_bytes()).ok()?;
let stripped = raw_string
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''))?;
concatenated.push_str(stripped);
}
_ => return None,
}
}
if concatenated.is_empty() {
return None;
}
words.push(concatenated);
}
_ => return None,
}
}
@@ -256,4 +299,47 @@ mod tests {
let parsed = parse_shell_lc_plain_commands(&command).unwrap();
assert_eq!(parsed, vec![vec!["ls".to_string()]]);
}

#[test]
fn accepts_concatenated_flag_and_value() {
// Test case: -g"*.py" (flag directly concatenated with quoted value)
let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap();
assert_eq!(
cmds,
vec![vec![
"rg".to_string(),
"-n".to_string(),
"foo".to_string(),
"-g*.py".to_string(),
]]
);
}

#[test]
fn accepts_concatenated_flag_with_single_quotes() {
let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap();
assert_eq!(
cmds,
vec![vec![
"grep".to_string(),
"-n".to_string(),
"pattern".to_string(),
"-g*.txt".to_string(),
]]
);
}

#[test]
fn rejects_concatenation_with_variable_substitution() {
// Environment variables in concatenated strings should be rejected
assert!(parse_seq("rg -g\"$VAR\" pattern").is_none());
assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none());
}

#[test]
fn rejects_concatenation_with_command_substitution() {
// Command substitution in concatenated strings should be rejected
assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none());
assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none());
}
}

@@ -13,7 +13,7 @@ use crate::compact;
use crate::compact::run_inline_auto_compact_task;
use crate::compact::should_use_remote_compact_task;
use crate::compact_remote::run_inline_remote_auto_compact_task;
use crate::exec_policy::load_exec_policy_for_features;
use crate::exec_policy::ExecPolicyManager;
use crate::features::Feature;
use crate::features::Features;
use crate::models_manager::manager::ModelsManager;
@@ -88,6 +88,7 @@ use crate::error::Result as CodexResult;
#[cfg(test)]
use crate::exec::StreamOutput;
use crate::exec_policy::ExecPolicyUpdateError;
use crate::feedback_tags;
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::model_provider_info::CHAT_WIRE_API_DEPRECATION_SUMMARY;
@@ -148,7 +149,6 @@ use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;
use codex_async_utils::OrCancelExt;
use codex_execpolicy::Policy as ExecPolicy;
use codex_otel::otel_manager::OtelManager;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ContentItem;
@@ -218,10 +218,11 @@ impl Codex {
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_event, rx_event) = async_channel::unbounded();

let loaded_skills = config
.features
.enabled(Feature::Skills)
.then(|| skills_manager.skills_for_cwd(&config.cwd));
let loaded_skills = if config.features.enabled(Feature::Skills) {
Some(skills_manager.skills_for_config(&config))
} else {
None
};

if let Some(outcome) = &loaded_skills {
for err in &outcome.errors {
@@ -241,14 +242,15 @@ impl Codex {
)
.await;

let exec_policy = load_exec_policy_for_features(&config.features, &config.codex_home)
let exec_policy = ExecPolicyManager::load(&config.features, &config.config_layer_stack)
.await
.map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?;
let exec_policy = Arc::new(RwLock::new(exec_policy));

let config = Arc::new(config);
if config.features.enabled(Feature::RemoteModels)
&& let Err(err) = models_manager.refresh_available_models(&config).await
&& let Err(err) = models_manager
.refresh_available_models_with_cache(&config)
.await
{
error!("failed to refresh available models: {err:?}");
}
@@ -266,7 +268,6 @@ impl Codex {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy,
session_source,
};

@@ -278,6 +279,7 @@ impl Codex {
config.clone(),
auth_manager.clone(),
models_manager.clone(),
exec_policy,
tx_event.clone(),
conversation_history,
session_source_clone,
@@ -371,7 +373,6 @@ pub(crate) struct TurnContext {
pub(crate) final_output_json_schema: Option<Value>,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) tool_call_gate: Arc<ReadinessFlag>,
pub(crate) exec_policy: Arc<RwLock<ExecPolicy>>,
pub(crate) truncation_policy: TruncationPolicy,
}

@@ -426,9 +427,6 @@ pub(crate) struct SessionConfiguration {
/// operate deterministically.
cwd: PathBuf,

/// Execpolicy policy, applied only when enabled by feature flag.
exec_policy: Arc<RwLock<ExecPolicy>>,

// TODO(pakrym): Remove config from here
original_config_do_not_use: Arc<Config>,
/// Source of the session (cli, vscode, exec, mcp, ...)
@@ -533,7 +531,6 @@ impl Session {
final_output_json_schema: None,
codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
tool_call_gate: Arc::new(ReadinessFlag::new()),
exec_policy: session_configuration.exec_policy.clone(),
truncation_policy: TruncationPolicy::new(
per_turn_config.as_ref(),
model_family.truncation_policy,
@@ -547,6 +544,7 @@ impl Session {
config: Arc<Config>,
auth_manager: Arc<AuthManager>,
models_manager: Arc<ModelsManager>,
exec_policy: ExecPolicyManager,
tx_event: Sender<Event>,
initial_history: InitialHistory,
session_source: SessionSource,
@@ -666,6 +664,7 @@ impl Session {
rollout: Mutex::new(Some(rollout_recorder)),
user_shell: Arc::new(default_shell),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: Arc::clone(&auth_manager),
otel_manager,
models_manager: Arc::clone(&models_manager),
@@ -813,6 +812,13 @@ impl Session {
.await;
}

// Seed usage info from the recorded rollout so UIs can show token counts
// immediately on resume/fork.
if let Some(info) = Self::last_token_info_from_rollout(&rollout_items) {
let mut state = self.state.lock().await;
state.set_token_info(Some(info));
}

// If persisting, persist all rollout items as-is (recorder filters)
if persist && !rollout_items.is_empty() {
self.persist_rollout_items(&rollout_items).await;
@@ -823,6 +829,13 @@ impl Session {
}
}

fn last_token_info_from_rollout(rollout_items: &[RolloutItem]) -> Option<TokenUsageInfo> {
rollout_items.iter().rev().find_map(|item| match item {
RolloutItem::EventMsg(EventMsg::TokenCount(ev)) => ev.info.clone(),
_ => None,
})
}

pub(crate) async fn update_settings(
&self,
updates: SessionSettingsUpdate,
@@ -1025,29 +1038,24 @@ impl Session {
amendment: &ExecPolicyAmendment,
) -> Result<(), ExecPolicyUpdateError> {
let features = self.features.clone();
let (codex_home, current_policy) = {
let state = self.state.lock().await;
(
state
.session_configuration
.original_config_do_not_use
.codex_home
.clone(),
state.session_configuration.exec_policy.clone(),
)
};
let codex_home = self
.state
.lock()
.await
.session_configuration
.original_config_do_not_use
.codex_home
.clone();

if !features.enabled(Feature::ExecPolicy) {
error!("attempted to append execpolicy rule while execpolicy feature is disabled");
return Err(ExecPolicyUpdateError::FeatureDisabled);
}

crate::exec_policy::append_execpolicy_amendment_and_update(
&codex_home,
&current_policy,
&amendment.command,
)
.await?;
self.services
.exec_policy
.append_amendment_and_update(&codex_home, amendment)
.await?;

Ok(())
}
@@ -1432,12 +1440,14 @@ impl Session {
message: impl Into<String>,
codex_error: CodexErr,
) {
let additional_details = codex_error.to_string();
let codex_error_info = CodexErrorInfo::ResponseStreamDisconnected {
http_status_code: codex_error.http_status_code_value(),
};
let event = EventMsg::StreamError(StreamErrorEvent {
message: message.into(),
codex_error_info: Some(codex_error_info),
additional_details: Some(additional_details),
});
self.send_event(turn_context, event).await;
}
@@ -1767,7 +1777,16 @@ mod handlers {
final_output_json_schema: Some(final_output_json_schema),
},
),
Op::UserInput { items } => (items, SessionSettingsUpdate::default()),
Op::UserInput {
items,
final_output_json_schema,
} => (
items,
SessionSettingsUpdate {
final_output_json_schema: Some(final_output_json_schema),
..Default::default()
},
),
_ => unreachable!(),
};

@@ -1969,18 +1988,18 @@ mod handlers {
};
let skills = if sess.enabled(Feature::Skills) {
let skills_manager = &sess.services.skills_manager;
cwds.into_iter()
.map(|cwd| {
let outcome = skills_manager.skills_for_cwd_with_options(&cwd, force_reload);
let errors = super::errors_to_info(&outcome.errors);
let skills = super::skills_to_info(&outcome.skills);
SkillsListEntry {
cwd,
skills,
errors,
}
})
.collect()
let mut entries = Vec::new();
for cwd in cwds {
let outcome = skills_manager.skills_for_cwd(&cwd, force_reload).await;
let errors = super::errors_to_info(&outcome.errors);
let skills = super::skills_to_info(&outcome.skills);
entries.push(SkillsListEntry {
cwd,
skills,
errors,
});
}
entries
} else {
cwds.into_iter()
.map(|cwd| SkillsListEntry {
@@ -2059,7 +2078,7 @@ mod handlers {
review_request: ReviewRequest,
) {
let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await;
match resolve_review_request(review_request, config.cwd.as_path()) {
match resolve_review_request(review_request, turn_context.cwd.as_path()) {
Ok(resolved) => {
spawn_review_thread(
Arc::clone(sess),
@@ -2154,7 +2173,6 @@ async fn spawn_review_thread(
final_output_json_schema: None,
codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),
tool_call_gate: Arc::new(ReadinessFlag::new()),
exec_policy: parent_turn_context.exec_policy.clone(),
truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy),
};

@@ -2235,11 +2253,16 @@ pub(crate) async fn run_task(
});
sess.send_event(&turn_context, event).await;

let skills_outcome = sess.enabled(Feature::Skills).then(|| {
sess.services
.skills_manager
.skills_for_cwd(&turn_context.cwd)
});
let skills_outcome = if sess.enabled(Feature::Skills) {
Some(
sess.services
.skills_manager
.skills_for_cwd(&turn_context.cwd, false)
.await,
)
} else {
None
};

let SkillInjections {
items: skill_items,
@@ -2525,8 +2548,22 @@ async fn try_run_turn(
model: turn_context.client.get_model(),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
truncation_policy: Some(turn_context.truncation_policy.into()),
});

feedback_tags!(
model = turn_context.client.get_model(),
approval_policy = turn_context.approval_policy,
sandbox_policy = turn_context.sandbox_policy,
effort = turn_context.client.get_reasoning_effort(),
auth_mode = sess.services.auth_manager.get_auth_mode(),
features = sess.features.enabled_features(),
);

sess.persist_rollout_items(&[rollout_item]).await;
let mut stream = turn_context
.client
@@ -2617,6 +2654,13 @@ async fn try_run_turn(
// token usage is available to avoid duplicate TokenCount events.
sess.update_rate_limits(&turn_context, snapshot).await;
}
ResponseEvent::ModelsEtag(etag) => {
// Update internal state with latest models etag
sess.services
.models_manager
.refresh_if_new_etag(etag, sess.features.enabled(Feature::RemoteModels))
.await;
}
ResponseEvent::Completed {
response_id: _,
token_usage,
@@ -2748,6 +2792,7 @@ mod tests {
use crate::function_tool::FunctionCallError;
use crate::shell::default_user_shell;
use crate::tools::format_exec_output_str;

use codex_protocol::models::FunctionCallOutputPayload;

use crate::protocol::CompactedItem;
@@ -2756,6 +2801,9 @@ mod tests {
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
use crate::protocol::ResumedHistory;
use crate::protocol::TokenCountEvent;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;
use crate::state::TaskKind;
use crate::tasks::SessionTask;
use crate::tasks::SessionTaskContext;
@@ -2810,6 +2858,83 @@ mod tests {
assert_eq!(expected, actual);
}

#[tokio::test]
async fn record_initial_history_seeds_token_info_from_rollout() {
let (session, turn_context) = make_session_and_context().await;
let (mut rollout_items, _expected) = sample_rollout(&session, &turn_context);

let info1 = TokenUsageInfo {
total_token_usage: TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 20,
reasoning_output_tokens: 0,
total_tokens: 30,
},
last_token_usage: TokenUsage {
input_tokens: 3,
cached_input_tokens: 0,
output_tokens: 4,
reasoning_output_tokens: 0,
total_tokens: 7,
},
model_context_window: Some(1_000),
};
let info2 = TokenUsageInfo {
total_token_usage: TokenUsage {
input_tokens: 100,
cached_input_tokens: 50,
output_tokens: 200,
reasoning_output_tokens: 25,
total_tokens: 375,
},
last_token_usage: TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 20,
reasoning_output_tokens: 5,
total_tokens: 35,
},
model_context_window: Some(2_000),
};

rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: Some(info1),
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: None,
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: Some(info2.clone()),
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: None,
rate_limits: None,
},
)));

session
.record_initial_history(InitialHistory::Resumed(ResumedHistory {
conversation_id: ConversationId::default(),
history: rollout_items,
rollout_path: PathBuf::from("/tmp/resume.jsonl"),
}))
.await;

let actual = session.state.lock().await.token_info();
assert_eq!(actual, Some(info2));
}

#[tokio::test]
async fn record_initial_history_reconstructs_forked_transcript() {
let (session, turn_context) = make_session_and_context().await;
@@ -2842,7 +2967,6 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};

@@ -2909,7 +3033,6 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};

@@ -3102,6 +3225,7 @@ mod tests {
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
let exec_policy = ExecPolicyManager::default();
let model = ModelsManager::get_model_offline(config.model.as_deref());
let session_configuration = SessionConfiguration {
provider: config.model_provider.clone(),
@@ -3116,7 +3240,6 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};
let per_turn_config = Session::build_per_turn_config(&session_configuration);
@@ -3142,9 +3265,10 @@ mod tests {
rollout: Mutex::new(None),
user_shell: Arc::new(default_user_shell()),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: auth_manager.clone(),
otel_manager: otel_manager.clone(),
models_manager,
models_manager: Arc::clone(&models_manager),
tool_approvals: Mutex::new(ApprovalStore::default()),
skills_manager,
};
@@ -3188,6 +3312,7 @@ mod tests {
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
let exec_policy = ExecPolicyManager::default();
let model = ModelsManager::get_model_offline(config.model.as_deref());
let session_configuration = SessionConfiguration {
provider: config.model_provider.clone(),
@@ -3202,7 +3327,6 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};
let per_turn_config = Session::build_per_turn_config(&session_configuration);
@@ -3228,9 +3352,10 @@ mod tests {
rollout: Mutex::new(None),
user_shell: Arc::new(default_user_shell()),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: Arc::clone(&auth_manager),
otel_manager: otel_manager.clone(),
models_manager,
models_manager: Arc::clone(&models_manager),
tool_approvals: Mutex::new(ApprovalStore::default()),
skills_manager,
};

@@ -118,7 +118,11 @@ pub(crate) async fn run_codex_conversation_one_shot(
.await?;

// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput { items: input }).await?;
io.submit(Op::UserInput {
items: input,
final_output_json_schema: None,
})
.await?;

// Bridge events so we can observe completion and shut down automatically.
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
@@ -184,6 +188,10 @@ async fn forward_events(
id: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => {}
Event {
id: _,
msg: EventMsg::TokenCount(_),
} => {}
Event {
id: _,
msg: EventMsg::SessionConfigured(_),

@@ -86,6 +86,11 @@ async fn run_compact_task_inner(
model: turn_context.client.get_model(),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
truncation_policy: Some(turn_context.truncation_policy.into()),
});
sess.persist_rollout_items(&[rollout_item]).await;

@@ -13,6 +13,7 @@ use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
@@ -37,12 +38,12 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use serde::Deserialize;
use serde::Serialize;
use similar::DiffableStr;
use std::collections::BTreeMap;
use std::collections::HashMap;
@@ -93,6 +94,10 @@ pub(crate) fn test_config() -> Config {
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// Provenance for how this [`Config`] was derived (merged layers + enforced
/// requirements).
pub config_layer_stack: ConfigLayerStack,

/// Optional override of model selection.
pub model: Option<String>,

@@ -297,9 +302,6 @@ pub struct Config {
/// Optional override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Optional override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,

@@ -351,6 +353,10 @@ pub struct Config {
/// or placeholder replacement will occur for fast keypress bursts.
pub disable_paste_burst: bool,

/// When `false`, disables analytics across Codex product surfaces in this machine.
/// Defaults to `true`.
pub analytics: bool,

/// OTEL configuration (exporter type, endpoint, headers, etc.).
pub otel: crate::config::types::OtelConfig,
}
@@ -411,11 +417,11 @@ impl ConfigBuilder {
let config_toml: ConfigToml = merged_toml
.try_into()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
Config::load_config_with_requirements(
Config::load_config_with_layer_stack(
config_toml,
harness_overrides,
codex_home,
config_layer_stack.requirements().clone(),
config_layer_stack,
)
}
}
@@ -664,7 +670,7 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R
}

/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
@@ -780,9 +786,6 @@ pub struct ConfigToml {
/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,

@@ -799,6 +802,11 @@ pub struct ConfigToml {
#[serde(default)]
pub ghost_snapshot: Option<GhostSnapshotToml>,

/// Markers used to detect the project root when searching parent
/// directories for `.codex` folders. Defaults to [".git"] when unset.
#[serde(default)]
pub project_root_markers: Option<Vec<String>>,

/// When `true`, checks for Codex updates on startup and surfaces update prompts.
/// Set to `false` only if your Codex updates are centrally managed.
/// Defaults to `true`.
@@ -809,6 +817,10 @@ pub struct ConfigToml {
/// or placeholder replacement will occur for fast keypress bursts.
pub disable_paste_burst: Option<bool>,

/// When `false`, disables analytics across Codex product surfaces in this machine.
/// Defaults to `true`.
pub analytics: Option<crate::config::types::AnalyticsConfigToml>,

/// OTEL configuration.
pub otel: Option<crate::config::types::OtelConfigToml>,

@@ -853,7 +865,7 @@ impl From<ConfigToml> for UserSavedConfig {
}
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
pub trust_level: Option<TrustLevel>,
}
@@ -868,7 +880,7 @@ impl ProjectConfig {
}
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
@@ -887,7 +899,7 @@ impl From<ToolsToml> for Tools {
}
}

#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)]
pub struct GhostSnapshotToml {
/// Exclude untracked files larger than this many bytes from ghost snapshots.
#[serde(alias = "ignore_untracked_files_over_bytes")]
@@ -1062,16 +1074,17 @@ impl Config {
codex_home: PathBuf,
) -> std::io::Result<Self> {
// Note this ignores requirements.toml enforcement for tests.
let requirements = ConfigRequirements::default();
Self::load_config_with_requirements(cfg, overrides, codex_home, requirements)
let config_layer_stack = ConfigLayerStack::default();
Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack)
}

fn load_config_with_requirements(
fn load_config_with_layer_stack(
cfg: ConfigToml,
overrides: ConfigOverrides,
codex_home: PathBuf,
requirements: ConfigRequirements,
config_layer_stack: ConfigLayerStack,
) -> std::io::Result<Self> {
let requirements = config_layer_stack.requirements().clone();
let user_instructions = Self::load_instructions(Some(&codex_home));

// Destructure ConfigOverrides fully to ensure all overrides are applied.
@@ -1349,6 +1362,7 @@ impl Config {
.collect(),
tool_output_token_limit: cfg.tool_output_token_limit,
codex_home,
config_layer_stack,
history,
file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
codex_linux_sandbox_exe,
@@ -1366,7 +1380,6 @@ impl Config {
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
chatgpt_base_url: config_profile
.chatgpt_base_url
@@ -1385,6 +1398,12 @@ impl Config {
notices: cfg.notice.unwrap_or_default(),
check_for_update_on_startup,
disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
analytics: config_profile
.analytics
.as_ref()
.and_then(|a| a.enabled)
.or(cfg.analytics.as_ref().and_then(|a| a.enabled))
.unwrap_or(true),
tui_notifications: cfg
.tui
.as_ref()
@@ -2045,6 +2064,7 @@ trust_level = "trusted"
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
@@ -2165,6 +2185,7 @@ trust_level = "trusted"
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
@@ -3032,6 +3053,9 @@ approval_policy = "untrusted"
# `ConfigOverrides`.
profile = "gpt3"

[analytics]
enabled = true

[model_providers.openai-chat-completions]
name = "OpenAI using Chat Completions"
base_url = "https://api.openai.com/v1"
@@ -3057,6 +3081,9 @@ model = "o3"
model_provider = "openai"
approval_policy = "on-failure"

[profiles.zdr.analytics]
enabled = false

[profiles.gpt5]
model = "gpt-5.1"
model_provider = "openai"
@@ -3167,6 +3194,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3175,7 +3203,6 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3197,6 +3224,7 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
analytics: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
@@ -3250,6 +3278,7 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3258,7 +3287,6 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
@@ -3280,6 +3308,7 @@ model_verbosity = "high"
|
||||
tui_notifications: Default::default(),
|
||||
animations: true,
|
||||
show_tooltips: true,
|
||||
analytics: true,
|
||||
tui_scroll_events_per_tick: None,
|
||||
tui_scroll_wheel_lines: None,
|
||||
tui_scroll_trackpad_lines: None,
|
||||
@@ -3348,6 +3377,7 @@ model_verbosity = "high"
|
||||
project_doc_fallback_filenames: Vec::new(),
|
||||
tool_output_token_limit: None,
|
||||
codex_home: fixture.codex_home(),
|
||||
config_layer_stack: Default::default(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
codex_linux_sandbox_exe: None,
|
||||
@@ -3356,7 +3386,6 @@ model_verbosity = "high"
|
||||
model_reasoning_effort: None,
|
||||
model_reasoning_summary: ReasoningSummary::default(),
|
||||
model_supports_reasoning_summaries: None,
|
||||
model_reasoning_summary_format: None,
|
||||
model_verbosity: None,
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
@@ -3378,6 +3407,7 @@ model_verbosity = "high"
|
||||
tui_notifications: Default::default(),
|
||||
animations: true,
|
||||
show_tooltips: true,
|
||||
analytics: false,
|
||||
tui_scroll_events_per_tick: None,
|
||||
tui_scroll_wheel_lines: None,
|
||||
tui_scroll_trackpad_lines: None,
|
||||
@@ -3432,6 +3462,7 @@ model_verbosity = "high"
|
||||
project_doc_fallback_filenames: Vec::new(),
|
||||
tool_output_token_limit: None,
|
||||
codex_home: fixture.codex_home(),
|
||||
config_layer_stack: Default::default(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
codex_linux_sandbox_exe: None,
|
||||
@@ -3440,7 +3471,6 @@ model_verbosity = "high"
|
||||
model_reasoning_effort: Some(ReasoningEffort::High),
|
||||
model_reasoning_summary: ReasoningSummary::Detailed,
|
||||
model_supports_reasoning_summaries: None,
|
||||
model_reasoning_summary_format: None,
|
||||
model_verbosity: Some(Verbosity::High),
|
||||
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
|
||||
base_instructions: None,
|
||||
@@ -3462,6 +3492,7 @@ model_verbosity = "high"
|
||||
tui_notifications: Default::default(),
|
||||
animations: true,
|
||||
show_tooltips: true,
|
||||
analytics: true,
|
||||
tui_scroll_events_per_tick: None,
|
||||
tui_scroll_wheel_lines: None,
|
||||
tui_scroll_trackpad_lines: None,
|
||||
|
||||
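The hunks above resolve the effective analytics setting by checking the active profile first, then the top-level `[analytics]` table, then defaulting to `true`. A minimal self-contained sketch of that precedence, assuming simplified stand-in types rather than the crate's actual `AnalyticsConfigToml`:

// Sketch only: `AnalyticsToml` and `effective_analytics` are illustrative
// names, not the crate's API. The precedence chain mirrors the diff above.
#[derive(Default)]
struct AnalyticsToml {
    enabled: Option<bool>,
}

fn effective_analytics(profile: Option<&AnalyticsToml>, top_level: Option<&AnalyticsToml>) -> bool {
    profile
        .and_then(|a| a.enabled)
        .or(top_level.and_then(|a| a.enabled))
        .unwrap_or(true)
}

fn main() {
    let top = AnalyticsToml { enabled: Some(true) };
    let zdr = AnalyticsToml { enabled: Some(false) };
    assert!(effective_analytics(None, Some(&top)));        // top-level [analytics] only
    assert!(!effective_analytics(Some(&zdr), Some(&top))); // [profiles.zdr.analytics] wins
    assert!(effective_analytics(None, None));              // unset everywhere: defaults to true
}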
@@ -1,5 +1,6 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;

use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
@@ -9,7 +10,7 @@ use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ConfigProfile {
pub model: Option<String>,
/// The key in the `model_providers` map identifying the
@@ -28,6 +29,7 @@ pub struct ConfigProfile {
pub experimental_use_freeform_apply_patch: Option<bool>,
pub tools_web_search: Option<bool>,
pub tools_view_image: Option<bool>,
pub analytics: Option<crate::config::types::AnalyticsConfigToml>,
/// Optional feature toggles scoped to this profile.
#[serde(default)]
pub features: Option<crate::features::FeaturesToml>,
@@ -106,16 +106,7 @@ pub struct ConfigService {
}

impl ConfigService {
pub fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides: LoaderOverrides::default(),
}
}

#[cfg(test)]
fn with_overrides(
pub fn new(
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
@@ -127,6 +118,14 @@ impl ConfigService {
}
}

pub fn new_with_defaults(codex_home: PathBuf) -> Self {
Self {
codex_home,
cli_overrides: Vec::new(),
loader_overrides: LoaderOverrides::default(),
}
}

pub async fn read(
&self,
params: ConfigReadParams,
@@ -556,6 +555,10 @@ fn override_message(layer: &ConfigLayerSource) -> String {
ConfigLayerSource::System { file } => {
format!("Overridden by managed config (system): {}", file.display())
}
ConfigLayerSource::Project { dot_codex_folder } => format!(
"Overridden by project config: {}/{CONFIG_TOML_FILE}",
dot_codex_folder.display(),
),
ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(),
ConfigLayerSource::User { file } => {
format!("Overridden by user config: {}", file.display())
@@ -703,7 +706,7 @@ unified_exec = true
"#;
std::fs::write(tmp.path().join(CONFIG_TOML_FILE), original)?;

let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
service
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()),
@@ -744,13 +747,14 @@ remote_compaction = true
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

let service = ConfigService::with_overrides(
let service = ConfigService::new(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -774,15 +778,41 @@ remote_compaction = true
},
);
let layers = response.layers.expect("layers present");
assert_eq!(layers.len(), 2, "expected two layers");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User { file: user_file }
);
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(
crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX,
)
.expect("system file");
assert_eq!(layers.len(), 3, "expected three layers on unix");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone()
}
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User {
file: user_file.clone()
}
);
assert_eq!(
layers.get(2).unwrap().name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 2, "expected two layers");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone()
}
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User { file: user_file }
);
}
}

#[tokio::test]
@@ -798,13 +828,14 @@ remote_compaction = true
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

let service = ConfigService::with_overrides(
let service = ConfigService::new(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -849,7 +880,7 @@ remote_compaction = true
let user_path = tmp.path().join(CONFIG_TOML_FILE);
std::fs::write(&user_path, "model = \"user\"").unwrap();

let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
let error = service
.write_value(ConfigValueWriteParams {
file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()),
@@ -872,7 +903,7 @@ remote_compaction = true
let tmp = tempdir().expect("tempdir");
std::fs::write(tmp.path().join(CONFIG_TOML_FILE), "").unwrap();

let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
service
.write_value(ConfigValueWriteParams {
file_path: None,
@@ -900,13 +931,14 @@ remote_compaction = true
let managed_path = tmp.path().join("managed_config.toml");
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();

let service = ConfigService::with_overrides(
let service = ConfigService::new(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -947,13 +979,14 @@ remote_compaction = true
TomlValue::String("session".to_string()),
)];

let service = ConfigService::with_overrides(
let service = ConfigService::new(
tmp.path().to_path_buf(),
cli_overrides,
LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -992,13 +1025,14 @@ remote_compaction = true
std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap();
let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");

let service = ConfigService::with_overrides(
let service = ConfigService::new(
tmp.path().to_path_buf(),
vec![],
LoaderOverrides {
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -1050,7 +1084,7 @@ alpha = "a"

std::fs::write(&path, base)?;

let service = ConfigService::new(tmp.path().to_path_buf(), vec![]);
let service = ConfigService::new_with_defaults(tmp.path().to_path_buf());
service
.write_value(ConfigValueWriteParams {
file_path: Some(path.display().to_string()),
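The hunks above collapse the test-only `with_overrides` constructor into `new`, which now takes `LoaderOverrides` directly, and add a `new_with_defaults` convenience for the common case. A minimal self-contained sketch of that constructor split, using stand-in types (`Service`, `Overrides`) rather than the crate's `ConfigService` and `LoaderOverrides`:

// Sketch only: illustrates the "full constructor plus defaults convenience"
// refactor from the diff above; names and field types are simplified.
use std::path::PathBuf;

#[derive(Debug, Default, Clone)]
struct Overrides {
    managed_config_path: Option<PathBuf>,
}

struct Service {
    home: PathBuf,
    cli_overrides: Vec<(String, String)>,
    overrides: Overrides,
}

impl Service {
    fn new(home: PathBuf, cli_overrides: Vec<(String, String)>, overrides: Overrides) -> Self {
        Self { home, cli_overrides, overrides }
    }

    // The common case delegates to the fully-parameterized constructor.
    fn new_with_defaults(home: PathBuf) -> Self {
        Self::new(home, Vec::new(), Overrides::default())
    }
}

fn main() {
    let svc = Service::new_with_defaults(PathBuf::from("/tmp/codex-home"));
    assert!(svc.cli_overrides.is_empty());
    assert!(svc.overrides.managed_config_path.is_none());
    let _ = svc.home;
}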
@@ -221,7 +221,7 @@ mod option_duration_secs {
}
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
pub enum UriBasedFileOpener {
#[serde(rename = "vscode")]
VsCode,
@@ -253,7 +253,7 @@ impl UriBasedFileOpener {
}

/// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct History {
/// If true, history entries will not be written to disk.
pub persistence: HistoryPersistence,
@@ -263,7 +263,7 @@ pub struct History {
pub max_bytes: Option<usize>,
}

#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum HistoryPersistence {
/// Save all history entries to disk.
@@ -273,9 +273,18 @@ pub enum HistoryPersistence {
None,
}

// ===== Analytics configuration =====

/// Analytics settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct AnalyticsConfigToml {
/// When `false`, disables analytics across Codex product surfaces in this profile.
pub enabled: Option<bool>,
}

// ===== OTEL configuration =====

#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
/// Binary payload
@@ -284,7 +293,7 @@ pub enum OtelHttpProtocol {
Json,
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub struct OtelTlsConfig {
pub ca_certificate: Option<AbsolutePathBuf>,
@@ -293,7 +302,7 @@ pub struct OtelTlsConfig {
}

/// Which OTEL exporter to use.
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
None,
@@ -315,7 +324,7 @@ pub enum OtelExporterKind {
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OtelConfigToml {
/// Log user prompt in traces
pub log_user_prompt: Option<bool>,
@@ -350,7 +359,7 @@ impl Default for OtelConfig {
}
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
Enabled(bool),
@@ -368,7 +377,7 @@ impl Default for Notifications {
/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse
/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to
/// infer wheel vs trackpad per stream, or forces a specific behavior.
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ScrollInputMode {
/// Infer wheel vs trackpad behavior per scroll stream.
@@ -386,7 +395,7 @@ impl Default for ScrollInputMode {
}

/// Collection of settings that are specific to the TUI.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
/// Enable desktop notifications from the TUI when the terminal is unfocused.
/// Defaults to `true`.
@@ -514,7 +523,7 @@ const fn default_true() -> bool {
/// Settings for notices we display to users via the tui and app-server clients
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Notice {
/// Tracks whether the user has acknowledged the full access warning prompt.
pub hide_full_access_warning: Option<bool>,
@@ -537,7 +546,7 @@ impl Notice {
pub(crate) const TABLE_KEY: &'static str = "notice";
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
pub writable_roots: Vec<AbsolutePathBuf>,
@@ -560,7 +569,7 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
}
}

#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {
/// "Core" environment variables for the platform. On UNIX, this would
@@ -577,7 +586,7 @@ pub enum ShellEnvironmentPolicyInherit {

/// Policy for building the `env` when spawning a process via either the
/// `shell` or `local_shell` tool.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct ShellEnvironmentPolicyToml {
pub inherit: Option<ShellEnvironmentPolicyInherit>,

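The hunks above add `Serialize` alongside `Deserialize` on these config types because the loader now round-trips a parsed `toml::Value` through the typed struct and back into a `toml::Value` (see `resolve_relative_paths_in_config_toml` later in this range) so relative paths can be resolved. A minimal sketch of that round-trip, with an illustrative `HistoryDemo` stand-in rather than the crate's actual `History` type:

// Sketch only: demonstrates why both derives are needed for the
// Value -> struct -> Value round-trip used by the config loader.
use serde::{Deserialize, Serialize};
use toml::Value as TomlValue;

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
struct HistoryDemo {
    max_bytes: Option<usize>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let parsed: TomlValue = toml::from_str("max_bytes = 1024")?;
    let typed: HistoryDemo = parsed.clone().try_into()?; // requires Deserialize
    let back = TomlValue::try_from(typed)?;              // requires Serialize
    assert_eq!(back.get("max_bytes"), parsed.get("max_bytes"));
    Ok(())
}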
@@ -33,11 +33,13 @@ pub(super) async fn load_config_layers_internal(
let LoaderOverrides {
managed_config_path,
managed_preferences_base64,
..
} = overrides;

#[cfg(not(target_os = "macos"))]
let LoaderOverrides {
managed_config_path,
..
} = overrides;

let managed_config_path = AbsolutePathBuf::from_absolute_path(
@@ -91,12 +93,8 @@ pub(super) async fn read_config_from_path(
}
}

/// Return the default managed config path (honoring `CODEX_MANAGED_CONFIG_PATH`).
/// Return the default managed config path.
pub(super) fn managed_config_default_path(codex_home: &Path) -> PathBuf {
if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") {
return PathBuf::from(path);
}

#[cfg(unix)]
{
let _ = codex_home;

@@ -1,3 +1,4 @@
use super::config_requirements::ConfigRequirementsToml;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
@@ -10,6 +11,7 @@ use toml::Value as TomlValue;

const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";

pub(crate) async fn load_managed_admin_config_layer(
override_base64: Option<&str>,
@@ -19,82 +21,126 @@ pub(crate) async fn load_managed_admin_config_layer(
return if trimmed.is_empty() {
Ok(None)
} else {
parse_managed_preferences_base64(trimmed).map(Some)
parse_managed_config_base64(trimmed).map(Some)
};
}

const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

match task::spawn_blocking(load_managed_admin_config).await {
Ok(result) => result,
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed preferences load task was cancelled");
tracing::error!("Managed config load task was cancelled");
} else {
tracing::error!("Managed preferences load task failed: {join_err}");
tracing::error!("Managed config load task failed: {join_err}");
}
Err(io::Error::other(LOAD_ERROR))
Err(io::Error::other("Failed to load managed config"))
}
}
}

fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
load_managed_preference(MANAGED_PREFERENCES_CONFIG_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_config_base64)
.transpose()
}

pub(crate) async fn load_managed_admin_requirements_toml(
target: &mut ConfigRequirementsToml,
override_base64: Option<&str>,
) -> io::Result<()> {
if let Some(encoded) = override_base64 {
let trimmed = encoded.trim();
if !trimmed.is_empty() {
target.merge_unset_fields(parse_managed_requirements_base64(trimmed)?);
}
return Ok(());
}

match task::spawn_blocking(load_managed_admin_requirements).await {
Ok(result) => {
if let Some(requirements) = result? {
target.merge_unset_fields(requirements);
}
Ok(())
}
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed requirements load task was cancelled");
} else {
tracing::error!("Managed requirements load task failed: {join_err}");
}
Err(io::Error::other("Failed to load managed requirements"))
}
}
}

fn load_managed_admin_requirements() -> io::Result<Option<ConfigRequirementsToml>> {
load_managed_preference(MANAGED_PREFERENCES_REQUIREMENTS_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_requirements_base64)
.transpose()
}

fn load_managed_preference(key_name: &str) -> io::Result<Option<String>> {
#[link(name = "CoreFoundation", kind = "framework")]
unsafe extern "C" {
fn CFPreferencesCopyAppValue(key: CFStringRef, application_id: CFStringRef) -> *mut c_void;
}

let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

let value_ref = unsafe {
CFPreferencesCopyAppValue(
key.as_concrete_TypeRef(),
application_id.as_concrete_TypeRef(),
CFString::new(key_name).as_concrete_TypeRef(),
CFString::new(MANAGED_PREFERENCES_APPLICATION_ID).as_concrete_TypeRef(),
)
};

if value_ref.is_null() {
tracing::debug!(
"Managed preferences for {} key {} not found",
MANAGED_PREFERENCES_APPLICATION_ID,
MANAGED_PREFERENCES_CONFIG_KEY
"Managed preferences for {MANAGED_PREFERENCES_APPLICATION_ID} key {key_name} not found",
);
return Ok(None);
}

let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
let contents = value.to_string();
let trimmed = contents.trim();

parse_managed_preferences_base64(trimmed).map(Some)
let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) }.to_string();
Ok(Some(value))
}

fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed preferences as base64: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

let decoded_str = String::from_utf8(decoded).map_err(|err| {
tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

match toml::from_str::<TomlValue>(&decoded_str) {
fn parse_managed_config_base64(encoded: &str) -> io::Result<TomlValue> {
match toml::from_str::<TomlValue>(&decode_managed_preferences_base64(encoded)?) {
Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
Ok(other) => {
tracing::error!(
"Managed preferences TOML must have a table at the root, found {other:?}",
);
tracing::error!("Managed config TOML must have a table at the root, found {other:?}",);
Err(io::Error::new(
io::ErrorKind::InvalidData,
"managed preferences root must be a table",
"managed config root must be a table",
))
}
Err(err) => {
tracing::error!("Failed to parse managed preferences TOML: {err}");
tracing::error!("Failed to parse managed config TOML: {err}");
Err(io::Error::new(io::ErrorKind::InvalidData, err))
}
}
}

fn parse_managed_requirements_base64(encoded: &str) -> io::Result<ConfigRequirementsToml> {
toml::from_str::<ConfigRequirementsToml>(&decode_managed_preferences_base64(encoded)?).map_err(
|err| {
tracing::error!("Failed to parse managed requirements TOML: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
},
)
}

fn decode_managed_preferences_base64(encoded: &str) -> io::Result<String> {
String::from_utf8(BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed value as base64: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})?)
.map_err(|err| {
tracing::error!("Managed value base64 contents were not valid UTF-8: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})
}

|
||||
mod tests;
|
||||
|
||||
use crate::config::CONFIG_TOML_FILE;
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config_loader::config_requirements::ConfigRequirementsToml;
|
||||
use crate::config_loader::layer_io::LoadedConfigLayers;
|
||||
use codex_app_server_protocol::ConfigLayerSource;
|
||||
use codex_protocol::config_types::SandboxMode;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_absolute_path::AbsolutePathBufGuard;
|
||||
use serde::Deserialize;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
@@ -26,11 +28,19 @@ pub use config_requirements::ConfigRequirements;
|
||||
pub use merge::merge_toml_values;
|
||||
pub use state::ConfigLayerEntry;
|
||||
pub use state::ConfigLayerStack;
|
||||
pub use state::ConfigLayerStackOrdering;
|
||||
pub use state::LoaderOverrides;
|
||||
|
||||
/// On Unix systems, load requirements from this file path, if present.
|
||||
const DEFAULT_REQUIREMENTS_TOML_FILE_UNIX: &str = "/etc/codex/requirements.toml";
|
||||
|
||||
/// On Unix systems, load default settings from this file path, if present.
|
||||
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
|
||||
/// as skills/ and rules/ will also be honored.
|
||||
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";
|
||||
|
||||
const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
|
||||
|
||||
/// To build up the set of admin-enforced constraints, we build up from multiple
|
||||
/// configuration layers in the following order, but a constraint defined in an
|
||||
/// earlier layer cannot be overridden by a later layer:
|
||||
@@ -68,8 +78,14 @@ pub async fn load_config_layers_state(
|
||||
) -> io::Result<ConfigLayerStack> {
|
||||
let mut config_requirements_toml = ConfigRequirementsToml::default();
|
||||
|
||||
// TODO(mbolin): Support an entry in MDM for config requirements and use it
|
||||
// with `config_requirements_toml.merge_unset_fields(...)`, if present.
|
||||
#[cfg(target_os = "macos")]
|
||||
macos::load_managed_admin_requirements_toml(
|
||||
&mut config_requirements_toml,
|
||||
overrides
|
||||
.macos_managed_config_requirements_base64
|
||||
.as_deref(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Honor /etc/codex/requirements.toml.
|
||||
if cfg!(unix) {
|
||||
@@ -91,44 +107,57 @@ pub async fn load_config_layers_state(
|
||||
|
||||
let mut layers = Vec::<ConfigLayerEntry>::new();
|
||||
|
||||
// TODO(mbolin): Honor managed preferences (macOS only).
|
||||
// TODO(mbolin): Honor /etc/codex/config.toml.
|
||||
// Include an entry for the "system" config folder, loading its config.toml,
|
||||
// if it exists.
|
||||
let system_config_toml_file = if cfg!(unix) {
|
||||
Some(AbsolutePathBuf::from_absolute_path(
|
||||
SYSTEM_CONFIG_TOML_FILE_UNIX,
|
||||
)?)
|
||||
} else {
|
||||
// TODO(gt): Determine the path to load on Windows.
|
||||
None
|
||||
};
|
||||
if let Some(system_config_toml_file) = system_config_toml_file {
|
||||
let system_layer =
|
||||
load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| {
|
||||
ConfigLayerEntry::new(
|
||||
ConfigLayerSource::System {
|
||||
file: system_config_toml_file.clone(),
|
||||
},
|
||||
config_toml,
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
layers.push(system_layer);
|
||||
}
|
||||
|
||||
// Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file
|
||||
// exists, but is malformed, then this error should be propagated to the
|
||||
// user.
|
||||
let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?;
|
||||
match tokio::fs::read_to_string(&user_file).await {
|
||||
Ok(contents) => {
|
||||
let user_config: TomlValue = toml::from_str(&contents).map_err(|e| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Error parsing user config file {}: {e}",
|
||||
user_file.as_path().display(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
layers.push(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::User { file: user_file },
|
||||
user_config,
|
||||
));
|
||||
}
|
||||
Err(e) => {
|
||||
if e.kind() != io::ErrorKind::NotFound {
|
||||
return Err(io::Error::new(
|
||||
e.kind(),
|
||||
format!(
|
||||
"Failed to read user config file {}: {e}",
|
||||
user_file.as_path().display(),
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| {
|
||||
ConfigLayerEntry::new(
|
||||
ConfigLayerSource::User {
|
||||
file: user_file.clone(),
|
||||
},
|
||||
config_toml,
|
||||
)
|
||||
})
|
||||
.await?;
|
||||
layers.push(user_layer);
|
||||
|
||||
// TODO(mbolin): Add layers for cwd, tree, and repo config files.
|
||||
let _ = cwd;
|
||||
if let Some(cwd) = cwd {
|
||||
let mut merged_so_far = TomlValue::Table(toml::map::Map::new());
|
||||
for layer in &layers {
|
||||
merge_toml_values(&mut merged_so_far, &layer.config);
|
||||
}
|
||||
let project_root_markers = project_root_markers_from_config(&merged_so_far)?
|
||||
.unwrap_or_else(default_project_root_markers);
|
||||
|
||||
let project_root = find_project_root(&cwd, &project_root_markers).await?;
|
||||
let project_layers = load_project_layers(&cwd, &project_root).await?;
|
||||
layers.extend(project_layers);
|
||||
}
|
||||
|
||||
// Add a layer for runtime overrides from the CLI or UI, if any exist.
|
||||
if !cli_overrides.is_empty() {
|
||||
@@ -149,11 +178,20 @@ pub async fn load_config_layers_state(
|
||||
managed_config_from_mdm,
|
||||
} = loaded_config_layers;
|
||||
if let Some(config) = managed_config {
|
||||
let managed_parent = config.file.as_path().parent().ok_or_else(|| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Managed config file {} has no parent directory",
|
||||
config.file.as_path().display()
|
||||
),
|
||||
)
|
||||
})?;
|
||||
let managed_config =
|
||||
resolve_relative_paths_in_config_toml(config.managed_config, managed_parent)?;
|
||||
layers.push(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
|
||||
file: config.file.clone(),
|
||||
},
|
||||
config.managed_config,
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: config.file },
|
||||
managed_config,
|
||||
));
|
||||
}
|
||||
if let Some(config) = managed_config_from_mdm {
|
||||
@@ -166,6 +204,52 @@ pub async fn load_config_layers_state(
|
||||
ConfigLayerStack::new(layers, config_requirements_toml.try_into()?)
|
||||
}
|
||||
|
||||
/// Attempts to load a config.toml file from `config_toml`.
|
||||
/// - If the file exists and is valid TOML, passes the parsed `toml::Value` to
|
||||
/// `create_entry` and returns the resulting layer entry.
|
||||
/// - If the file does not exist, uses an empty `Table` with `create_entry` and
|
||||
/// returns the resulting layer entry.
|
||||
/// - If there is an error reading the file or parsing the TOML, returns an
|
||||
/// error.
|
||||
async fn load_config_toml_for_required_layer(
|
||||
config_toml: impl AsRef<Path>,
|
||||
create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry,
|
||||
) -> io::Result<ConfigLayerEntry> {
|
||||
let toml_file = config_toml.as_ref();
|
||||
let toml_value = match tokio::fs::read_to_string(toml_file).await {
|
||||
Ok(contents) => {
|
||||
let config: TomlValue = toml::from_str(&contents).map_err(|e| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("Error parsing config file {}: {e}", toml_file.display()),
|
||||
)
|
||||
})?;
|
||||
let config_parent = toml_file.parent().ok_or_else(|| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Config file {} has no parent directory",
|
||||
toml_file.display()
|
||||
),
|
||||
)
|
||||
})?;
|
||||
resolve_relative_paths_in_config_toml(config, config_parent)
|
||||
}
|
||||
Err(e) => {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
Ok(TomlValue::Table(toml::map::Map::new()))
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
e.kind(),
|
||||
format!("Failed to read config file {}: {e}", toml_file.display()),
|
||||
))
|
||||
}
|
||||
}
|
||||
}?;
|
||||
|
||||
Ok(create_entry(toml_value))
|
||||
}
|
||||
|
||||
/// If available, apply requirements from `/etc/codex/requirements.toml` to
|
||||
/// `config_requirements_toml` by filling in any unset fields.
|
||||
async fn load_requirements_toml(
|
||||
@@ -235,6 +319,217 @@ async fn load_requirements_from_legacy_scheme(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reads `project_root_markers` from the [toml::Value] produced by merging
|
||||
/// `config.toml` from the config layers in the stack preceding
|
||||
/// [ConfigLayerSource::Project].
|
||||
///
|
||||
/// Invariants:
|
||||
/// - If `project_root_markers` is not specified, returns `Ok(None)`.
|
||||
/// - If `project_root_markers` is specified, returns `Ok(Some(markers))` where
|
||||
/// `markers` is a `Vec<String>` (including `Ok(Some(Vec::new()))` for an
|
||||
/// empty array, which indicates that root detection should be disabled).
|
||||
/// - Returns an error if `project_root_markers` is specified but is not an
|
||||
/// array of strings.
|
||||
fn project_root_markers_from_config(config: &TomlValue) -> io::Result<Option<Vec<String>>> {
|
||||
let Some(table) = config.as_table() else {
|
||||
return Ok(None);
|
||||
};
|
||||
let Some(markers_value) = table.get("project_root_markers") else {
|
||||
return Ok(None);
|
||||
};
|
||||
let TomlValue::Array(entries) = markers_value else {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"project_root_markers must be an array of strings",
|
||||
));
|
||||
};
|
||||
if entries.is_empty() {
|
||||
return Ok(Some(Vec::new()));
|
||||
}
|
||||
let mut markers = Vec::new();
|
||||
for entry in entries {
|
||||
let Some(marker) = entry.as_str() else {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"project_root_markers must be an array of strings",
|
||||
));
|
||||
};
|
||||
markers.push(marker.to_string());
|
||||
}
|
||||
Ok(Some(markers))
|
||||
}
|
||||
|
||||
fn default_project_root_markers() -> Vec<String> {
|
||||
DEFAULT_PROJECT_ROOT_MARKERS
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Takes a `toml::Value` parsed from a config.toml file and walks through it,
|
||||
/// resolving any `AbsolutePathBuf` fields against `base_dir`, returning a new
|
||||
/// `toml::Value` with the same shape but with paths resolved.
|
||||
///
|
||||
/// This ensures that multiple config layers can be merged together correctly
|
||||
/// even if they were loaded from different directories.
|
||||
fn resolve_relative_paths_in_config_toml(
|
||||
value_from_config_toml: TomlValue,
|
||||
base_dir: &Path,
|
||||
) -> io::Result<TomlValue> {
|
||||
// Use the serialize/deserialize round-trip to convert the
|
||||
// `toml::Value` into a `ConfigToml` with `AbsolutePath
|
||||
let _guard = AbsolutePathBufGuard::new(base_dir);
|
||||
let Ok(resolved) = value_from_config_toml.clone().try_into::<ConfigToml>() else {
|
||||
return Ok(value_from_config_toml);
|
||||
};
|
||||
drop(_guard);
|
||||
|
||||
let resolved_value = TomlValue::try_from(resolved).map_err(|e| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("Failed to serialize resolved config: {e}"),
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(copy_shape_from_original(
|
||||
&value_from_config_toml,
|
||||
&resolved_value,
|
||||
))
|
||||
}
|
||||
|
||||
/// Ensure that every field in `original` is present in the returned
|
||||
/// `toml::Value`, taking the value from `resolved` where possible. This ensures
|
||||
/// the fields that we "removed" during the serialize/deserialize round-trip in
|
||||
/// `resolve_config_paths` are preserved, out of an abundance of caution.
|
||||
fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlValue {
|
||||
match (original, resolved) {
|
||||
(TomlValue::Table(original_table), TomlValue::Table(resolved_table)) => {
|
||||
let mut table = toml::map::Map::new();
|
||||
for (key, original_value) in original_table {
|
||||
let resolved_value = resolved_table.get(key).unwrap_or(original_value);
|
||||
table.insert(
|
||||
key.clone(),
|
||||
copy_shape_from_original(original_value, resolved_value),
|
||||
);
|
||||
}
|
||||
TomlValue::Table(table)
|
||||
}
|
||||
(TomlValue::Array(original_array), TomlValue::Array(resolved_array)) => {
|
||||
let mut items = Vec::new();
|
||||
for (index, original_value) in original_array.iter().enumerate() {
|
||||
let resolved_value = resolved_array.get(index).unwrap_or(original_value);
|
||||
items.push(copy_shape_from_original(original_value, resolved_value));
|
||||
}
|
||||
TomlValue::Array(items)
|
||||
}
|
||||
(_, resolved_value) => resolved_value.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn find_project_root(
|
||||
cwd: &AbsolutePathBuf,
|
||||
project_root_markers: &[String],
|
||||
) -> io::Result<AbsolutePathBuf> {
|
||||
if project_root_markers.is_empty() {
|
||||
return Ok(cwd.clone());
|
||||
}
|
||||
|
||||
for ancestor in cwd.as_path().ancestors() {
|
||||
for marker in project_root_markers {
|
||||
let marker_path = ancestor.join(marker);
|
||||
if tokio::fs::metadata(&marker_path).await.is_ok() {
|
||||
return AbsolutePathBuf::from_absolute_path(ancestor);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(cwd.clone())
|
||||
}
|
||||
|
||||
/// Return the appropriate list of layers (each with
|
||||
/// [ConfigLayerSource::Project] as the source) between `cwd` and
|
||||
/// `project_root`, inclusive. The list is ordered in _increasing_ precdence,
|
||||
/// starting from folders closest to `project_root` (which is the lowest
|
||||
/// precedence) to those closest to `cwd` (which is the highest precedence).
|
||||
async fn load_project_layers(
|
||||
cwd: &AbsolutePathBuf,
|
||||
project_root: &AbsolutePathBuf,
|
||||
) -> io::Result<Vec<ConfigLayerEntry>> {
|
||||
let mut dirs = cwd
|
||||
.as_path()
|
||||
.ancestors()
|
||||
.scan(false, |done, a| {
|
||||
if *done {
|
||||
None
|
||||
} else {
|
||||
if a == project_root.as_path() {
|
||||
*done = true;
|
||||
}
|
||||
Some(a)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
dirs.reverse();
|
||||
|
||||
let mut layers = Vec::new();
|
||||
for dir in dirs {
|
||||
let dot_codex = dir.join(".codex");
|
||||
if !tokio::fs::metadata(&dot_codex)
|
||||
.await
|
||||
.map(|meta| meta.is_dir())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?;
|
||||
let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?;
|
||||
match tokio::fs::read_to_string(&config_file).await {
|
||||
Ok(contents) => {
|
||||
let config: TomlValue = toml::from_str(&contents).map_err(|e| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Error parsing project config file {}: {e}",
|
||||
config_file.as_path().display(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
let config =
|
||||
resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?;
|
||||
layers.push(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::Project {
|
||||
dot_codex_folder: dot_codex_abs,
|
||||
},
|
||||
config,
|
||||
));
|
||||
}
|
||||
Err(err) => {
|
||||
if err.kind() == io::ErrorKind::NotFound {
|
||||
// If there is no config.toml file, record an empty entry
|
||||
// for this project layer, as this may still have subfolders
|
||||
// that are significant in the overall ConfigLayerStack.
|
||||
layers.push(ConfigLayerEntry::new(
|
||||
ConfigLayerSource::Project {
|
||||
dot_codex_folder: dot_codex_abs,
|
||||
},
|
||||
TomlValue::Table(toml::map::Map::new()),
|
||||
));
|
||||
} else {
|
||||
return Err(io::Error::new(
|
||||
err.kind(),
|
||||
format!(
|
||||
"Failed to read project config file {}: {err}",
|
||||
config_file.as_path().display(),
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(layers)
|
||||
}
|
||||
|
||||
/// The legacy mechanism for specifying admin-enforced configuration is to read
|
||||
/// from a file like `/etc/codex/managed_config.toml` that has the same
|
||||
/// structure as `config.toml` where fields like `approval_policy` can specify
|
||||
@@ -266,3 +561,47 @@ impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
|
||||
config_requirements_toml
|
||||
}
|
||||
}
|
||||
|
||||
// Cannot name this `mod tests` because of tests.rs in this folder.
|
||||
#[cfg(test)]
|
||||
mod unit_tests {
|
||||
use super::*;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn ensure_resolve_relative_paths_in_config_toml_preserves_all_fields() -> anyhow::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
let base_dir = tmp.path();
|
||||
let contents = r#"
|
||||
# This is a field recognized by config.toml that is an AbsolutePathBuf in
|
||||
# the ConfigToml struct.
|
||||
experimental_instructions_file = "./some_file.md"
|
||||
|
||||
# This is a field recognized by config.toml.
|
||||
model = "gpt-1000"
|
||||
|
||||
# This is a field not recognized by config.toml.
|
||||
foo = "xyzzy"
|
||||
"#;
|
||||
let user_config: TomlValue = toml::from_str(contents)?;
|
||||
|
||||
let normalized_toml_value = resolve_relative_paths_in_config_toml(user_config, base_dir)?;
|
||||
let mut expected_toml_value = toml::map::Map::new();
|
||||
expected_toml_value.insert(
|
||||
"experimental_instructions_file".to_string(),
|
||||
TomlValue::String(
|
||||
AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)?
|
||||
.as_path()
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
),
|
||||
);
|
||||
expected_toml_value.insert(
|
||||
"model".to_string(),
|
||||
TomlValue::String("gpt-1000".to_string()),
|
||||
);
|
||||
expected_toml_value.insert("foo".to_string(), TomlValue::String("xyzzy".to_string()));
|
||||
assert_eq!(normalized_toml_value, TomlValue::Table(expected_toml_value));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,14 +12,17 @@ use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use toml::Value as TomlValue;
|
||||
|
||||
/// LoaderOverrides overrides managed configuration inputs (primarily for tests).
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct LoaderOverrides {
|
||||
pub managed_config_path: Option<PathBuf>,
|
||||
//TODO(gt): Add a macos_ prefix to this field and remove the target_os check.
|
||||
#[cfg(target_os = "macos")]
|
||||
pub managed_preferences_base64: Option<String>,
|
||||
pub macos_managed_config_requirements_base64: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct ConfigLayerEntry {
|
||||
pub name: ConfigLayerSource,
|
||||
pub config: TomlValue,
|
||||
@@ -50,9 +53,28 @@ impl ConfigLayerEntry {
|
||||
config: serde_json::to_value(&self.config).unwrap_or(JsonValue::Null),
|
||||
}
|
||||
}
|
||||
|
||||
// Get the `.codex/` folder associated with this config layer, if any.
|
||||
pub fn config_folder(&self) -> Option<AbsolutePathBuf> {
|
||||
match &self.name {
|
||||
ConfigLayerSource::Mdm { .. } => None,
|
||||
ConfigLayerSource::System { file } => file.parent(),
|
||||
ConfigLayerSource::User { file } => file.parent(),
|
||||
ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder.clone()),
|
||||
ConfigLayerSource::SessionFlags => None,
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => None,
|
||||
ConfigLayerSource::LegacyManagedConfigTomlFromMdm => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ConfigLayerStackOrdering {
|
||||
LowestPrecedenceFirst,
|
||||
HighestPrecedenceFirst,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq)]
|
||||
pub struct ConfigLayerStack {
|
||||
/// Layers are listed from lowest precedence (base) to highest (top), so
|
||||
/// later entries in the Vec override earlier ones.
|
||||
@@ -156,7 +178,16 @@ impl ConfigLayerStack {
|
||||
/// Returns the highest-precedence to lowest-precedence layers, so
|
||||
/// `ConfigLayerSource::SessionFlags` would be first, if present.
|
||||
pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> {
|
||||
self.layers.iter().rev().collect()
|
||||
self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst)
|
||||
}
|
||||
|
||||
/// Returns the highest-precedence to lowest-precedence layers, so
|
||||
/// `ConfigLayerSource::SessionFlags` would be first, if present.
|
||||
pub fn get_layers(&self, ordering: ConfigLayerStackOrdering) -> Vec<&ConfigLayerEntry> {
|
||||
match ordering {
|
||||
ConfigLayerStackOrdering::HighestPrecedenceFirst => self.layers.iter().rev().collect(),
|
||||
ConfigLayerStackOrdering::LowestPrecedenceFirst => self.layers.iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,7 +201,12 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<
|
||||
));
|
||||
}
|
||||
|
||||
// The previous check ensured `layers` is sorted by precedence, so now we
|
||||
// further verify that:
|
||||
// 1. There is at most one user config layer.
|
||||
// 2. Project layers are ordered from root to cwd.
|
||||
let mut user_layer_index: Option<usize> = None;
|
||||
let mut previous_project_dot_codex_folder: Option<&AbsolutePathBuf> = None;
|
||||
for (index, layer) in layers.iter().enumerate() {
|
||||
if matches!(layer.name, ConfigLayerSource::User { .. }) {
|
||||
if user_layer_index.is_some() {
|
||||
@@ -181,6 +217,32 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<
|
||||
}
|
||||
user_layer_index = Some(index);
|
||||
}
|
||||
|
||||
if let ConfigLayerSource::Project {
|
||||
dot_codex_folder: current_project_dot_codex_folder,
|
||||
} = &layer.name
|
||||
{
|
||||
if let Some(previous) = previous_project_dot_codex_folder {
|
||||
let Some(parent) = previous.as_path().parent() else {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
"project layer has no parent directory",
|
||||
));
|
||||
};
|
||||
if previous == current_project_dot_codex_folder
|
||||
|| !current_project_dot_codex_folder
|
||||
.as_path()
|
||||
.ancestors()
|
||||
.any(|ancestor| ancestor == parent)
|
||||
{
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
"project layers are not ordered from root to cwd",
|
||||
));
|
||||
}
|
||||
}
|
||||
previous_project_dot_codex_folder = Some(current_project_dot_codex_folder);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(user_layer_index)
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
use super::LoaderOverrides;
|
||||
use super::load_config_layers_state;
|
||||
use crate::config::CONFIG_TOML_FILE;
|
||||
use crate::config::ConfigBuilder;
|
||||
use crate::config::ConfigOverrides;
|
||||
use crate::config_loader::ConfigLayerEntry;
|
||||
use crate::config_loader::ConfigRequirements;
|
||||
use crate::config_loader::config_requirements::ConfigRequirementsToml;
|
||||
use crate::config_loader::fingerprint::version_for_toml;
|
||||
use crate::config_loader::load_requirements_toml;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
#[cfg(target_os = "macos")]
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::tempdir;
|
||||
@@ -39,6 +45,7 @@ extra = true
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
};
|
||||
|
||||
let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
|
||||
@@ -69,10 +76,12 @@ extra = true
|
||||
async fn returns_empty_when_all_layers_missing() {
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
|
||||
let overrides = LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
#[cfg(target_os = "macos")]
|
||||
managed_preferences_base64: None,
|
||||
macos_managed_config_requirements_base64: None,
|
||||
};
|
||||
|
||||
let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
|
||||
@@ -84,9 +93,24 @@ async fn returns_empty_when_all_layers_missing() {
|
||||
)
|
||||
.await
|
||||
.expect("load layers");
|
||||
assert!(
|
||||
layers.get_user_layer().is_none(),
|
||||
"no user layer when CODEX_HOME/config.toml does not exist"
|
||||
let user_layer = layers
|
||||
.get_user_layer()
|
||||
.expect("expected a user layer even when CODEX_HOME/config.toml does not exist");
|
||||
assert_eq!(
|
||||
&ConfigLayerEntry {
|
||||
name: super::ConfigLayerSource::User {
|
||||
file: AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, tmp.path())
|
||||
.expect("resolve user config.toml path")
|
||||
},
|
||||
config: TomlValue::Table(toml::map::Map::new()),
|
||||
version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
|
||||
},
|
||||
user_layer,
|
||||
);
|
||||
assert_eq!(
|
||||
user_layer.config,
|
||||
TomlValue::Table(toml::map::Map::new()),
|
||||
"expected empty config for user layer when config.toml does not exist"
|
||||
);
|
||||
|
||||
let binding = layers.effective_config();
|
||||
@@ -100,9 +124,10 @@ async fn returns_empty_when_all_layers_missing() {
|
||||
.iter()
|
||||
.filter(|layer| matches!(layer.name, super::ConfigLayerSource::System { .. }))
|
||||
.count();
|
||||
let expected_system_layers = if cfg!(unix) { 1 } else { 0 };
|
||||
assert_eq!(
|
||||
num_system_layers, 0,
|
||||
"managed config layer should be absent when file missing"
|
||||
num_system_layers, expected_system_layers,
|
||||
"system layer should be present only on unix"
|
||||
);
|
||||
|
||||
#[cfg(not(target_os = "macos"))]
|
||||
@@ -121,12 +146,6 @@ async fn returns_empty_when_all_layers_missing() {
|
||||
async fn managed_preferences_take_highest_precedence() {
|
||||
use base64::Engine;
|
||||
|
||||
let managed_payload = r#"
|
||||
[nested]
|
||||
value = "managed"
|
||||
flag = false
|
||||
"#;
|
||||
let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());
|
||||
let tmp = tempdir().expect("tempdir");
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
|
||||
@@ -148,7 +167,17 @@ flag = true
|
||||
|
||||
let overrides = LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
managed_preferences_base64: Some(encoded),
|
||||
managed_preferences_base64: Some(
|
||||
base64::prelude::BASE64_STANDARD.encode(
|
||||
r#"
|
||||
[nested]
|
||||
value = "managed"
|
||||
flag = false
|
||||
"#
|
||||
.as_bytes(),
|
||||
),
|
||||
),
|
||||
macos_managed_config_requirements_base64: None,
|
||||
};
|
||||
|
||||
let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
|
||||
@@ -172,6 +201,108 @@ flag = true
|
||||
assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
#[tokio::test]
|
||||
async fn managed_preferences_requirements_are_applied() -> anyhow::Result<()> {
|
||||
use base64::Engine;
|
||||
|
||||
let tmp = tempdir()?;
|
||||
|
||||
let state = load_config_layers_state(
|
||||
tmp.path(),
|
||||
Some(AbsolutePathBuf::try_from(tmp.path())?),
|
||||
&[] as &[(String, TomlValue)],
|
||||
LoaderOverrides {
|
||||
managed_config_path: Some(tmp.path().join("managed_config.toml")),
|
||||
managed_preferences_base64: Some(String::new()),
|
||||
macos_managed_config_requirements_base64: Some(
|
||||
base64::prelude::BASE64_STANDARD.encode(
|
||||
r#"
|
||||
allowed_approval_policies = ["never"]
|
||||
allowed_sandbox_modes = ["read-only"]
|
||||
"#
|
||||
.as_bytes(),
|
||||
),
|
||||
),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
state.requirements().approval_policy.value(),
|
||||
AskForApproval::Never
|
||||
);
|
||||
assert_eq!(
|
||||
*state.requirements().sandbox_policy.get(),
|
||||
SandboxPolicy::ReadOnly
|
||||
);
|
||||
assert!(
|
||||
state
|
||||
.requirements()
|
||||
.approval_policy
|
||||
.can_set(&AskForApproval::OnRequest)
|
||||
.is_err()
|
||||
);
|
||||
assert!(
|
||||
state
|
||||
.requirements()
|
||||
.sandbox_policy
|
||||
.can_set(&SandboxPolicy::WorkspaceWrite {
|
||||
writable_roots: Vec::new(),
|
||||
network_access: false,
|
||||
exclude_tmpdir_env_var: false,
|
||||
exclude_slash_tmp: false,
|
||||
})
|
||||
.is_err()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
#[tokio::test]
|
||||
async fn managed_preferences_requirements_take_precedence() -> anyhow::Result<()> {
|
||||
use base64::Engine;
|
||||
|
||||
let tmp = tempdir()?;
|
||||
let managed_path = tmp.path().join("managed_config.toml");
|
||||
|
||||
tokio::fs::write(&managed_path, "approval_policy = \"on-request\"\n").await?;
|
||||
|
||||
let state = load_config_layers_state(
|
||||
tmp.path(),
|
||||
Some(AbsolutePathBuf::try_from(tmp.path())?),
|
||||
&[] as &[(String, TomlValue)],
|
||||
LoaderOverrides {
|
||||
managed_config_path: Some(managed_path),
|
||||
managed_preferences_base64: Some(String::new()),
|
||||
macos_managed_config_requirements_base64: Some(
|
||||
base64::prelude::BASE64_STANDARD.encode(
|
||||
r#"
|
||||
allowed_approval_policies = ["never"]
|
||||
"#
|
||||
.as_bytes(),
|
||||
),
|
||||
),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
state.requirements().approval_policy.value(),
|
||||
AskForApproval::Never
|
||||
);
|
||||
assert!(
|
||||
state
|
||||
.requirements()
|
||||
.approval_policy
|
||||
.can_set(&AskForApproval::OnRequest)
|
||||
.is_err()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "current_thread")]
|
||||
async fn load_requirements_toml_produces_expected_constraints() -> anyhow::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
@@ -208,3 +339,209 @@ allowed_approval_policies = ["never", "on-request"]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
let project_root = tmp.path().join("project");
|
||||
let nested = project_root.join("child");
|
||||
tokio::fs::create_dir_all(nested.join(".codex")).await?;
|
||||
tokio::fs::create_dir_all(project_root.join(".codex")).await?;
|
||||
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
|
||||
|
||||
tokio::fs::write(
|
||||
project_root.join(".codex").join(CONFIG_TOML_FILE),
|
||||
"foo = \"root\"\n",
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::write(
|
||||
nested.join(".codex").join(CONFIG_TOML_FILE),
|
||||
"foo = \"child\"\n",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let codex_home = tmp.path().join("home");
|
||||
tokio::fs::create_dir_all(&codex_home).await?;
|
||||
let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
|
||||
let layers = load_config_layers_state(
|
||||
&codex_home,
|
||||
Some(cwd),
|
||||
&[] as &[(String, TomlValue)],
|
||||
LoaderOverrides::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let project_layers: Vec<_> = layers
|
||||
.layers_high_to_low()
|
||||
.into_iter()
|
||||
.filter_map(|layer| match &layer.name {
|
||||
super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(project_layers.len(), 2);
|
||||
assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
|
||||
assert_eq!(
|
||||
project_layers[1].as_path(),
|
||||
project_root.join(".codex").as_path()
|
||||
);
|
||||
|
||||
let config = layers.effective_config();
|
||||
let foo = config
|
||||
.get("foo")
|
||||
.and_then(TomlValue::as_str)
|
||||
.expect("foo entry");
|
||||
assert_eq!(foo, "child");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn project_paths_resolve_relative_to_dot_codex_and_override_in_order() -> std::io::Result<()>
|
||||
{
|
||||
let tmp = tempdir()?;
|
||||
let project_root = tmp.path().join("project");
|
||||
let nested = project_root.join("child");
|
||||
tokio::fs::create_dir_all(project_root.join(".codex")).await?;
|
||||
tokio::fs::create_dir_all(nested.join(".codex")).await?;
|
||||
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
|
||||
|
||||
let root_cfg = r#"
|
||||
experimental_instructions_file = "root.txt"
|
||||
"#;
|
||||
let nested_cfg = r#"
|
||||
experimental_instructions_file = "child.txt"
|
||||
"#;
|
||||
tokio::fs::write(project_root.join(".codex").join(CONFIG_TOML_FILE), root_cfg).await?;
|
||||
tokio::fs::write(nested.join(".codex").join(CONFIG_TOML_FILE), nested_cfg).await?;
|
||||
tokio::fs::write(
|
||||
project_root.join(".codex").join("root.txt"),
|
||||
"root instructions",
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::write(
|
||||
nested.join(".codex").join("child.txt"),
|
||||
"child instructions",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let codex_home = tmp.path().join("home");
|
||||
tokio::fs::create_dir_all(&codex_home).await?;
|
||||
|
||||
let config = ConfigBuilder::default()
|
||||
.codex_home(codex_home)
|
||||
.harness_overrides(ConfigOverrides {
|
||||
cwd: Some(nested.clone()),
|
||||
..ConfigOverrides::default()
|
||||
})
|
||||
.build()
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
config.base_instructions.as_deref(),
|
||||
Some("child instructions")
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> std::io::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
let project_root = tmp.path().join("project");
|
||||
let nested = project_root.join("child");
|
||||
tokio::fs::create_dir_all(&nested).await?;
|
||||
tokio::fs::create_dir_all(project_root.join(".codex")).await?;
|
||||
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
|
||||
|
||||
let codex_home = tmp.path().join("home");
|
||||
tokio::fs::create_dir_all(&codex_home).await?;
|
||||
let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
|
||||
let layers = load_config_layers_state(
|
||||
&codex_home,
|
||||
Some(cwd),
|
||||
&[] as &[(String, TomlValue)],
|
||||
LoaderOverrides::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let project_layers: Vec<_> = layers
|
||||
.layers_high_to_low()
|
||||
.into_iter()
|
||||
.filter(|layer| matches!(layer.name, super::ConfigLayerSource::Project { .. }))
|
||||
.collect();
|
||||
assert_eq!(
|
||||
vec![&ConfigLayerEntry {
|
||||
name: super::ConfigLayerSource::Project {
|
||||
dot_codex_folder: AbsolutePathBuf::from_absolute_path(project_root.join(".codex"))?,
|
||||
},
|
||||
config: TomlValue::Table(toml::map::Map::new()),
|
||||
version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
|
||||
}],
|
||||
project_layers
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()> {
|
||||
let tmp = tempdir()?;
|
||||
let project_root = tmp.path().join("project");
|
||||
let nested = project_root.join("child");
|
||||
tokio::fs::create_dir_all(project_root.join(".codex")).await?;
|
||||
tokio::fs::create_dir_all(nested.join(".codex")).await?;
|
||||
tokio::fs::write(project_root.join(".hg"), "hg").await?;
|
||||
tokio::fs::write(
|
||||
project_root.join(".codex").join(CONFIG_TOML_FILE),
|
||||
"foo = \"root\"\n",
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::write(
|
||||
nested.join(".codex").join(CONFIG_TOML_FILE),
|
||||
"foo = \"child\"\n",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let codex_home = tmp.path().join("home");
|
||||
tokio::fs::create_dir_all(&codex_home).await?;
|
||||
tokio::fs::write(
|
||||
codex_home.join(CONFIG_TOML_FILE),
|
||||
r#"
|
||||
project_root_markers = [".hg"]
|
||||
"#,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
|
||||
let layers = load_config_layers_state(
|
||||
&codex_home,
|
||||
Some(cwd),
|
||||
&[] as &[(String, TomlValue)],
|
||||
LoaderOverrides::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let project_layers: Vec<_> = layers
|
||||
.layers_high_to_low()
|
||||
.into_iter()
|
||||
.filter_map(|layer| match &layer.name {
|
||||
super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(project_layers.len(), 2);
|
||||
assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
|
||||
assert_eq!(
|
||||
project_layers[1].as_path(),
|
||||
project_root.join(".codex").as_path()
|
||||
);
|
||||
|
||||
let merged = layers.effective_config();
|
||||
let foo = merged
|
||||
.get("foo")
|
||||
.and_then(TomlValue::as_str)
|
||||
.expect("foo entry");
|
||||
assert_eq!(foo, "child");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -3,7 +3,11 @@ use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
|
||||
use crate::command_safety::is_dangerous_command::requires_initial_appoval;
|
||||
use crate::config_loader::ConfigLayerStack;
|
||||
use crate::config_loader::ConfigLayerStackOrdering;
|
||||
use codex_execpolicy::AmendError;
|
||||
use codex_execpolicy::Decision;
|
||||
use codex_execpolicy::Error as ExecPolicyRuleError;
|
||||
@@ -17,7 +21,6 @@ use codex_protocol::protocol::AskForApproval;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use thiserror::Error;
|
||||
use tokio::fs;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::task::spawn_blocking;
|
||||
|
||||
use crate::bash::parse_shell_lc_plain_commands;
|
||||
@@ -25,11 +28,10 @@ use crate::features::Feature;
|
||||
use crate::features::Features;
|
||||
use crate::sandboxing::SandboxPermissions;
|
||||
use crate::tools::sandboxing::ExecApprovalRequirement;
|
||||
use shlex::try_join as shlex_try_join;
|
||||
|
||||
const FORBIDDEN_REASON: &str = "execpolicy forbids this command";
|
||||
const PROMPT_CONFLICT_REASON: &str =
|
||||
"execpolicy requires approval for this command, but AskForApproval is set to Never";
|
||||
const PROMPT_REASON: &str = "execpolicy requires approval for this command";
|
||||
"approval required by policy, but AskForApproval is set to Never";
|
||||
const RULES_DIR_NAME: &str = "rules";
|
||||
const RULE_EXTENSION: &str = "rules";
|
||||
const DEFAULT_POLICY_FILE: &str = "default.rules";
|
||||
@@ -80,20 +82,141 @@ pub enum ExecPolicyUpdateError {
|
||||
FeatureDisabled,
|
||||
}
|
||||
|
||||
pub(crate) async fn load_exec_policy_for_features(
|
||||
pub(crate) struct ExecPolicyManager {
|
||||
policy: ArcSwap<Policy>,
|
||||
}
|
||||
|
||||
impl ExecPolicyManager {
|
||||
pub(crate) fn new(policy: Arc<Policy>) -> Self {
|
||||
Self {
|
||||
policy: ArcSwap::from(policy),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn load(
|
||||
features: &Features,
|
||||
config_stack: &ConfigLayerStack,
|
||||
) -> Result<Self, ExecPolicyError> {
|
||||
let policy = load_exec_policy_for_features(features, config_stack).await?;
|
||||
Ok(Self::new(Arc::new(policy)))
|
||||
}
|
||||
|
||||
pub(crate) fn current(&self) -> Arc<Policy> {
|
||||
self.policy.load_full()
|
||||
}
|
||||
|
||||
pub(crate) async fn create_exec_approval_requirement_for_command(
|
||||
&self,
|
||||
features: &Features,
|
||||
command: &[String],
|
||||
approval_policy: AskForApproval,
|
||||
sandbox_policy: &SandboxPolicy,
|
||||
sandbox_permissions: SandboxPermissions,
|
||||
) -> ExecApprovalRequirement {
|
||||
let exec_policy = self.current();
|
||||
let commands =
|
||||
parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
|
||||
let heuristics_fallback = |cmd: &[String]| {
|
||||
if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) {
|
||||
Decision::Prompt
|
||||
} else {
|
||||
Decision::Allow
|
||||
}
|
||||
};
|
||||
let evaluation = exec_policy.check_multiple(commands.iter(), &heuristics_fallback);
|
||||
|
||||
match evaluation.decision {
|
||||
Decision::Forbidden => ExecApprovalRequirement::Forbidden {
|
||||
reason: derive_forbidden_reason(command, &evaluation),
|
||||
},
|
||||
Decision::Prompt => {
|
||||
if matches!(approval_policy, AskForApproval::Never) {
|
||||
ExecApprovalRequirement::Forbidden {
|
||||
reason: PROMPT_CONFLICT_REASON.to_string(),
|
||||
}
|
||||
} else {
|
||||
ExecApprovalRequirement::NeedsApproval {
|
||||
reason: derive_prompt_reason(command, &evaluation),
|
||||
proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
|
||||
try_derive_execpolicy_amendment_for_prompt_rules(
|
||||
&evaluation.matched_rules,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
Decision::Allow => ExecApprovalRequirement::Skip {
|
||||
// Bypass sandbox if execpolicy allows the command
|
||||
bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| {
|
||||
is_policy_match(rule_match) && rule_match.decision() == Decision::Allow
|
||||
}),
|
||||
proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
|
||||
try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
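// Editor's note — illustrative summary (not part of the diff) of the decision
// mapping implemented above, assuming the types shown in this file:
//
//   policy decision   approval_policy   resulting requirement
//   ---------------   ---------------   -------------------------------------
//   Forbidden         (any)             Forbidden { reason: derived }
//   Prompt            Never             Forbidden { PROMPT_CONFLICT_REASON }
//   Prompt            anything else     NeedsApproval { reason, amendment? }
//   Allow             (any)             Skip { bypass_sandbox only if a policy
//                                       rule, not the heuristics fallback,
//                                       allowed the command }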
pub(crate) async fn append_amendment_and_update(
&self,
codex_home: &Path,
amendment: &ExecPolicyAmendment,
) -> Result<(), ExecPolicyUpdateError> {
let policy_path = default_policy_path(codex_home);
let prefix = amendment.command.clone();
spawn_blocking({
let policy_path = policy_path.clone();
let prefix = prefix.clone();
move || blocking_append_allow_prefix_rule(&policy_path, &prefix)
})
.await
.map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })?
.map_err(|source| ExecPolicyUpdateError::AppendRule {
path: policy_path,
source,
})?;

let mut updated_policy = self.current().as_ref().clone();
updated_policy.add_prefix_rule(&prefix, Decision::Allow)?;
self.policy.store(Arc::new(updated_policy));
Ok(())
}
}

impl Default for ExecPolicyManager {
fn default() -> Self {
Self::new(Arc::new(Policy::empty()))
}
}

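// Editor's note — a minimal sketch (not part of the diff) of why ArcSwap
// replaces the previous Arc<RwLock<Policy>>: readers take a cheap lock-free
// snapshot, while the rare writer clones, mutates, and swaps in a new policy.
//
//     let manager = ExecPolicyManager::default();
//     let snapshot: Arc<Policy> = manager.current(); // lock-free load_full()
//     // evaluate commands against `snapshot` without blocking any writer
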
async fn load_exec_policy_for_features(
features: &Features,
codex_home: &Path,
config_stack: &ConfigLayerStack,
) -> Result<Policy, ExecPolicyError> {
if !features.enabled(Feature::ExecPolicy) {
Ok(Policy::empty())
} else {
load_exec_policy(codex_home).await
load_exec_policy(config_stack).await
}
}

pub async fn load_exec_policy(codex_home: &Path) -> Result<Policy, ExecPolicyError> {
let policy_dir = codex_home.join(RULES_DIR_NAME);
let policy_paths = collect_policy_files(&policy_dir).await?;
pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy, ExecPolicyError> {
// Iterate the layers in increasing order of precedence, adding the *.rules
// from each layer, so that higher-precedence layers can override
// rules defined in lower-precedence ones.
let mut policy_paths = Vec::new();
for layer in config_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst) {
if let Some(config_folder) = layer.config_folder() {
#[expect(clippy::expect_used)]
let policy_dir = config_folder.join(RULES_DIR_NAME).expect("safe join");
let layer_policy_paths = collect_policy_files(&policy_dir).await?;
policy_paths.extend(layer_policy_paths);
}
}

let mut parser = PolicyParser::new();
for policy_path in &policy_paths {
@@ -114,46 +237,15 @@ pub async fn load_exec_policy(codex_home: &Path) -> Result<Policy, ExecPolicyErr
}

let policy = parser.build();
tracing::debug!(
"loaded execpolicy from {} files in {}",
policy_paths.len(),
policy_dir.display()
);
tracing::debug!("loaded execpolicy from {} files", policy_paths.len());

Ok(policy)
}

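// Editor's note — illustrative layering (assumed paths, not from the diff) of
// how the loop above composes rules across config layers. With a stack of
//     ~/.codex/rules/default.rules      (user layer, lower precedence)
//     <repo>/.codex/rules/team.rules    (project layer, higher precedence)
// the user files are parsed first and the project files afterwards, so a
// project-level rule such as
//     prefix_rule(pattern=["rm"], decision="prompt")
// takes effect over a user-level rule with the same pattern.
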
pub(crate) fn default_policy_path(codex_home: &Path) -> PathBuf {
fn default_policy_path(codex_home: &Path) -> PathBuf {
codex_home.join(RULES_DIR_NAME).join(DEFAULT_POLICY_FILE)
}

pub(crate) async fn append_execpolicy_amendment_and_update(
codex_home: &Path,
current_policy: &Arc<RwLock<Policy>>,
prefix: &[String],
) -> Result<(), ExecPolicyUpdateError> {
let policy_path = default_policy_path(codex_home);
let prefix = prefix.to_vec();
spawn_blocking({
let policy_path = policy_path.clone();
let prefix = prefix.clone();
move || blocking_append_allow_prefix_rule(&policy_path, &prefix)
})
.await
.map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })?
.map_err(|source| ExecPolicyUpdateError::AppendRule {
path: policy_path,
source,
})?;

current_policy
.write()
.await
.add_prefix_rule(&prefix, Decision::Allow)?;

Ok(())
}

/// Derive a proposed execpolicy amendment when a command requires user approval
/// - If any execpolicy rule prompts, return None, because an amendment would not skip that policy requirement.
/// - Otherwise return the first heuristics Prompt.
@@ -206,71 +298,73 @@ fn try_derive_execpolicy_amendment_for_allow_rules(
})
}

/// Only return PROMPT_REASON when an execpolicy rule drove the prompt decision.
fn derive_prompt_reason(evaluation: &Evaluation) -> Option<String> {
evaluation.matched_rules.iter().find_map(|rule_match| {
if is_policy_match(rule_match) && rule_match.decision() == Decision::Prompt {
Some(PROMPT_REASON.to_string())
} else {
None
}
})
}
/// Only return a reason when a policy rule drove the prompt decision.
fn derive_prompt_reason(command_args: &[String], evaluation: &Evaluation) -> Option<String> {
let command = render_shlex_command(command_args);

pub(crate) async fn create_exec_approval_requirement_for_command(
exec_policy: &Arc<RwLock<Policy>>,
features: &Features,
command: &[String],
approval_policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
sandbox_permissions: SandboxPermissions,
) -> ExecApprovalRequirement {
let commands = parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
let heuristics_fallback = |cmd: &[String]| {
if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) {
Decision::Prompt
} else {
Decision::Allow
}
};
let policy = exec_policy.read().await;
let evaluation = policy.check_multiple(commands.iter(), &heuristics_fallback);
let most_specific_prompt = evaluation
.matched_rules
.iter()
.filter_map(|rule_match| match rule_match {
RuleMatch::PrefixRuleMatch {
matched_prefix,
decision: Decision::Prompt,
justification,
..
} => Some((matched_prefix.len(), justification.as_deref())),
_ => None,
})
.max_by_key(|(matched_prefix_len, _)| *matched_prefix_len);

match evaluation.decision {
Decision::Forbidden => ExecApprovalRequirement::Forbidden {
reason: FORBIDDEN_REASON.to_string(),
},
Decision::Prompt => {
if matches!(approval_policy, AskForApproval::Never) {
ExecApprovalRequirement::Forbidden {
reason: PROMPT_CONFLICT_REASON.to_string(),
}
} else {
ExecApprovalRequirement::NeedsApproval {
reason: derive_prompt_reason(&evaluation),
proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
try_derive_execpolicy_amendment_for_prompt_rules(&evaluation.matched_rules)
} else {
None
},
}
}
match most_specific_prompt {
Some((_matched_prefix_len, Some(justification))) => {
Some(format!("`{command}` requires approval: {justification}"))
}
Decision::Allow => ExecApprovalRequirement::Skip {
// Bypass sandbox if execpolicy allows the command
bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| {
is_policy_match(rule_match) && rule_match.decision() == Decision::Allow
}),
proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules)
} else {
None
},
},
Some((_matched_prefix_len, None)) => {
Some(format!("`{command}` requires approval by policy"))
}
None => None,
}
}

async fn collect_policy_files(dir: &Path) -> Result<Vec<PathBuf>, ExecPolicyError> {
fn render_shlex_command(args: &[String]) -> String {
shlex_try_join(args.iter().map(String::as_str)).unwrap_or_else(|_| args.join(" "))
}

/// Derive a string explaining why the command was forbidden. If `justification`
/// is set by the user, this can contain instructions with recommended
/// alternatives, for example.
fn derive_forbidden_reason(command_args: &[String], evaluation: &Evaluation) -> String {
let command = render_shlex_command(command_args);

let most_specific_forbidden = evaluation
.matched_rules
.iter()
.filter_map(|rule_match| match rule_match {
RuleMatch::PrefixRuleMatch {
matched_prefix,
decision: Decision::Forbidden,
justification,
..
} => Some((matched_prefix, justification.as_deref())),
_ => None,
})
.max_by_key(|(matched_prefix, _)| matched_prefix.len());

match most_specific_forbidden {
Some((_matched_prefix, Some(justification))) => {
format!("`{command}` rejected: {justification}")
}
Some((matched_prefix, None)) => {
let prefix = render_shlex_command(matched_prefix);
format!("`{command}` rejected: policy forbids commands starting with `{prefix}`")
}
None => format!("`{command}` rejected: blocked by policy"),
}
}

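// Editor's note — example outputs (a sketch derived from the format strings
// above, not asserted anywhere in this diff) for `derive_forbidden_reason`:
//   rule with a justification:  "`rm -rf /tmp` rejected: destructive command"
//   rule without one:           "`rm -rf /tmp` rejected: policy forbids commands starting with `rm`"
//   no matching prefix rule:    "`rm -rf /tmp` rejected: blocked by policy"
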
async fn collect_policy_files(dir: impl AsRef<Path>) -> Result<Vec<PathBuf>, ExecPolicyError> {
let dir = dir.as_ref();
let mut read_dir = match fs::read_dir(dir).await {
Ok(read_dir) => read_dir,
Err(err) if err.kind() == ErrorKind::NotFound => return Ok(Vec::new()),
@@ -313,30 +407,54 @@ async fn collect_policy_files(dir: &Path) -> Result<Vec<PathBuf>, ExecPolicyErro

policy_paths.sort();

tracing::debug!(
"loaded {} .rules files in {}",
policy_paths.len(),
dir.display()
);
Ok(policy_paths)
}

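// Editor's note — `policy_paths.sort()` above gives a deterministic parse
// order within a single rules directory (lexicographic by path), so a file
// named `10-base.rules` would be parsed before `20-overrides.rules`. These
// file names are illustrative assumptions, not taken from the repository.
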
#[cfg(test)]
mod tests {
use super::*;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::features::Feature;
use crate::features::Features;
use codex_app_server_protocol::ConfigLayerSource;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use std::fs;
use std::path::Path;
use std::sync::Arc;
use tempfile::tempdir;
use toml::Value as TomlValue;

fn config_stack_for_dot_codex_folder(dot_codex_folder: &Path) -> ConfigLayerStack {
let dot_codex_folder = AbsolutePathBuf::from_absolute_path(dot_codex_folder)
.expect("absolute dot_codex_folder");
let layer = ConfigLayerEntry::new(
ConfigLayerSource::Project { dot_codex_folder },
TomlValue::Table(Default::default()),
);
ConfigLayerStack::new(vec![layer], ConfigRequirements::default()).expect("ConfigLayerStack")
}

#[tokio::test]
async fn returns_empty_policy_when_feature_disabled() {
let mut features = Features::with_defaults();
features.disable(Feature::ExecPolicy);
let temp_dir = tempdir().expect("create temp dir");
let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());

let policy = load_exec_policy_for_features(&features, temp_dir.path())
let manager = ExecPolicyManager::load(&features, &config_stack)
.await
.expect("policy result");
.expect("manager result");
let policy = manager.current();

let commands = [vec!["rm".to_string()]];
assert_eq!(
@@ -367,6 +485,7 @@ mod tests {
#[tokio::test]
async fn loads_policies_from_policy_subdirectory() {
let temp_dir = tempdir().expect("create temp dir");
let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());
let policy_dir = temp_dir.path().join(RULES_DIR_NAME);
fs::create_dir_all(&policy_dir).expect("create policy dir");
fs::write(
@@ -375,7 +494,7 @@ mod tests {
)
.expect("write policy file");

let policy = load_exec_policy(temp_dir.path())
let policy = load_exec_policy(&config_stack)
.await
.expect("policy result");
let command = [vec!["rm".to_string()]];
@@ -384,7 +503,8 @@ mod tests {
decision: Decision::Forbidden,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: vec!["rm".to_string()],
decision: Decision::Forbidden
decision: Decision::Forbidden,
justification: None,
}],
},
policy.check_multiple(command.iter(), &|_| Decision::Allow)
@@ -394,13 +514,14 @@ mod tests {
#[tokio::test]
async fn ignores_policies_outside_policy_dir() {
let temp_dir = tempdir().expect("create temp dir");
let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());
fs::write(
temp_dir.path().join("root.rules"),
r#"prefix_rule(pattern=["ls"], decision="prompt")"#,
)
.expect("write policy file");

let policy = load_exec_policy(temp_dir.path())
let policy = load_exec_policy(&config_stack)
.await
.expect("policy result");
let command = [vec!["ls".to_string()]];
@@ -416,6 +537,71 @@ mod tests {
);
}

#[tokio::test]
async fn loads_policies_from_multiple_config_layers() -> anyhow::Result<()> {
let user_dir = tempdir()?;
let project_dir = tempdir()?;

let user_policy_dir = user_dir.path().join(RULES_DIR_NAME);
fs::create_dir_all(&user_policy_dir)?;
fs::write(
user_policy_dir.join("user.rules"),
r#"prefix_rule(pattern=["rm"], decision="forbidden")"#,
)?;

let project_policy_dir = project_dir.path().join(RULES_DIR_NAME);
fs::create_dir_all(&project_policy_dir)?;
fs::write(
project_policy_dir.join("project.rules"),
r#"prefix_rule(pattern=["ls"], decision="prompt")"#,
)?;

let user_config_toml =
AbsolutePathBuf::from_absolute_path(user_dir.path().join("config.toml"))?;
let project_dot_codex_folder = AbsolutePathBuf::from_absolute_path(project_dir.path())?;
let layers = vec![
ConfigLayerEntry::new(
ConfigLayerSource::User {
file: user_config_toml,
},
TomlValue::Table(Default::default()),
),
ConfigLayerEntry::new(
ConfigLayerSource::Project {
dot_codex_folder: project_dot_codex_folder,
},
TomlValue::Table(Default::default()),
),
];
let config_stack = ConfigLayerStack::new(layers, ConfigRequirements::default())?;

let policy = load_exec_policy(&config_stack).await?;

assert_eq!(
Evaluation {
decision: Decision::Forbidden,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: vec!["rm".to_string()],
decision: Decision::Forbidden,
justification: None,
}],
},
policy.check_multiple([vec!["rm".to_string()]].iter(), &|_| Decision::Allow)
);
assert_eq!(
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: vec!["ls".to_string()],
decision: Decision::Prompt,
justification: None,
}],
},
policy.check_multiple([vec!["ls".to_string()]].iter(), &|_| Decision::Allow)
);
Ok(())
}

#[tokio::test]
async fn evaluates_bash_lc_inner_commands() {
let policy_src = r#"
@@ -425,28 +611,67 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());

let forbidden_script = vec![
"bash".to_string(),
"-lc".to_string(),
"rm -rf /tmp".to_string(),
"rm -rf /some/important/folder".to_string(),
];

let requirement = create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&forbidden_script,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&forbidden_script,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
ExecApprovalRequirement::Forbidden {
reason: FORBIDDEN_REASON.to_string()
reason: "`bash -lc 'rm -rf /some/important/folder'` rejected: policy forbids commands starting with `rm`".to_string()
}
);
}

#[tokio::test]
async fn justification_is_included_in_forbidden_exec_approval_requirement() {
let policy_src = r#"
prefix_rule(
pattern=["rm"],
decision="forbidden",
justification="destructive command",
)
"#;
let mut parser = PolicyParser::new();
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(parser.build());

let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&[
"rm".to_string(),
"-rf".to_string(),
"/some/important/folder".to_string(),
],
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
ExecApprovalRequirement::Forbidden {
reason: "`rm -rf /some/important/folder` rejected: destructive command".to_string()
}
);
}
@@ -458,23 +683,24 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());
let command = vec!["rm".to_string()];

let requirement = create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
ExecApprovalRequirement::NeedsApproval {
reason: Some(PROMPT_REASON.to_string()),
reason: Some("`rm` requires approval by policy".to_string()),
proposed_execpolicy_amendment: None,
}
);
@@ -487,18 +713,19 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());
let command = vec!["rm".to_string()];

let requirement = create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::Never,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::Never,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -512,16 +739,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
async fn exec_approval_requirement_falls_back_to_heuristics() {
let command = vec!["cargo".to_string(), "build".to_string()];

let empty_policy = Arc::new(RwLock::new(Policy::empty()));
let requirement = create_exec_approval_requirement_for_command(
&empty_policy,
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -539,7 +766,7 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());
let command = vec![
"bash".to_string(),
"-lc".to_string(),
@@ -547,15 +774,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
];

assert_eq!(
create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await,
ExecPolicyManager::new(policy)
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await,
ExecApprovalRequirement::NeedsApproval {
reason: None,
proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![
@@ -568,14 +795,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
#[tokio::test]
async fn append_execpolicy_amendment_updates_policy_and_file() {
let codex_home = tempdir().expect("create temp dir");
let current_policy = Arc::new(RwLock::new(Policy::empty()));
let prefix = vec!["echo".to_string(), "hello".to_string()];
let manager = ExecPolicyManager::default();

append_execpolicy_amendment_and_update(codex_home.path(), &current_policy, &prefix)
manager
.append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(prefix))
.await
.expect("update policy");
let updated_policy = manager.current();

let evaluation = current_policy.read().await.check(
let evaluation = updated_policy.check(
&["echo".to_string(), "hello".to_string(), "world".to_string()],
&|_| Decision::Allow,
);
@@ -599,10 +828,11 @@ prefix_rule(pattern=["rm"], decision="forbidden")
#[tokio::test]
async fn append_execpolicy_amendment_rejects_empty_prefix() {
let codex_home = tempdir().expect("create temp dir");
let current_policy = Arc::new(RwLock::new(Policy::empty()));
let manager = ExecPolicyManager::default();

let result =
append_execpolicy_amendment_and_update(codex_home.path(), &current_policy, &[]).await;
let result = manager
.append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(vec![]))
.await;

assert!(matches!(
result,
@@ -617,16 +847,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
async fn proposed_execpolicy_amendment_is_present_for_single_command_without_policy_match() {
let command = vec!["cargo".to_string(), "build".to_string()];

let empty_policy = Arc::new(RwLock::new(Policy::empty()));
let requirement = create_exec_approval_requirement_for_command(
&empty_policy,
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -644,15 +874,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
let mut features = Features::with_defaults();
features.disable(Feature::ExecPolicy);

let requirement = create_exec_approval_requirement_for_command(
&Arc::new(RwLock::new(Policy::empty())),
&features,
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(
&features,
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -670,23 +901,24 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());
let command = vec!["rm".to_string()];

let requirement = create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::DangerFullAccess,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
ExecApprovalRequirement::NeedsApproval {
reason: Some(PROMPT_REASON.to_string()),
reason: Some("`rm` requires approval by policy".to_string()),
proposed_execpolicy_amendment: None,
}
);
@@ -699,15 +931,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
"-lc".to_string(),
"cargo build && echo ok".to_string(),
];
let requirement = create_exec_approval_requirement_for_command(
&Arc::new(RwLock::new(Policy::empty())),
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -728,7 +961,7 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());

let command = vec![
"bash".to_string(),
@@ -737,15 +970,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
];

assert_eq!(
create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await,
ExecPolicyManager::new(policy)
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::UnlessTrusted,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await,
ExecApprovalRequirement::NeedsApproval {
reason: None,
proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![
@@ -759,15 +992,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
async fn proposed_execpolicy_amendment_is_present_when_heuristics_allow() {
let command = vec!["echo".to_string(), "safe".to_string()];

let requirement = create_exec_approval_requirement_for_command(
&Arc::new(RwLock::new(Policy::empty())),
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::default();
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,
@@ -785,18 +1019,19 @@ prefix_rule(pattern=["rm"], decision="forbidden")
parser
.parse("test.rules", policy_src)
.expect("parse policy");
let policy = Arc::new(RwLock::new(parser.build()));
let policy = Arc::new(parser.build());
let command = vec!["echo".to_string(), "safe".to_string()];

let requirement = create_exec_approval_requirement_for_command(
&policy,
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;
let manager = ExecPolicyManager::new(policy);
let requirement = manager
.create_exec_approval_requirement_for_command(
&Features::with_defaults(),
&command,
AskForApproval::OnRequest,
&SandboxPolicy::ReadOnly,
SandboxPermissions::UseDefault,
)
.await;

assert_eq!(
requirement,

@@ -8,6 +8,7 @@
use crate::config::ConfigToml;
use crate::config::profile::ConfigProfile;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;

@@ -91,6 +92,8 @@ pub enum Feature {
Tui2,
/// Enable discovery and injection of skills.
Skills,
/// Enforce UTF8 output in Powershell.
PowershellUtf8,
}

impl Feature {
@@ -252,6 +255,10 @@ impl Features {

features
}

pub fn enabled_features(&self) -> Vec<Feature> {
self.enabled.iter().copied().collect()
}
}

/// Keys accepted in `[features]` tables.
@@ -270,7 +277,7 @@ pub fn is_known_feature_key(key: &str) -> bool {
}

/// Deserializable features table for TOML.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct FeaturesToml {
#[serde(flatten)]
pub entries: BTreeMap<String, bool>,
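// Editor's note — an illustrative `[features]` table (assumed syntax, based on
// the flattened BTreeMap<String, bool> above, not quoted from the docs):
//
//     [features]
//     powershell_utf8 = true
//     tui2 = false
//
// Each key is checked against the known FeatureSpec keys and toggles the
// corresponding feature on or off.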
@@ -386,6 +393,12 @@ pub const FEATURES: &[FeatureSpec] = &[
|
||||
stage: Stage::Experimental,
|
||||
default_enabled: true,
|
||||
},
|
||||
FeatureSpec {
|
||||
id: Feature::PowershellUtf8,
|
||||
key: "powershell_utf8",
|
||||
stage: Stage::Experimental,
|
||||
default_enabled: false,
|
||||
},
|
||||
FeatureSpec {
|
||||
id: Feature::Tui2,
|
||||
key: "tui2",
|
||||
|
||||
@@ -79,26 +79,60 @@ pub const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
/// Default timeout for individual tool calls.
|
||||
const DEFAULT_TOOL_TIMEOUT: Duration = Duration::from_secs(60);
|
||||
|
||||
/// The Responses API requires tool names to match `^[a-zA-Z0-9_-]+$`.
|
||||
/// MCP server/tool names are user-controlled, so sanitize the fully-qualified
|
||||
/// name we expose to the model by replacing any disallowed character with `_`.
|
||||
fn sanitize_responses_api_tool_name(name: &str) -> String {
|
||||
let mut sanitized = String::with_capacity(name.len());
|
||||
for c in name.chars() {
|
||||
if c.is_ascii_alphanumeric() || c == '_' || c == '-' {
|
||||
sanitized.push(c);
|
||||
} else {
|
||||
sanitized.push('_');
|
||||
}
|
||||
}
|
||||
|
||||
if sanitized.is_empty() {
|
||||
"_".to_string()
|
||||
} else {
|
||||
sanitized
|
||||
}
|
||||
}
|
||||
|
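// Editor's note — behavior sketch for `sanitize_responses_api_tool_name`
// (expected values inferred from the loop above, not asserted in this diff):
//     assert_eq!(sanitize_responses_api_tool_name("tool.two"), "tool_two");
//     assert_eq!(sanitize_responses_api_tool_name("read@file!"), "read_file_");
//     assert_eq!(sanitize_responses_api_tool_name(""), "_");
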
fn sha1_hex(s: &str) -> String {
let mut hasher = Sha1::new();
hasher.update(s.as_bytes());
let sha1 = hasher.finalize();
format!("{sha1:x}")
}

fn qualify_tools<I>(tools: I) -> HashMap<String, ToolInfo>
where
I: IntoIterator<Item = ToolInfo>,
{
let mut used_names = HashSet::new();
let mut seen_raw_names = HashSet::new();
let mut qualified_tools = HashMap::new();
for tool in tools {
let mut qualified_name = format!(
let qualified_name_raw = format!(
"mcp{}{}{}{}",
MCP_TOOL_NAME_DELIMITER, tool.server_name, MCP_TOOL_NAME_DELIMITER, tool.tool_name
);
if !seen_raw_names.insert(qualified_name_raw.clone()) {
warn!("skipping duplicated tool {}", qualified_name_raw);
continue;
}

// Start from a "pretty" name (sanitized), then deterministically disambiguate on
// collisions by appending a hash of the *raw* (unsanitized) qualified name. This
// ensures tools like `foo.bar` and `foo_bar` don't collapse to the same key.
let mut qualified_name = sanitize_responses_api_tool_name(&qualified_name_raw);

// Enforce length constraints early; use the raw name for the hash input so the
// output remains stable even when sanitization changes.
if qualified_name.len() > MAX_TOOL_NAME_LENGTH {
let mut hasher = Sha1::new();
hasher.update(qualified_name.as_bytes());
let sha1 = hasher.finalize();
let sha1_str = format!("{sha1:x}");

// Truncate to make room for the hash suffix
let sha1_str = sha1_hex(&qualified_name_raw);
let prefix_len = MAX_TOOL_NAME_LENGTH - sha1_str.len();

qualified_name = format!("{}{}", &qualified_name[..prefix_len], sha1_str);
}

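// Editor's note — a worked sketch of the truncation above. MAX_TOOL_NAME_LENGTH
// is whatever the crate defines; 64 is only an assumed value for illustration.
// A lowercase sha1 hex digest is 40 chars, so prefix_len = 64 - 40 = 24 and the
// final name becomes the first 24 sanitized chars followed by the 40-char hash
// of the raw (unsanitized) qualified name, keeping long names deterministic
// and distinct even when sanitization would make them identical.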
@@ -1035,6 +1069,28 @@ mod tests {
);
}

#[test]
fn test_qualify_tools_sanitizes_invalid_characters() {
let tools = vec![create_test_tool("server.one", "tool.two")];

let qualified_tools = qualify_tools(tools);

assert_eq!(qualified_tools.len(), 1);
let (qualified_name, tool) = qualified_tools.into_iter().next().expect("one tool");
assert_eq!(qualified_name, "mcp__server_one__tool_two");

// The key is sanitized for OpenAI, but we keep original parts for the actual MCP call.
assert_eq!(tool.server_name, "server.one");
assert_eq!(tool.tool_name, "tool.two");

assert!(
qualified_name
.chars()
.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-'),
"qualified name must be Responses API compatible: {qualified_name:?}"
);
}

#[test]
fn tool_filter_allows_by_default() {
let filter = ToolFilter::default();

@@ -77,7 +77,7 @@ impl ModelsManager {
}

/// Fetch the latest remote models, using the on-disk cache when still fresh.
pub async fn refresh_available_models(&self, config: &Config) -> CoreResult<()> {
pub async fn refresh_available_models_with_cache(&self, config: &Config) -> CoreResult<()> {
if !config.features.enabled(Feature::RemoteModels)
|| self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey)
{
@@ -86,7 +86,17 @@ impl ModelsManager {
if self.try_load_cache().await {
return Ok(());
}
self.refresh_available_models_no_cache(config.features.enabled(Feature::RemoteModels))
.await
}

pub(crate) async fn refresh_available_models_no_cache(
&self,
remote_models_feature: bool,
) -> CoreResult<()> {
if !remote_models_feature || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey) {
return Ok(());
}
let auth = self.auth_manager.auth();
let api_provider = self.provider.to_api_provider(Some(AuthMode::ChatGPT))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.provider).await?;
@@ -94,13 +104,11 @@ impl ModelsManager {
let client = ModelsClient::new(transport, api_provider, api_auth);

let client_version = format_client_version_to_whole();
let ModelsResponse { models, etag } = client
let (models, etag) = client
.list_models(&client_version, HeaderMap::new())
.await
.map_err(map_api_error)?;

let etag = (!etag.is_empty()).then_some(etag);

self.apply_remote_models(models.clone()).await;
*self.etag.write().await = etag.clone();
self.persist_cache(&models, etag).await;
@@ -108,7 +116,7 @@ impl ModelsManager {
}

pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> {
if let Err(err) = self.refresh_available_models(config).await {
if let Err(err) = self.refresh_available_models_with_cache(config).await {
error!("failed to refresh available models: {err}");
}
let remote_models = self.remote_models(config).await;
@@ -135,7 +143,7 @@ impl ModelsManager {
if let Some(model) = model.as_ref() {
return model.to_string();
}
if let Err(err) = self.refresh_available_models(config).await {
if let Err(err) = self.refresh_available_models_with_cache(config).await {
error!("failed to refresh available models: {err}");
}
// if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model
@@ -153,6 +161,18 @@ impl ModelsManager {
}
OPENAI_DEFAULT_API_MODEL.to_string()
}
pub async fn refresh_if_new_etag(&self, etag: String, remote_models_feature: bool) {
let current_etag = self.get_etag().await;
if current_etag.clone().is_some() && current_etag.as_deref() == Some(etag.as_str()) {
return;
}
if let Err(err) = self
.refresh_available_models_no_cache(remote_models_feature)
.await
{
error!("failed to refresh available models: {err}");
}
}

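// Editor's note — intended usage sketch (assumed call site, not part of the
// diff): an etag observed on a server response can be forwarded here, and the
// manager only re-fetches when the etag actually changed:
//
//     manager.refresh_if_new_etag(new_etag, /* remote_models_feature */ true).await;
//
// If a stored etag exists and matches, the call returns without touching the
// network; otherwise it falls through to refresh_available_models_no_cache.
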
#[cfg(any(test, feature = "test-support"))]
pub fn get_model_offline(model: Option<&str>) -> String {
@@ -165,6 +185,10 @@ impl ModelsManager {
Self::find_family_for_model(model).with_config_overrides(config)
}

async fn get_etag(&self) -> Option<String> {
self.etag.read().await.clone()
}

/// Replace the cached remote models and rebuild the derived presets list.
async fn apply_remote_models(&self, models: Vec<ModelInfo>) {
*self.remote_models.write().await = models;
@@ -288,26 +312,14 @@ impl ModelsManager {

/// Convert a client version string to a whole version string (e.g. "1.2.3-alpha.4" -> "1.2.3")
fn format_client_version_to_whole() -> String {
format_client_version_from_parts(
format!(
"{}.{}.{}",
env!("CARGO_PKG_VERSION_MAJOR"),
env!("CARGO_PKG_VERSION_MINOR"),
env!("CARGO_PKG_VERSION_PATCH"),
env!("CARGO_PKG_VERSION_PATCH")
)
}

fn format_client_version_from_parts(major: &str, minor: &str, patch: &str) -> String {
const DEV_VERSION: &str = "0.0.0";
const FALLBACK_VERSION: &str = "99.99.99";

let normalized = format!("{major}.{minor}.{patch}");

if normalized == DEV_VERSION {
FALLBACK_VERSION.to_string()
} else {
normalized
}
}

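// Editor's note — behavior sketch for the version helpers above (values
// inferred from the constants shown, not asserted in this diff):
//     format_client_version_from_parts("1", "2", "3") == "1.2.3"
//     format_client_version_from_parts("0", "0", "0") == "99.99.99" // dev-build fallback
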
#[cfg(test)]
mod tests {
use super::cache::ModelsCache;
@@ -354,7 +366,6 @@ mod tests {
"truncation_policy": {"mode": "bytes", "limit": 10_000},
"supports_parallel_tool_calls": false,
"context_window": null,
"reasoning_summary_format": "none",
"experimental_supported_tools": [],
}))
.expect("valid model")
@@ -389,7 +400,6 @@ mod tests {
&server,
ModelsResponse {
models: remote_models.clone(),
etag: String::new(),
},
)
.await;
@@ -407,7 +417,7 @@ mod tests {
let manager = ModelsManager::with_provider(auth_manager, provider);

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("refresh succeeds");
let cached_remote = manager.remote_models(&config).await;
@@ -446,7 +456,6 @@ mod tests {
&server,
ModelsResponse {
models: remote_models.clone(),
etag: String::new(),
},
)
.await;
@@ -467,7 +476,7 @@ mod tests {
let manager = ModelsManager::with_provider(auth_manager, provider);

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("first refresh succeeds");
assert_eq!(
@@ -478,7 +487,7 @@ mod tests {

// Second call should read from cache and avoid the network.
manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("cached refresh succeeds");
assert_eq!(
@@ -501,7 +510,6 @@ mod tests {
&server,
ModelsResponse {
models: initial_models.clone(),
etag: String::new(),
},
)
.await;
@@ -522,7 +530,7 @@ mod tests {
let manager = ModelsManager::with_provider(auth_manager, provider);

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("initial refresh succeeds");

@@ -542,13 +550,12 @@ mod tests {
&server,
ModelsResponse {
models: updated_models.clone(),
etag: String::new(),
},
)
.await;

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("second refresh succeeds");
assert_eq!(
@@ -576,7 +583,6 @@ mod tests {
&server,
ModelsResponse {
models: initial_models,
etag: String::new(),
},
)
.await;
@@ -595,7 +601,7 @@ mod tests {
manager.cache_ttl = Duration::ZERO;

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("initial refresh succeeds");

@@ -605,13 +611,12 @@ mod tests {
&server,
ModelsResponse {
models: refreshed_models,
etag: String::new(),
},
)
.await;

manager
.refresh_available_models(&config)
.refresh_available_models_with_cache(&config)
.await
.expect("second refresh succeeds");


@@ -3,7 +3,6 @@ use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;

use crate::config::Config;
use crate::truncate::TruncationPolicy;
@@ -48,9 +47,6 @@ pub struct ModelFamily {
// The reasoning effort to use for this model family when none is explicitly chosen.
pub default_reasoning_effort: Option<ReasoningEffort>,

// Define if we need a special handling of reasoning summary
pub reasoning_summary_format: ReasoningSummaryFormat,

/// Whether this model supports parallel tool calls when using the
/// Responses API.
pub supports_parallel_tool_calls: bool,
@@ -88,9 +84,6 @@ impl ModelFamily {
if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
self.supports_reasoning_summaries = supports_reasoning_summaries;
}
if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
self.reasoning_summary_format = reasoning_summary_format.clone();
}
if let Some(context_window) = config.model_context_window {
self.context_window = Some(context_window);
}
@@ -117,7 +110,6 @@ impl ModelFamily {
supported_reasoning_levels: _,
shell_type,
visibility: _,
minimal_client_version: _,
supported_in_api: _,
priority: _,
upgrade: _,
@@ -129,7 +121,6 @@ impl ModelFamily {
truncation_policy,
supports_parallel_tool_calls,
context_window,
reasoning_summary_format,
experimental_supported_tools,
} = model;

@@ -145,7 +136,6 @@ impl ModelFamily {
self.truncation_policy = truncation_policy.into();
self.supports_parallel_tool_calls = supports_parallel_tool_calls;
self.context_window = context_window;
self.reasoning_summary_format = reasoning_summary_format;
self.experimental_supported_tools = experimental_supported_tools;
}

@@ -176,7 +166,6 @@ macro_rules! model_family {
context_window: Some(CONTEXT_WINDOW_272K),
auto_compact_token_limit: None,
supports_reasoning_summaries: false,
reasoning_summary_format: ReasoningSummaryFormat::None,
supports_parallel_tool_calls: false,
apply_patch_tool_type: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -251,7 +240,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
model_family!(
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
|
||||
experimental_supported_tools: vec![
|
||||
"grep_files".to_string(),
|
||||
@@ -271,7 +259,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
@@ -300,7 +287,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
@@ -313,7 +299,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
@@ -326,7 +311,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
@@ -342,7 +326,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
|
||||
model_family!(
|
||||
slug, slug,
|
||||
supports_reasoning_summaries: true,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
|
||||
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
@@ -417,7 +400,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
|
||||
context_window: None,
|
||||
auto_compact_token_limit: None,
|
||||
supports_reasoning_summaries: false,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::None,
|
||||
supports_parallel_tool_calls: false,
|
||||
apply_patch_tool_type: None,
|
||||
base_instructions: BASE_INSTRUCTIONS.to_string(),
|
||||
@@ -434,7 +416,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::openai_models::ClientVersion;
|
||||
use codex_protocol::openai_models::ModelVisibility;
|
||||
use codex_protocol::openai_models::ReasoningEffortPreset;
|
||||
use codex_protocol::openai_models::TruncationPolicyConfig;
|
||||
@@ -451,7 +432,6 @@ mod tests {
|
||||
}],
|
||||
shell_type: shell,
|
||||
visibility: ModelVisibility::List,
|
||||
minimal_client_version: ClientVersion(0, 1, 0),
|
||||
supported_in_api: true,
|
||||
priority: 1,
|
||||
upgrade: None,
|
||||
@@ -463,7 +443,6 @@ mod tests {
|
||||
truncation_policy: TruncationPolicyConfig::bytes(10_000),
|
||||
supports_parallel_tool_calls: false,
|
||||
context_window: None,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::None,
|
||||
experimental_supported_tools: Vec::new(),
|
||||
}
|
||||
}
|
||||
@@ -527,7 +506,6 @@ mod tests {
|
||||
experimental_supported_tools: vec!["local".to_string()],
|
||||
truncation_policy: TruncationPolicy::Bytes(10_000),
|
||||
context_window: Some(100),
|
||||
reasoning_summary_format: ReasoningSummaryFormat::None,
|
||||
);
|
||||
|
||||
let updated = family.with_remote_overrides(vec![ModelInfo {
|
||||
@@ -541,7 +519,6 @@ mod tests {
|
||||
}],
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
visibility: ModelVisibility::List,
|
||||
minimal_client_version: ClientVersion(0, 1, 0),
|
||||
supported_in_api: true,
|
||||
priority: 10,
|
||||
upgrade: None,
|
||||
@@ -553,7 +530,6 @@ mod tests {
|
||||
truncation_policy: TruncationPolicyConfig::tokens(2_000),
|
||||
supports_parallel_tool_calls: true,
|
||||
context_window: Some(400_000),
|
||||
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
|
||||
experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()],
|
||||
}]);
|
||||
|
||||
@@ -572,10 +548,6 @@ mod tests {
|
||||
assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000));
|
||||
assert!(updated.supports_parallel_tool_calls);
|
||||
assert_eq!(updated.context_window, Some(400_000));
|
||||
assert_eq!(
|
||||
updated.reasoning_summary_format,
|
||||
ReasoningSummaryFormat::Experimental
|
||||
);
|
||||
assert_eq!(
|
||||
updated.experimental_supported_tools,
|
||||
vec!["alpha".to_string(), "beta".to_string()]
|
||||
|
||||
@@ -8,6 +8,30 @@ use crate::shell::detect_shell_type;
|
||||
|
||||
const POWERSHELL_FLAGS: &[&str] = &["-nologo", "-noprofile", "-command", "-c"];
|
||||
|
||||
/// Prefixed command for powershell shell calls to force UTF-8 console output.
|
||||
pub(crate) const UTF8_OUTPUT_PREFIX: &str =
|
||||
"[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\n";
|
||||
|
||||
pub(crate) fn prefix_powershell_script_with_utf8(command: &[String]) -> Vec<String> {
|
||||
let Some((_, script)) = extract_powershell_command(command) else {
|
||||
return command.to_vec();
|
||||
};
|
||||
|
||||
let trimmed = script.trim_start();
|
||||
let script = if trimmed.starts_with(UTF8_OUTPUT_PREFIX) {
|
||||
script.to_string()
|
||||
} else {
|
||||
format!("{UTF8_OUTPUT_PREFIX}{script}")
|
||||
};
|
||||
|
||||
let mut command: Vec<String> = command[..(command.len() - 1)]
|
||||
.iter()
|
||||
.map(std::string::ToString::to_string)
|
||||
.collect();
|
||||
command.push(script);
|
||||
command
|
||||
}
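
Illustration only, not part of the diff: a hedged sketch of the rewrite above, assuming `detect_shell_type` maps a bare `pwsh` to `ShellType::PowerShell`:

// Sketch: the script tail is replaced by a UTF-8-prefixed copy; a second
// pass is a no-op thanks to the starts_with check above.
let cmd = vec![
    "pwsh".to_string(),
    "-NoProfile".to_string(),
    "-Command".to_string(),
    "Get-ChildItem".to_string(),
];
let once = prefix_powershell_script_with_utf8(&cmd);
assert_eq!(
    once.last().map(String::as_str),
    Some("[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\nGet-ChildItem")
);
assert_eq!(prefix_powershell_script_with_utf8(&once), once);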

/// Extract the PowerShell script body from an invocation such as:
///
/// - ["pwsh", "-NoProfile", "-Command", "Get-ChildItem -Recurse | Select-String foo"]
@@ -22,7 +46,10 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> {
    }

    let shell = &command[0];
    if detect_shell_type(&PathBuf::from(shell)) != Some(ShellType::PowerShell) {
    if !matches!(
        detect_shell_type(&PathBuf::from(shell)),
        Some(ShellType::PowerShell)
    ) {
        return None;
    }

@@ -36,7 +63,7 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> {
    }
    if flag.eq_ignore_ascii_case("-Command") || flag.eq_ignore_ascii_case("-c") {
        let script = &command[i + 1];
        return Some((shell, script.as_str()));
        return Some((shell, script));
    }
    i += 1;
}

@@ -513,9 +513,9 @@ mod tests {
)
.unwrap_or_else(|_| cfg.codex_home.join("skills/pdf-processing/SKILL.md"));
let expected_path_str = expected_path.to_string_lossy().replace('\\', "/");
let usage_rules = "- Discovery: Available skills are listed in project docs and may also appear in a runtime \"## Skills\" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue.";
let usage_rules = "- Discovery: The list above is the skills available in this session (name + description + file path). Skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description shown above, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deep reference-chasing: prefer opening only files directly linked from `SKILL.md` unless you're blocked.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue.";
let expected = format!(
"base doc\n\n## Skills\nThese skills are discovered at startup from multiple local sources. Each entry includes a name, description, and file path so you can open the source for full instructions.\n- pdf-processing: extract from pdfs (file: {expected_path_str})\n{usage_rules}"
"base doc\n\n## Skills\nA skill is a set of local instructions to follow that is stored in a `SKILL.md` file. Below is the list of skills that can be used. Each entry includes a name, description, and file path so you can open the source for full instructions when using a specific skill.\n### Available skills\n- pdf-processing: extract from pdfs (file: {expected_path_str})\n### How to use skills\n{usage_rules}"
);
assert_eq!(res, expected);
}
@@ -537,9 +537,9 @@ mod tests {
dunce::canonicalize(cfg.codex_home.join("skills/linting/SKILL.md").as_path())
    .unwrap_or_else(|_| cfg.codex_home.join("skills/linting/SKILL.md"));
let expected_path_str = expected_path.to_string_lossy().replace('\\', "/");
let usage_rules = "- Discovery: Available skills are listed in project docs and may also appear in a runtime \"## Skills\" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue.";
let usage_rules = "- Discovery: The list above is the skills available in this session (name + description + file path). Skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description shown above, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deep reference-chasing: prefer opening only files directly linked from `SKILL.md` unless you're blocked.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue.";
let expected = format!(
"## Skills\nThese skills are discovered at startup from multiple local sources. Each entry includes a name, description, and file path so you can open the source for full instructions.\n- linting: run clippy (file: {expected_path_str})\n{usage_rules}"
"## Skills\nA skill is a set of local instructions to follow that is stored in a `SKILL.md` file. Below is the list of skills that can be used. Each entry includes a name, description, and file path so you can open the source for full instructions when using a specific skill.\n### Available skills\n- linting: run clippy (file: {expected_path_str})\n### How to use skills\n{usage_rules}"
);
assert_eq!(res, expected);
}

@@ -1,9 +1,10 @@
use crate::config::Config;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::config_loader::ConfigLayerStack;
use crate::skills::model::SkillError;
use crate::skills::model::SkillLoadOutcome;
use crate::skills::model::SkillMetadata;
use crate::skills::system::system_cache_root_dir;
use codex_app_server_protocol::ConfigLayerSource;
use codex_protocol::protocol::SkillScope;
use dunce::canonicalize as normalize_path;
use serde::Deserialize;
@@ -32,8 +33,6 @@ struct SkillFrontmatterMetadata {

const SKILLS_FILENAME: &str = "SKILL.md";
const SKILLS_DIR_NAME: &str = "skills";
const REPO_ROOT_CONFIG_DIR_NAME: &str = ".codex";
const ADMIN_SKILLS_ROOT: &str = "/etc/codex/skills";
const MAX_NAME_LEN: usize = 64;
const MAX_DESCRIPTION_LEN: usize = 1024;
const MAX_SHORT_DESCRIPTION_LEN: usize = MAX_DESCRIPTION_LEN;
@@ -88,86 +87,81 @@
        .skills
        .retain(|skill| seen.insert(skill.name.clone()));

    outcome
        .skills
        .sort_by(|a, b| a.name.cmp(&b.name).then_with(|| a.path.cmp(&b.path)));

    outcome
}

pub(crate) fn user_skills_root(codex_home: &Path) -> SkillRoot {
    SkillRoot {
        path: codex_home.join(SKILLS_DIR_NAME),
        scope: SkillScope::User,
    }
}

pub(crate) fn system_skills_root(codex_home: &Path) -> SkillRoot {
    SkillRoot {
        path: system_cache_root_dir(codex_home),
        scope: SkillScope::System,
    }
}

pub(crate) fn admin_skills_root() -> SkillRoot {
    SkillRoot {
        path: PathBuf::from(ADMIN_SKILLS_ROOT),
        scope: SkillScope::Admin,
    }
}

pub(crate) fn repo_skills_root(cwd: &Path) -> Option<SkillRoot> {
    let base = if cwd.is_dir() { cwd } else { cwd.parent()? };
    let base = normalize_path(base).unwrap_or_else(|_| base.to_path_buf());

    let repo_root =
        resolve_root_git_project_for_trust(&base).map(|root| normalize_path(&root).unwrap_or(root));

    let scope = SkillScope::Repo;
    if let Some(repo_root) = repo_root.as_deref() {
        for dir in base.ancestors() {
            let skills_root = dir.join(REPO_ROOT_CONFIG_DIR_NAME).join(SKILLS_DIR_NAME);
            if skills_root.is_dir() {
                return Some(SkillRoot {
                    path: skills_root,
                    scope,
                });
            }

            if dir == repo_root {
                break;
            }
fn scope_rank(scope: SkillScope) -> u8 {
    // Higher-priority scopes first (matches dedupe priority order).
    match scope {
        SkillScope::Repo => 0,
        SkillScope::User => 1,
        SkillScope::System => 2,
        SkillScope::Admin => 3,
    }
        return None;
}

    let skills_root = base.join(REPO_ROOT_CONFIG_DIR_NAME).join(SKILLS_DIR_NAME);
    skills_root.is_dir().then_some(SkillRoot {
        path: skills_root,
        scope,
    })
    outcome.skills.sort_by(|a, b| {
        scope_rank(a.scope)
            .cmp(&scope_rank(b.scope))
            .then_with(|| a.name.cmp(&b.name))
            .then_with(|| a.path.cmp(&b.path))
    });

    outcome
}
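
Sketch of the resulting order (an illustration, assuming `scope_rank` is visible at the call site and `SkillScope` is `Copy`; dedupe has already kept the first occurrence per name):

// Repo (0) sorts ahead of User (1), System (2), and Admin (3).
let mut scopes = vec![
    SkillScope::Admin,
    SkillScope::System,
    SkillScope::User,
    SkillScope::Repo,
];
scopes.sort_by_key(|scope| scope_rank(*scope));
assert_eq!(
    scopes,
    vec![
        SkillScope::Repo,
        SkillScope::User,
        SkillScope::System,
        SkillScope::Admin,
    ]
);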

pub(crate) fn skill_roots_for_cwd(codex_home: &Path, cwd: &Path) -> Vec<SkillRoot> {
fn skill_roots_from_layer_stack_inner(config_layer_stack: &ConfigLayerStack) -> Vec<SkillRoot> {
    let mut roots = Vec::new();

    if let Some(repo_root) = repo_skills_root(cwd) {
        roots.push(repo_root);
    }
    for layer in config_layer_stack.layers_high_to_low() {
        let Some(config_folder) = layer.config_folder() else {
            continue;
        };

        // Load order matters: we dedupe by name, keeping the first occurrence.
        // Priority order: repo, user, system, then admin.
        roots.push(user_skills_root(codex_home));
        roots.push(system_skills_root(codex_home));
        if cfg!(unix) {
            roots.push(admin_skills_root());
        match &layer.name {
            ConfigLayerSource::Project { .. } => {
                roots.push(SkillRoot {
                    path: config_folder.as_path().join(SKILLS_DIR_NAME),
                    scope: SkillScope::Repo,
                });
            }
            ConfigLayerSource::User { .. } => {
                // `$CODEX_HOME/skills` (user-installed skills).
                roots.push(SkillRoot {
                    path: config_folder.as_path().join(SKILLS_DIR_NAME),
                    scope: SkillScope::User,
                });

                // Embedded system skills are cached under `$CODEX_HOME/skills/.system` and are a
                // special case (not a config layer).
                roots.push(SkillRoot {
                    path: system_cache_root_dir(config_folder.as_path()),
                    scope: SkillScope::System,
                });
            }
            ConfigLayerSource::System { .. } => {
                // The system config layer lives under `/etc/codex/` on Unix, so treat
                // `/etc/codex/skills` as admin-scoped skills.
                roots.push(SkillRoot {
                    path: config_folder.as_path().join(SKILLS_DIR_NAME),
                    scope: SkillScope::Admin,
                });
            }
            ConfigLayerSource::Mdm { .. }
            | ConfigLayerSource::SessionFlags
            | ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. }
            | ConfigLayerSource::LegacyManagedConfigTomlFromMdm => {}
        }
    }

    roots
}

fn skill_roots(config: &Config) -> Vec<SkillRoot> {
    skill_roots_for_cwd(&config.codex_home, &config.cwd)
    skill_roots_from_layer_stack_inner(&config.config_layer_stack)
}

pub(crate) fn skill_roots_from_layer_stack(
    config_layer_stack: &ConfigLayerStack,
) -> Vec<SkillRoot> {
    skill_roots_from_layer_stack_inner(config_layer_stack)
}

fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut SkillLoadOutcome) {
@@ -318,21 +312,91 @@ fn extract_frontmatter(contents: &str) -> Option<String> {
mod tests {
    use super::*;
    use crate::config::ConfigBuilder;
    use crate::config::ConfigOverrides;
    use crate::config_loader::ConfigLayerEntry;
    use crate::config_loader::ConfigLayerStack;
    use crate::config_loader::ConfigRequirements;
    use codex_protocol::protocol::SkillScope;
    use codex_utils_absolute_path::AbsolutePathBuf;
    use pretty_assertions::assert_eq;
    use std::path::Path;
    use std::process::Command;
    use tempfile::TempDir;
    use toml::Value as TomlValue;

    const REPO_ROOT_CONFIG_DIR_NAME: &str = ".codex";

    async fn make_config(codex_home: &TempDir) -> Config {
        let mut config = ConfigBuilder::default()
        make_config_for_cwd(codex_home, codex_home.path().to_path_buf()).await
    }

    async fn make_config_for_cwd(codex_home: &TempDir, cwd: PathBuf) -> Config {
        let harness_overrides = ConfigOverrides {
            cwd: Some(cwd),
            ..Default::default()
        };

        ConfigBuilder::default()
            .codex_home(codex_home.path().to_path_buf())
            .harness_overrides(harness_overrides)
            .build()
            .await
            .expect("defaults for test should always succeed");
            .expect("defaults for test should always succeed")
    }

        config.cwd = codex_home.path().to_path_buf();
        config
    fn mark_as_git_repo(dir: &Path) {
        // Config/project-root discovery only checks for the presence of `.git` (file or dir),
        // so we can avoid shelling out to `git init` in tests.
        fs::write(dir.join(".git"), "gitdir: fake\n").unwrap();
    }

    fn normalized(path: &Path) -> PathBuf {
        normalize_path(path).unwrap_or_else(|_| path.to_path_buf())
    }

    #[test]
    fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to_admin()
    -> anyhow::Result<()> {
        let tmp = tempfile::tempdir()?;

        let system_folder = tmp.path().join("etc/codex");
        let user_folder = tmp.path().join("home/codex");
        fs::create_dir_all(&system_folder)?;
        fs::create_dir_all(&user_folder)?;

        // The file path doesn't need to exist; it's only used to derive the config folder.
        let system_file = AbsolutePathBuf::from_absolute_path(system_folder.join("config.toml"))?;
        let user_file = AbsolutePathBuf::from_absolute_path(user_folder.join("config.toml"))?;

        let layers = vec![
            ConfigLayerEntry::new(
                ConfigLayerSource::System { file: system_file },
                TomlValue::Table(toml::map::Map::new()),
            ),
            ConfigLayerEntry::new(
                ConfigLayerSource::User { file: user_file },
                TomlValue::Table(toml::map::Map::new()),
            ),
        ];
        let stack = ConfigLayerStack::new(layers, ConfigRequirements::default())?;

        let got = skill_roots_from_layer_stack(&stack)
            .into_iter()
            .map(|root| (root.scope, root.path))
            .collect::<Vec<_>>();

        assert_eq!(
            got,
            vec![
                (SkillScope::User, user_folder.join("skills")),
                (
                    SkillScope::System,
                    user_folder.join("skills").join(".system")
                ),
                (SkillScope::Admin, system_folder.join("skills")),
            ]
        );

        Ok(())
    }

    fn write_skill(codex_home: &TempDir, dir: &str, name: &str, description: &str) -> PathBuf {
@@ -368,7 +432,7 @@ mod tests {
    #[tokio::test]
    async fn loads_valid_skill() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        write_skill(&codex_home, "demo", "demo-skill", "does things\ncarefully");
        let skill_path = write_skill(&codex_home, "demo", "demo-skill", "does things\ncarefully");
        let cfg = make_config(&codex_home).await;

        let outcome = load_skills(&cfg);
@@ -377,15 +441,15 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        let skill = &outcome.skills[0];
        assert_eq!(skill.name, "demo-skill");
        assert_eq!(skill.description, "does things carefully");
        assert_eq!(skill.short_description, None);
        let path_str = skill.path.to_string_lossy().replace('\\', "/");
        assert!(
            path_str.ends_with("skills/demo/SKILL.md"),
            "unexpected path {path_str}"
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "demo-skill".to_string(),
                description: "does things carefully".to_string(),
                short_description: None,
                path: normalized(&skill_path),
                scope: SkillScope::User,
            }]
        );
    }

@@ -395,7 +459,8 @@ mod tests {
        let skill_dir = codex_home.path().join("skills/demo");
        fs::create_dir_all(&skill_dir).unwrap();
        let contents = "---\nname: demo-skill\ndescription: long description\nmetadata:\n short-description: short summary\n---\n\n# Body\n";
        fs::write(skill_dir.join(SKILLS_FILENAME), contents).unwrap();
        let skill_path = skill_dir.join(SKILLS_FILENAME);
        fs::write(&skill_path, contents).unwrap();

        let cfg = make_config(&codex_home).await;
        let outcome = load_skills(&cfg);
@@ -404,10 +469,15 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(
            outcome.skills[0].short_description,
            Some("short summary".to_string())
            outcome.skills,
            vec![SkillMetadata {
                name: "demo-skill".to_string(),
                description: "long description".to_string(),
                short_description: Some("short summary".to_string()),
                path: normalized(&skill_path),
                scope: SkillScope::User,
            }]
        );
    }

@@ -493,22 +563,14 @@ mod tests {
    async fn loads_skills_from_repo_root() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");

        let status = Command::new("git")
            .arg("init")
            .current_dir(repo_dir.path())
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");
        mark_as_git_repo(repo_dir.path());

        let skills_root = repo_dir
            .path()
            .join(REPO_ROOT_CONFIG_DIR_NAME)
            .join(SKILLS_DIR_NAME);
        write_skill_at(&skills_root, "repo", "repo-skill", "from repo");
        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = repo_dir.path().to_path_buf();
        let repo_root = normalize_path(&skills_root).unwrap_or_else(|_| skills_root.clone());
        let skill_path = write_skill_at(&skills_root, "repo", "repo-skill", "from repo");
        let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -516,28 +578,28 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        let skill = &outcome.skills[0];
        assert_eq!(skill.name, "repo-skill");
        assert!(skill.path.starts_with(&repo_root));
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "repo-skill".to_string(),
                description: "from repo".to_string(),
                short_description: None,
                path: normalized(&skill_path),
                scope: SkillScope::Repo,
            }]
        );
    }

    #[tokio::test]
    async fn loads_skills_from_nearest_codex_dir_under_repo_root() {
    async fn loads_skills_from_all_codex_dirs_under_project_root() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");

        let status = Command::new("git")
            .arg("init")
            .current_dir(repo_dir.path())
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");
        mark_as_git_repo(repo_dir.path());

        let nested_dir = repo_dir.path().join("nested/inner");
        fs::create_dir_all(&nested_dir).unwrap();

        write_skill_at(
        let root_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
@@ -546,7 +608,7 @@ mod tests {
            "root-skill",
            "from root",
        );
        write_skill_at(
        let nested_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join("nested")
@@ -557,8 +619,7 @@ mod tests {
            "from nested",
        );

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = nested_dir;
        let cfg = make_config_for_cwd(&codex_home, nested_dir).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -566,8 +627,25 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "nested-skill");
        assert_eq!(
            outcome.skills,
            vec![
                SkillMetadata {
                    name: "nested-skill".to_string(),
                    description: "from nested".to_string(),
                    short_description: None,
                    path: normalized(&nested_skill_path),
                    scope: SkillScope::Repo,
                },
                SkillMetadata {
                    name: "root-skill".to_string(),
                    description: "from root".to_string(),
                    short_description: None,
                    path: normalized(&root_skill_path),
                    scope: SkillScope::Repo,
                },
            ]
        );
    }

    #[tokio::test]
@@ -575,7 +653,7 @@ mod tests {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let work_dir = tempfile::tempdir().expect("tempdir");

        write_skill_at(
        let skill_path = write_skill_at(
            &work_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
@@ -585,8 +663,7 @@ mod tests {
            "from cwd",
        );

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = work_dir.path().to_path_buf();
        let cfg = make_config_for_cwd(&codex_home, work_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -594,25 +671,26 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "local-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::Repo);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "local-skill".to_string(),
                description: "from cwd".to_string(),
                short_description: None,
                path: normalized(&skill_path),
                scope: SkillScope::Repo,
            }]
        );
    }

    #[tokio::test]
    async fn deduplicates_by_name_preferring_repo_over_user() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");
        mark_as_git_repo(repo_dir.path());

        let status = Command::new("git")
            .arg("init")
            .current_dir(repo_dir.path())
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");

        write_skill(&codex_home, "user", "dupe-skill", "from user");
        write_skill_at(
        let _user_skill_path = write_skill(&codex_home, "user", "dupe-skill", "from user");
        let repo_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
@@ -622,8 +700,7 @@ mod tests {
            "from repo",
        );

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = repo_dir.path().to_path_buf();
        let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -631,17 +708,25 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "dupe-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::Repo);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from repo".to_string(),
                short_description: None,
                path: normalized(&repo_skill_path),
                scope: SkillScope::Repo,
            }]
        );
    }

    #[tokio::test]
    async fn loads_system_skills_when_present() {
        let codex_home = tempfile::tempdir().expect("tempdir");

        write_system_skill(&codex_home, "system", "dupe-skill", "from system");
        write_skill(&codex_home, "user", "dupe-skill", "from user");
        let _system_skill_path =
            write_system_skill(&codex_home, "system", "dupe-skill", "from system");
        let user_skill_path = write_skill(&codex_home, "user", "dupe-skill", "from user");

        let cfg = make_config(&codex_home).await;
        let outcome = load_skills(&cfg);
@@ -650,9 +735,16 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].description, "from user");
        assert_eq!(outcome.skills[0].scope, SkillScope::User);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from user".to_string(),
                short_description: None,
                path: normalized(&user_skill_path),
                scope: SkillScope::User,
            }]
        );
    }

    #[tokio::test]
@@ -672,15 +764,9 @@ mod tests {
            "from outer",
        );

        let status = Command::new("git")
            .arg("init")
            .current_dir(&repo_dir)
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");
        mark_as_git_repo(&repo_dir);

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = repo_dir;
        let cfg = make_config_for_cwd(&codex_home, repo_dir).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -695,15 +781,9 @@ mod tests {
    async fn loads_skills_when_cwd_is_file_in_repo() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");
        mark_as_git_repo(repo_dir.path());

        let status = Command::new("git")
            .arg("init")
            .current_dir(repo_dir.path())
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");

        write_skill_at(
        let skill_path = write_skill_at(
            &repo_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
@@ -715,8 +795,7 @@ mod tests {
        let file_path = repo_dir.path().join("some-file.txt");
        fs::write(&file_path, "contents").unwrap();

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = file_path;
        let cfg = make_config_for_cwd(&codex_home, file_path).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -724,9 +803,16 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "repo-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::Repo);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "repo-skill".to_string(),
                description: "from repo".to_string(),
                short_description: None,
                path: normalized(&skill_path),
                scope: SkillScope::Repo,
            }]
        );
    }

    #[tokio::test]
@@ -746,8 +832,7 @@ mod tests {
            "from outer",
        );

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = nested_dir;
        let cfg = make_config_for_cwd(&codex_home, nested_dir).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -763,10 +848,9 @@ mod tests {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let work_dir = tempfile::tempdir().expect("tempdir");

        write_system_skill(&codex_home, "system", "system-skill", "from system");
        let skill_path = write_system_skill(&codex_home, "system", "system-skill", "from system");

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = work_dir.path().to_path_buf();
        let cfg = make_config_for_cwd(&codex_home, work_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -774,9 +858,16 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "system-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::System);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "system-skill".to_string(),
                description: "from system".to_string(),
                short_description: None,
                path: normalized(&skill_path),
                scope: SkillScope::System,
            }]
        );
    }

    #[tokio::test]
@@ -800,8 +891,10 @@ mod tests {
        let system_dir = tempfile::tempdir().expect("tempdir");
        let admin_dir = tempfile::tempdir().expect("tempdir");

        write_skill_at(system_dir.path(), "system", "dupe-skill", "from system");
        write_skill_at(admin_dir.path(), "admin", "dupe-skill", "from admin");
        let system_skill_path =
            write_skill_at(system_dir.path(), "system", "dupe-skill", "from system");
        let _admin_skill_path =
            write_skill_at(admin_dir.path(), "admin", "dupe-skill", "from admin");

        let outcome = load_skills_from_roots([
            SkillRoot {
@@ -819,9 +912,16 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "dupe-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::System);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from system".to_string(),
                short_description: None,
                path: normalized(&system_skill_path),
                scope: SkillScope::System,
            }]
        );
    }

    #[tokio::test]
@@ -829,11 +929,11 @@ mod tests {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let work_dir = tempfile::tempdir().expect("tempdir");

        write_skill(&codex_home, "user", "dupe-skill", "from user");
        write_system_skill(&codex_home, "system", "dupe-skill", "from system");
        let user_skill_path = write_skill(&codex_home, "user", "dupe-skill", "from user");
        let _system_skill_path =
            write_system_skill(&codex_home, "system", "dupe-skill", "from system");

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = work_dir.path().to_path_buf();
        let cfg = make_config_for_cwd(&codex_home, work_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -841,24 +941,25 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "dupe-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::User);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from user".to_string(),
                short_description: None,
                path: normalized(&user_skill_path),
                scope: SkillScope::User,
            }]
        );
    }

    #[tokio::test]
    async fn deduplicates_by_name_preferring_repo_over_system() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");
        mark_as_git_repo(repo_dir.path());

        let status = Command::new("git")
            .arg("init")
            .current_dir(repo_dir.path())
            .status()
            .expect("git init");
        assert!(status.success(), "git init failed");

        write_skill_at(
        let repo_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
@@ -867,10 +968,10 @@ mod tests {
            "dupe-skill",
            "from repo",
        );
        write_system_skill(&codex_home, "system", "dupe-skill", "from system");
        let _system_skill_path =
            write_system_skill(&codex_home, "system", "dupe-skill", "from system");

        let mut cfg = make_config(&codex_home).await;
        cfg.cwd = repo_dir.path().to_path_buf();
        let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await;

        let outcome = load_skills(&cfg);
        assert!(
@@ -878,8 +979,66 @@ mod tests {
            "unexpected errors: {:?}",
            outcome.errors
        );
        assert_eq!(outcome.skills.len(), 1);
        assert_eq!(outcome.skills[0].name, "dupe-skill");
        assert_eq!(outcome.skills[0].scope, SkillScope::Repo);
        assert_eq!(
            outcome.skills,
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from repo".to_string(),
                short_description: None,
                path: normalized(&repo_skill_path),
                scope: SkillScope::Repo,
            }]
        );
    }

    #[tokio::test]
    async fn deduplicates_by_name_preferring_nearest_project_codex_dir() {
        let codex_home = tempfile::tempdir().expect("tempdir");
        let repo_dir = tempfile::tempdir().expect("tempdir");
        mark_as_git_repo(repo_dir.path());

        let nested_dir = repo_dir.path().join("nested/inner");
        fs::create_dir_all(&nested_dir).unwrap();

        let _root_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join(REPO_ROOT_CONFIG_DIR_NAME)
                .join(SKILLS_DIR_NAME),
            "root",
            "dupe-skill",
            "from root",
        );
        let nested_skill_path = write_skill_at(
            &repo_dir
                .path()
                .join("nested")
                .join(REPO_ROOT_CONFIG_DIR_NAME)
                .join(SKILLS_DIR_NAME),
            "nested",
            "dupe-skill",
            "from nested",
        );

        let cfg = make_config_for_cwd(&codex_home, nested_dir).await;
        let outcome = load_skills(&cfg);

        assert!(
            outcome.errors.is_empty(),
            "unexpected errors: {:?}",
            outcome.errors
        );
        let expected_path =
            normalize_path(&nested_skill_path).unwrap_or_else(|_| nested_skill_path.clone());
        assert_eq!(
            vec![SkillMetadata {
                name: "dupe-skill".to_string(),
                description: "from nested".to_string(),
                short_description: None,
                path: expected_path,
                scope: SkillScope::Repo,
            }],
            outcome.skills
        );
    }
}

@@ -3,10 +3,17 @@ use std::path::Path;
use std::path::PathBuf;
use std::sync::RwLock;

use codex_utils_absolute_path::AbsolutePathBuf;
use toml::Value as TomlValue;

use crate::config::Config;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
use crate::skills::SkillLoadOutcome;
use crate::skills::loader::load_skills_from_roots;
use crate::skills::loader::skill_roots_for_cwd;
use crate::skills::loader::skill_roots_from_layer_stack;
use crate::skills::system::install_system_skills;

pub struct SkillsManager {
    codex_home: PathBuf,
    cache_by_cwd: RwLock<HashMap<PathBuf, SkillLoadOutcome>>,
@@ -24,11 +31,32 @@ impl SkillsManager {
    }
}

    pub fn skills_for_cwd(&self, cwd: &Path) -> SkillLoadOutcome {
        self.skills_for_cwd_with_options(cwd, false)
    /// Load skills for an already-constructed [`Config`], avoiding any additional config-layer
    /// loading. This also seeds the per-cwd cache for subsequent lookups.
    pub fn skills_for_config(&self, config: &Config) -> SkillLoadOutcome {
        let cwd = &config.cwd;
        let cached = match self.cache_by_cwd.read() {
            Ok(cache) => cache.get(cwd).cloned(),
            Err(err) => err.into_inner().get(cwd).cloned(),
        };
        if let Some(outcome) = cached {
            return outcome;
        }

        let roots = skill_roots_from_layer_stack(&config.config_layer_stack);
        let outcome = load_skills_from_roots(roots);
        match self.cache_by_cwd.write() {
            Ok(mut cache) => {
                cache.insert(cwd.to_path_buf(), outcome.clone());
            }
            Err(err) => {
                err.into_inner().insert(cwd.to_path_buf(), outcome.clone());
            }
        }
        outcome
}

    pub fn skills_for_cwd_with_options(&self, cwd: &Path, force_reload: bool) -> SkillLoadOutcome {
    pub async fn skills_for_cwd(&self, cwd: &Path, force_reload: bool) -> SkillLoadOutcome {
        let cached = match self.cache_by_cwd.read() {
            Ok(cache) => cache.get(cwd).cloned(),
            Err(err) => err.into_inner().get(cwd).cloned(),
@@ -37,7 +65,41 @@ impl SkillsManager {
            return outcome;
        }

        let roots = skill_roots_for_cwd(&self.codex_home, cwd);
        let cwd_abs = match AbsolutePathBuf::try_from(cwd) {
            Ok(cwd_abs) => cwd_abs,
            Err(err) => {
                return SkillLoadOutcome {
                    errors: vec![crate::skills::model::SkillError {
                        path: cwd.to_path_buf(),
                        message: err.to_string(),
                    }],
                    ..Default::default()
                };
            }
        };

        let cli_overrides: Vec<(String, TomlValue)> = Vec::new();
        let config_layer_stack = match load_config_layers_state(
            &self.codex_home,
            Some(cwd_abs),
            &cli_overrides,
            LoaderOverrides::default(),
        )
        .await
        {
            Ok(config_layer_stack) => config_layer_stack,
            Err(err) => {
                return SkillLoadOutcome {
                    errors: vec![crate::skills::model::SkillError {
                        path: cwd.to_path_buf(),
                        message: err.to_string(),
                    }],
                    ..Default::default()
                };
            }
        };

        let roots = skill_roots_from_layer_stack(&config_layer_stack);
        let outcome = load_skills_from_roots(roots);
        match self.cache_by_cwd.write() {
            Ok(mut cache) => {
@@ -50,3 +112,52 @@ impl SkillsManager {
        outcome
    }
}
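
A hedged usage sketch of the cache behavior (uses only APIs shown above; the empty temp dirs and test name are assumptions):

#[tokio::test]
async fn skills_for_cwd_serves_second_lookup_from_cache() {
    let codex_home = tempfile::tempdir().expect("tempdir");
    let cwd = tempfile::tempdir().expect("tempdir");
    let manager = SkillsManager::new(codex_home.path().to_path_buf());

    // First call resolves config layers for `cwd` and seeds `cache_by_cwd`.
    let first = manager.skills_for_cwd(cwd.path(), false).await;
    // Second call with the same cwd is answered from the cache.
    let second = manager.skills_for_cwd(cwd.path(), false).await;
    assert_eq!(second.skills, first.skills);
}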
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config::ConfigBuilder;
|
||||
use crate::config::ConfigOverrides;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::fs;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn write_user_skill(codex_home: &TempDir, dir: &str, name: &str, description: &str) {
|
||||
let skill_dir = codex_home.path().join("skills").join(dir);
|
||||
fs::create_dir_all(&skill_dir).unwrap();
|
||||
let content = format!("---\nname: {name}\ndescription: {description}\n---\n\n# Body\n");
|
||||
fs::write(skill_dir.join("SKILL.md"), content).unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn skills_for_config_seeds_cache_by_cwd() {
|
||||
let codex_home = tempfile::tempdir().expect("tempdir");
|
||||
let cwd = tempfile::tempdir().expect("tempdir");
|
||||
|
||||
let cfg = ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.harness_overrides(ConfigOverrides {
|
||||
cwd: Some(cwd.path().to_path_buf()),
|
||||
..Default::default()
|
||||
})
|
||||
.build()
|
||||
.await
|
||||
.expect("defaults for test should always succeed");
|
||||
|
||||
let skills_manager = SkillsManager::new(codex_home.path().to_path_buf());
|
||||
|
||||
write_user_skill(&codex_home, "a", "skill-a", "from a");
|
||||
let outcome1 = skills_manager.skills_for_config(&cfg);
|
||||
assert!(
|
||||
outcome1.skills.iter().any(|s| s.name == "skill-a"),
|
||||
"expected skill-a to be discovered"
|
||||
);
|
||||
|
||||
// Write a new skill after the first call; the second call should hit the cache and not
|
||||
// reflect the new file.
|
||||
write_user_skill(&codex_home, "b", "skill-b", "from b");
|
||||
let outcome2 = skills_manager.skills_for_config(&cfg);
|
||||
assert_eq!(outcome2.errors, outcome1.errors);
|
||||
assert_eq!(outcome2.skills, outcome1.skills);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,8 @@ pub fn render_skills_section(skills: &[SkillMetadata]) -> Option<String> {

     let mut lines: Vec<String> = Vec::new();
     lines.push("## Skills".to_string());
-    lines.push("These skills are discovered at startup from multiple local sources. Each entry includes a name, description, and file path so you can open the source for full instructions.".to_string());
+    lines.push("A skill is a set of local instructions to follow that is stored in a `SKILL.md` file. Below is the list of skills that can be used. Each entry includes a name, description, and file path so you can open the source for full instructions when using a specific skill.".to_string());
+    lines.push("### Available skills".to_string());

     for skill in skills {
         let path_str = skill.path.to_string_lossy().replace('\\', "/");
@@ -16,22 +17,22 @@ pub fn render_skills_section(skills: &[SkillMetadata]) -> Option<String> {
         lines.push(format!("- {name}: {description} (file: {path_str})"));
     }

     lines.push("### How to use skills".to_string());
     lines.push(
-        r###"- Discovery: Available skills are listed in project docs and may also appear in a runtime "## Skills" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths.
-- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.
+        r###"- Discovery: The list above is the skills available in this session (name + description + file path). Skill bodies live on disk at the listed paths.
+- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description shown above, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.
 - Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.
 - How to use a skill (progressive disclosure):
   1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.
   2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.
   3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.
   4) If `assets/` or templates exist, reuse them instead of recreating from scratch.
 - Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding.
 - Coordination and sequencing:
   - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.
   - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.
 - Context hygiene:
   - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.
-  - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`.
+  - Avoid deep reference-chasing: prefer opening only files directly linked from `SKILL.md` unless you're blocked.
   - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.
 - Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue."###
             .to_string(),
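For illustration only (the skill name, description, and path below are made up, and this assumes the collected lines are later joined with newlines), the section assembled above renders along these lines:

## Skills
A skill is a set of local instructions to follow that is stored in a `SKILL.md` file. ...
### Available skills
- skill-a: from a (file: /home/user/.codex/skills/a/SKILL.md)
### How to use skills
- Discovery: The list above is the skills available in this session (name + description + file path). ...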
@@ -2,6 +2,7 @@ use std::sync::Arc;

 use crate::AuthManager;
 use crate::RolloutRecorder;
+use crate::exec_policy::ExecPolicyManager;
 use crate::mcp_connection_manager::McpConnectionManager;
 use crate::models_manager::manager::ModelsManager;
 use crate::skills::SkillsManager;
@@ -21,6 +22,7 @@ pub(crate) struct SessionServices {
     pub(crate) rollout: Mutex<Option<RolloutRecorder>>,
     pub(crate) user_shell: Arc<crate::shell::Shell>,
     pub(crate) show_raw_agent_reasoning: bool,
+    pub(crate) exec_policy: ExecPolicyManager,
     pub(crate) auth_manager: Arc<AuthManager>,
     pub(crate) models_manager: Arc<ModelsManager>,
     pub(crate) otel_manager: OtelManager,
@@ -6,7 +6,6 @@ use std::sync::Arc;
 use crate::codex::TurnContext;
 use crate::exec::ExecParams;
 use crate::exec_env::create_env;
-use crate::exec_policy::create_exec_approval_requirement_for_command;
 use crate::function_tool::FunctionCallError;
 use crate::is_safe_command::is_known_safe_command;
 use crate::protocol::ExecCommandSource;
@@ -252,15 +251,17 @@ impl ShellHandler {
         emitter.begin(event_ctx).await;

         let features = session.features();
-        let exec_approval_requirement = create_exec_approval_requirement_for_command(
-            &turn.exec_policy,
-            &features,
-            &exec_params.command,
-            turn.approval_policy,
-            &turn.sandbox_policy,
-            exec_params.sandbox_permissions,
-        )
-        .await;
+        let exec_approval_requirement = session
+            .services
+            .exec_policy
+            .create_exec_approval_requirement_for_command(
+                &features,
+                &exec_params.command,
+                turn.approval_policy,
+                &turn.sandbox_policy,
+                exec_params.sandbox_permissions,
+            )
+            .await;

         let req = ShellRequest {
             command: exec_params.command.clone(),
@@ -1,7 +1,6 @@
 use crate::function_tool::FunctionCallError;
-use crate::is_safe_command::is_known_safe_command;
 use crate::protocol::EventMsg;
 use crate::protocol::ExecCommandSource;
 use crate::protocol::TerminalInteractionEvent;
 use crate::sandboxing::SandboxPermissions;
 use crate::shell::Shell;
@@ -9,9 +8,6 @@ use crate::shell::get_shell_by_model_provided_path;
 use crate::tools::context::ToolInvocation;
 use crate::tools::context::ToolOutput;
 use crate::tools::context::ToolPayload;
-use crate::tools::events::ToolEmitter;
-use crate::tools::events::ToolEventCtx;
-use crate::tools::events::ToolEventStage;
 use crate::tools::handlers::apply_patch::intercept_apply_patch;
 use crate::tools::registry::ToolHandler;
 use crate::tools::registry::ToolKind;
@@ -172,20 +168,6 @@ impl ToolHandler for UnifiedExecHandler {
             return Ok(output);
         }

-        let event_ctx = ToolEventCtx::new(
-            context.session.as_ref(),
-            context.turn.as_ref(),
-            &context.call_id,
-            None,
-        );
-        let emitter = ToolEmitter::unified_exec(
-            &command,
-            cwd.clone(),
-            ExecCommandSource::UnifiedExecStartup,
-            Some(process_id.clone()),
-        );
-        emitter.emit(event_ctx, ToolEventStage::Begin).await;
-
         manager
             .exec_command(
                 ExecCommandRequest {
@@ -5,8 +5,11 @@ Executes shell requests under the orchestrator: asks for approval when needed,
 builds a CommandSpec, and runs it under the current SandboxAttempt.
 */
 use crate::exec::ExecToolCallOutput;
+use crate::features::Feature;
+use crate::powershell::prefix_powershell_script_with_utf8;
 use crate::sandboxing::SandboxPermissions;
 use crate::sandboxing::execute_env;
+use crate::shell::ShellType;
 use crate::tools::runtimes::build_command_spec;
 use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot;
 use crate::tools::sandboxing::Approvable;
@@ -144,6 +147,13 @@ impl ToolRuntime<ShellRequest, ExecToolCallOutput> for ShellRuntime {
         let base_command = &req.command;
         let session_shell = ctx.session.user_shell();
         let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref());
+        let command = if matches!(session_shell.shell_type, ShellType::PowerShell)
+            && ctx.session.features().enabled(Feature::PowershellUtf8)
+        {
+            prefix_powershell_script_with_utf8(&command)
+        } else {
+            command
+        };

         let spec = build_command_spec(
             &command,
@@ -7,7 +7,10 @@ the session manager to spawn PTYs once an ExecEnv is prepared.
 use crate::error::CodexErr;
 use crate::error::SandboxErr;
 use crate::exec::ExecExpiration;
+use crate::features::Feature;
+use crate::powershell::prefix_powershell_script_with_utf8;
 use crate::sandboxing::SandboxPermissions;
+use crate::shell::ShellType;
 use crate::tools::runtimes::build_command_spec;
 use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot;
 use crate::tools::sandboxing::Approvable;
@@ -165,6 +168,13 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecSession> for UnifiedExecRunt
         let base_command = &req.command;
         let session_shell = ctx.session.user_shell();
         let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref());
+        let command = if matches!(session_shell.shell_type, ShellType::PowerShell)
+            && ctx.session.features().enabled(Feature::PowershellUtf8)
+        {
+            prefix_powershell_script_with_utf8(&command)
+        } else {
+            command
+        };

         let spec = build_command_spec(
             &command,
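Both runtimes gate the same wrapping step behind Feature::PowershellUtf8. The body of prefix_powershell_script_with_utf8 is not shown in this diff; as a rough sketch of the idea (the exact prefix string and the string-based signature here are assumptions, not the real implementation), a helper in this spirit could prepend console-encoding setup to the script:

// Hedged sketch only; the real helper lives in crate::powershell and may differ.
fn prefix_script_with_utf8_sketch(script: &str) -> String {
    // Force UTF-8 for both the pipeline output and the console before the script runs.
    let prefix = "$OutputEncoding = [System.Text.Encoding]::UTF8; \
[Console]::OutputEncoding = [System.Text.Encoding]::UTF8; ";
    format!("{prefix}{script}")
}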
@@ -6,6 +6,7 @@ use crate::config::Config;
|
||||
use codex_protocol::models::FunctionCallOutputContentItem;
|
||||
use codex_protocol::openai_models::TruncationMode;
|
||||
use codex_protocol::openai_models::TruncationPolicyConfig;
|
||||
use codex_protocol::protocol::TruncationPolicy as ProtocolTruncationPolicy;
|
||||
|
||||
const APPROX_BYTES_PER_TOKEN: usize = 4;
|
||||
|
||||
@@ -15,6 +16,15 @@ pub enum TruncationPolicy {
|
||||
Tokens(usize),
|
||||
}
|
||||
|
||||
impl From<TruncationPolicy> for ProtocolTruncationPolicy {
|
||||
fn from(value: TruncationPolicy) -> Self {
|
||||
match value {
|
||||
TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes),
|
||||
TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TruncationPolicyConfig> for TruncationPolicy {
|
||||
fn from(config: TruncationPolicyConfig) -> Self {
|
||||
match config.mode {
|
||||
|
||||
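A short worked example of the new conversion (illustrative values; the assert-style usage is ours, and the byte estimate assumes APPROX_BYTES_PER_TOKEN is used for token-to-byte approximation, as its name suggests):

// Illustrative only: the types are from the diff above, the values are made up.
let policy = TruncationPolicy::Tokens(2048);
let protocol_policy: ProtocolTruncationPolicy = policy.into();
assert!(matches!(protocol_policy, ProtocolTruncationPolicy::Tokens(2048)));

// With APPROX_BYTES_PER_TOKEN = 4, a 2048-token budget is roughly 8192 bytes.
assert_eq!(2048 * APPROX_BYTES_PER_TOKEN, 8192);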
@@ -7,9 +7,12 @@ use tokio::time::Duration;
 use tokio::time::Instant;
 use tokio::time::Sleep;

-use super::UnifiedExecContext;
-use super::session::UnifiedExecSession;
 use crate::codex::Session;
 use crate::codex::TurnContext;
 use crate::exec::ExecToolCallOutput;
+use crate::exec::MAX_EXEC_OUTPUT_DELTAS_PER_CALL;
 use crate::exec::StreamOutput;
 use crate::protocol::EventMsg;
 use crate::protocol::ExecCommandOutputDeltaEvent;
@@ -18,20 +21,25 @@ use crate::protocol::ExecOutputStream;
 use crate::tools::events::ToolEmitter;
 use crate::tools::events::ToolEventCtx;
 use crate::tools::events::ToolEventStage;

-use super::CommandTranscript;
+use super::UnifiedExecContext;
+use super::session::UnifiedExecSession;
+use crate::unified_exec::head_tail_buffer::HeadTailBuffer;

 pub(crate) const TRAILING_OUTPUT_GRACE: Duration = Duration::from_millis(100);

+/// Upper bound for a single ExecCommandOutputDelta chunk emitted by unified exec.
+///
+/// The unified exec output buffer already caps *retained* output (see
+/// `UNIFIED_EXEC_OUTPUT_MAX_BYTES`), but we also cap per-event payload size so
+/// downstream event consumers (especially app-server JSON-RPC) don't have to
+/// process arbitrarily large delta payloads.
+const UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES: usize = 8192;

 /// Spawn a background task that continuously reads from the PTY, appends to the
 /// shared transcript, and emits ExecCommandOutputDelta events on UTF‑8
 /// boundaries.
 pub(crate) fn start_streaming_output(
     session: &UnifiedExecSession,
     context: &UnifiedExecContext,
-    transcript: Arc<Mutex<CommandTranscript>>,
+    transcript: Arc<Mutex<HeadTailBuffer>>,
 ) {
     let mut receiver = session.output_receiver();
     let output_drained = session.output_drained_notify();
@@ -45,6 +53,7 @@ pub(crate) fn start_streaming_output(
         use tokio::sync::broadcast::error::RecvError;

         let mut pending = Vec::<u8>::new();
+        let mut emitted_deltas: usize = 0;

         let mut grace_sleep: Option<Pin<Box<Sleep>>> = None;

@@ -82,6 +91,7 @@ pub(crate) fn start_streaming_output(
                         &call_id,
                         &session_ref,
                         &turn_ref,
+                        &mut emitted_deltas,
                         chunk,
                     ).await;
                 }
@@ -101,7 +111,7 @@ pub(crate) fn spawn_exit_watcher(
     command: Vec<String>,
     cwd: PathBuf,
     process_id: String,
-    transcript: Arc<Mutex<CommandTranscript>>,
+    transcript: Arc<Mutex<HeadTailBuffer>>,
     started_at: Instant,
 ) {
     let exit_token = session.cancellation_token();
@@ -131,17 +141,22 @@ pub(crate) fn spawn_exit_watcher(

 async fn process_chunk(
     pending: &mut Vec<u8>,
-    transcript: &Arc<Mutex<CommandTranscript>>,
+    transcript: &Arc<Mutex<HeadTailBuffer>>,
     call_id: &str,
     session_ref: &Arc<Session>,
     turn_ref: &Arc<TurnContext>,
+    emitted_deltas: &mut usize,
     chunk: Vec<u8>,
 ) {
     pending.extend_from_slice(&chunk);
     while let Some(prefix) = split_valid_utf8_prefix(pending) {
         {
             let mut guard = transcript.lock().await;
-            guard.append(&prefix);
+            guard.push_chunk(prefix.to_vec());
         }

+        if *emitted_deltas >= MAX_EXEC_OUTPUT_DELTAS_PER_CALL {
+            continue;
+        }
+
         let event = ExecCommandOutputDeltaEvent {
@@ -152,6 +167,7 @@ async fn process_chunk(
         session_ref
             .send_event(turn_ref.as_ref(), EventMsg::ExecCommandOutputDelta(event))
             .await;
+        *emitted_deltas += 1;
     }
 }

@@ -166,7 +182,7 @@ pub(crate) async fn emit_exec_end_for_unified_exec(
     command: Vec<String>,
     cwd: PathBuf,
     process_id: Option<String>,
-    transcript: Arc<Mutex<CommandTranscript>>,
+    transcript: Arc<Mutex<HeadTailBuffer>>,
     fallback_output: String,
     exit_code: i32,
     duration: Duration,
@@ -193,12 +209,16 @@ pub(crate) async fn emit_exec_end_for_unified_exec(
 }

 fn split_valid_utf8_prefix(buffer: &mut Vec<u8>) -> Option<Vec<u8>> {
+    split_valid_utf8_prefix_with_max(buffer, UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES)
+}
+
+fn split_valid_utf8_prefix_with_max(buffer: &mut Vec<u8>, max_bytes: usize) -> Option<Vec<u8>> {
     if buffer.is_empty() {
         return None;
     }

-    let len = buffer.len();
-    let mut split = len;
+    let max_len = buffer.len().min(max_bytes);
+    let mut split = max_len;
     while split > 0 {
         if std::str::from_utf8(&buffer[..split]).is_ok() {
             let prefix = buffer[..split].to_vec();
@@ -206,7 +226,7 @@ fn split_valid_utf8_prefix(buffer: &mut Vec<u8>) -> Option<Vec<u8>> {
             return Some(prefix);
         }

-        if len - split > 4 {
+        if max_len - split > 4 {
             break;
         }
         split -= 1;
@@ -219,13 +239,52 @@
 }

 async fn resolve_aggregated_output(
-    transcript: &Arc<Mutex<CommandTranscript>>,
+    transcript: &Arc<Mutex<HeadTailBuffer>>,
     fallback: String,
 ) -> String {
     let guard = transcript.lock().await;
-    if guard.data.is_empty() {
+    if guard.retained_bytes() == 0 {
         return fallback;
     }

-    String::from_utf8_lossy(&guard.data).to_string()
+    String::from_utf8_lossy(&guard.to_bytes()).to_string()
 }

+#[cfg(test)]
+mod tests {
+    use super::split_valid_utf8_prefix_with_max;
+
+    use pretty_assertions::assert_eq;
+
+    #[test]
+    fn split_valid_utf8_prefix_respects_max_bytes_for_ascii() {
+        let mut buf = b"hello word!".to_vec();
+
+        let first = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix");
+        assert_eq!(first, b"hello".to_vec());
+        assert_eq!(buf, b" word!".to_vec());
+
+        let second = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix");
+        assert_eq!(second, b" word".to_vec());
+        assert_eq!(buf, b"!".to_vec());
+    }
+
+    #[test]
+    fn split_valid_utf8_prefix_avoids_splitting_utf8_codepoints() {
+        // "é" is 2 bytes in UTF-8. With a max of 3 bytes, we should only emit 1 char (2 bytes).
+        let mut buf = "ééé".as_bytes().to_vec();
+
+        let first = split_valid_utf8_prefix_with_max(&mut buf, 3).expect("expected prefix");
+        assert_eq!(std::str::from_utf8(&first).unwrap(), "é");
+        assert_eq!(buf, "éé".as_bytes().to_vec());
+    }
+
+    #[test]
+    fn split_valid_utf8_prefix_makes_progress_on_invalid_utf8() {
+        let mut buf = vec![0xff, b'a', b'b'];
+
+        let first = split_valid_utf8_prefix_with_max(&mut buf, 2).expect("expected prefix");
+        assert_eq!(first, vec![0xff]);
+        assert_eq!(buf, b"ab".to_vec());
+    }
+}
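The decrement loop above walks back from the byte cap until the prefix is valid UTF-8 (at most a few steps, since a code point is at most 4 bytes). A standalone sketch of the same boundary search (not part of the diff), using Utf8Error::valid_up_to from the standard library; unlike the production function it does not force progress past an invalid leading byte:

// Sketch only: longest valid UTF-8 prefix within a byte cap.
// The production code additionally emits invalid bytes one at a time so the
// stream always makes progress; this sketch just returns 0 in that case.
fn utf8_prefix_len(bytes: &[u8], max_bytes: usize) -> usize {
    let window = &bytes[..bytes.len().min(max_bytes)];
    match std::str::from_utf8(window) {
        Ok(_) => window.len(),
        // valid_up_to() is the length of the longest prefix that is valid UTF-8.
        Err(err) => err.valid_up_to(),
    }
}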
codex-rs/core/src/unified_exec/head_tail_buffer.rs (new file, 272 lines)
@@ -0,0 +1,272 @@
use crate::unified_exec::UNIFIED_EXEC_OUTPUT_MAX_BYTES;
use std::collections::VecDeque;

/// A capped buffer that preserves a stable prefix ("head") and suffix ("tail"),
/// dropping the middle once it exceeds the configured maximum. The buffer is
/// symmetric, meaning 50% of the capacity is allocated to the head and 50% is
/// allocated to the tail.
#[derive(Debug)]
pub(crate) struct HeadTailBuffer {
    max_bytes: usize,
    head_budget: usize,
    tail_budget: usize,
    head: VecDeque<Vec<u8>>,
    tail: VecDeque<Vec<u8>>,
    head_bytes: usize,
    tail_bytes: usize,
    omitted_bytes: usize,
}

impl Default for HeadTailBuffer {
    fn default() -> Self {
        Self::new(UNIFIED_EXEC_OUTPUT_MAX_BYTES)
    }
}

impl HeadTailBuffer {
    /// Create a new buffer that retains at most `max_bytes` of output.
    ///
    /// The retained output is split across a prefix ("head") and suffix ("tail")
    /// budget, dropping bytes from the middle once the limit is exceeded.
    pub(crate) fn new(max_bytes: usize) -> Self {
        let head_budget = max_bytes / 2;
        let tail_budget = max_bytes.saturating_sub(head_budget);
        Self {
            max_bytes,
            head_budget,
            tail_budget,
            head: VecDeque::new(),
            tail: VecDeque::new(),
            head_bytes: 0,
            tail_bytes: 0,
            omitted_bytes: 0,
        }
    }

    // Used for tests.
    #[allow(dead_code)]
    /// Total bytes currently retained by the buffer (head + tail).
    pub(crate) fn retained_bytes(&self) -> usize {
        self.head_bytes.saturating_add(self.tail_bytes)
    }

    // Used for tests.
    #[allow(dead_code)]
    /// Total bytes that were dropped from the middle due to the size cap.
    pub(crate) fn omitted_bytes(&self) -> usize {
        self.omitted_bytes
    }

    /// Append a chunk of bytes to the buffer.
    ///
    /// Bytes are first added to the head until the head budget is full; any
    /// remaining bytes are added to the tail, with older tail bytes being
    /// dropped to preserve the tail budget.
    pub(crate) fn push_chunk(&mut self, chunk: Vec<u8>) {
        if self.max_bytes == 0 {
            self.omitted_bytes = self.omitted_bytes.saturating_add(chunk.len());
            return;
        }

        // Fill the head budget first, then keep a capped tail.
        if self.head_bytes < self.head_budget {
            let remaining_head = self.head_budget.saturating_sub(self.head_bytes);
            if chunk.len() <= remaining_head {
                self.head_bytes = self.head_bytes.saturating_add(chunk.len());
                self.head.push_back(chunk);
                return;
            }

            // Split the chunk: part goes to head, remainder goes to tail.
            let (head_part, tail_part) = chunk.split_at(remaining_head);
            if !head_part.is_empty() {
                self.head_bytes = self.head_bytes.saturating_add(head_part.len());
                self.head.push_back(head_part.to_vec());
            }
            self.push_to_tail(tail_part.to_vec());
            return;
        }

        self.push_to_tail(chunk);
    }

    /// Snapshot the retained output as a list of chunks.
    ///
    /// The returned chunks are ordered as: head chunks first, then tail chunks.
    /// Omitted bytes are not represented in the snapshot.
    pub(crate) fn snapshot_chunks(&self) -> Vec<Vec<u8>> {
        let mut out = Vec::new();
        out.extend(self.head.iter().cloned());
        out.extend(self.tail.iter().cloned());
        out
    }

    /// Return the retained output as a single byte vector.
    ///
    /// The output is formed by concatenating head chunks, then tail chunks.
    /// Omitted bytes are not represented in the returned value.
    pub(crate) fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(self.retained_bytes());
        for chunk in self.head.iter() {
            out.extend_from_slice(chunk);
        }
        for chunk in self.tail.iter() {
            out.extend_from_slice(chunk);
        }
        out
    }

    /// Drain all retained chunks from the buffer and reset its state.
    ///
    /// The drained chunks are returned in head-then-tail order. Omitted bytes
    /// are discarded along with the retained content.
    pub(crate) fn drain_chunks(&mut self) -> Vec<Vec<u8>> {
        let mut out: Vec<Vec<u8>> = self.head.drain(..).collect();
        out.extend(self.tail.drain(..));
        self.head_bytes = 0;
        self.tail_bytes = 0;
        self.omitted_bytes = 0;
        out
    }

    fn push_to_tail(&mut self, chunk: Vec<u8>) {
        if self.tail_budget == 0 {
            self.omitted_bytes = self.omitted_bytes.saturating_add(chunk.len());
            return;
        }

        if chunk.len() >= self.tail_budget {
            // This single chunk is larger than the whole tail budget. Keep only the last
            // tail_budget bytes and drop everything else.
            let start = chunk.len().saturating_sub(self.tail_budget);
            let kept = chunk[start..].to_vec();
            let dropped = chunk.len().saturating_sub(kept.len());
            self.omitted_bytes = self
                .omitted_bytes
                .saturating_add(self.tail_bytes)
                .saturating_add(dropped);
            self.tail.clear();
            self.tail_bytes = kept.len();
            self.tail.push_back(kept);
            return;
        }

        self.tail_bytes = self.tail_bytes.saturating_add(chunk.len());
        self.tail.push_back(chunk);
        self.trim_tail_to_budget();
    }

    fn trim_tail_to_budget(&mut self) {
        let mut excess = self.tail_bytes.saturating_sub(self.tail_budget);
        while excess > 0 {
            match self.tail.front_mut() {
                Some(front) if excess >= front.len() => {
                    excess -= front.len();
                    self.tail_bytes = self.tail_bytes.saturating_sub(front.len());
                    self.omitted_bytes = self.omitted_bytes.saturating_add(front.len());
                    self.tail.pop_front();
                }
                Some(front) => {
                    front.drain(..excess);
                    self.tail_bytes = self.tail_bytes.saturating_sub(excess);
                    self.omitted_bytes = self.omitted_bytes.saturating_add(excess);
                    break;
                }
                None => break,
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::HeadTailBuffer;

    use pretty_assertions::assert_eq;

    #[test]
    fn keeps_prefix_and_suffix_when_over_budget() {
        let mut buf = HeadTailBuffer::new(10);

        buf.push_chunk(b"0123456789".to_vec());
        assert_eq!(buf.omitted_bytes(), 0);

        // Exceeds max by 2; we should keep head+tail and omit the middle.
        buf.push_chunk(b"ab".to_vec());
        assert!(buf.omitted_bytes() > 0);

        let rendered = String::from_utf8_lossy(&buf.to_bytes()).to_string();
        assert!(rendered.starts_with("01234"));
        assert!(rendered.ends_with("89ab"));
    }

    #[test]
    fn max_bytes_zero_drops_everything() {
        let mut buf = HeadTailBuffer::new(0);
        buf.push_chunk(b"abc".to_vec());

        assert_eq!(buf.retained_bytes(), 0);
        assert_eq!(buf.omitted_bytes(), 3);
        assert_eq!(buf.to_bytes(), b"".to_vec());
        assert_eq!(buf.snapshot_chunks(), Vec::<Vec<u8>>::new());
    }

    #[test]
    fn head_budget_zero_keeps_only_last_byte_in_tail() {
        let mut buf = HeadTailBuffer::new(1);
        buf.push_chunk(b"abc".to_vec());

        assert_eq!(buf.retained_bytes(), 1);
        assert_eq!(buf.omitted_bytes(), 2);
        assert_eq!(buf.to_bytes(), b"c".to_vec());
    }

    #[test]
    fn draining_resets_state() {
        let mut buf = HeadTailBuffer::new(10);
        buf.push_chunk(b"0123456789".to_vec());
        buf.push_chunk(b"ab".to_vec());

        let drained = buf.drain_chunks();
        assert!(!drained.is_empty());

        assert_eq!(buf.retained_bytes(), 0);
        assert_eq!(buf.omitted_bytes(), 0);
        assert_eq!(buf.to_bytes(), b"".to_vec());
    }

    #[test]
    fn chunk_larger_than_tail_budget_keeps_only_tail_end() {
        let mut buf = HeadTailBuffer::new(10);
        buf.push_chunk(b"0123456789".to_vec());

        // Tail budget is 5 bytes. This chunk should replace the tail and keep only its last 5 bytes.
        buf.push_chunk(b"ABCDEFGHIJK".to_vec());

        let out = String::from_utf8_lossy(&buf.to_bytes()).to_string();
        assert!(out.starts_with("01234"));
        assert!(out.ends_with("GHIJK"));
        assert!(buf.omitted_bytes() > 0);
    }

    #[test]
    fn fills_head_then_tail_across_multiple_chunks() {
        let mut buf = HeadTailBuffer::new(10);

        // Fill the 5-byte head budget across multiple chunks.
        buf.push_chunk(b"01".to_vec());
        buf.push_chunk(b"234".to_vec());
        assert_eq!(buf.to_bytes(), b"01234".to_vec());

        // Then fill the 5-byte tail budget.
        buf.push_chunk(b"567".to_vec());
        buf.push_chunk(b"89".to_vec());
        assert_eq!(buf.to_bytes(), b"0123456789".to_vec());
        assert_eq!(buf.omitted_bytes(), 0);

        // One more byte causes the tail to drop its oldest byte.
        buf.push_chunk(b"a".to_vec());
        assert_eq!(buf.to_bytes(), b"012346789a".to_vec());
        assert_eq!(buf.omitted_bytes(), 1);
    }
}
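To make the head/tail split concrete, a small usage sketch (the 1024-byte cap is arbitrary, not the production UNIFIED_EXEC_OUTPUT_MAX_BYTES):

// Ten 512-byte chunks against a 1024-byte cap: the first 512 bytes fill the
// head, the most recent 512 bytes form the tail, and the middle is dropped.
let mut buf = HeadTailBuffer::new(1024);
for _ in 0..10 {
    buf.push_chunk(vec![b'x'; 512]);
}
assert_eq!(buf.retained_bytes(), 1024); // 512-byte head + 512-byte tail
assert_eq!(buf.omitted_bytes(), 4096); // 5120 bytes pushed - 1024 retained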
Some files were not shown because too many files have changed in this diff.