Mirror of https://github.com/openai/codex.git
Synced 2026-02-03 23:43:39 +00:00

Compare commits: 15 commits on dev/cc/tmp

| Author | SHA1 | Date |
|---|---|---|
|  | 124c7fc2be |  |
|  | 0a1323747b |  |
|  | 348d379509 |  |
|  | 6912ba9fda |  |
|  | 27cec53ddc |  |
|  | 42273d94e8 |  |
|  | 1a5289a4ef |  |
|  | 359142f22f |  |
|  | ecff4d4f72 |  |
|  | 985333feff |  |
|  | e01610f762 |  |
|  | 09693d259b |  |
|  | f8ba48d995 |  |
|  | 677532f97b |  |
|  | beb83225e5 |  |

BIN .github/codex-cli-login.png (vendored, new file; binary not shown). After: 2.9 MiB
BIN .github/codex-cli-permissions.png (vendored, new file; binary not shown). After: 408 KiB
BIN .github/codex-cli-splash.png (vendored; binary not shown). Before: 818 KiB, After: 3.1 MiB
BIN .github/demo.gif (vendored, new file; binary not shown). After: 19 MiB

2 .github/workflows/cargo-deny.yml (vendored)

@@ -20,7 +20,7 @@ jobs:
         uses: dtolnay/rust-toolchain@stable
       - name: Run cargo-deny
-        uses: EmbarkStudios/cargo-deny-action@v2
+        uses: EmbarkStudios/cargo-deny-action@v1
         with:
           rust-version: stable
           manifest-path: ./codex-rs/Cargo.toml

(workflow file name not captured)

@@ -12,8 +12,6 @@ permissions:
 jobs:
   close-stale-contributor-prs:
-    # Prevent scheduled runs on forks
-    if: github.repository == 'openai/codex'
     runs-on: ubuntu-latest
     steps:
       - name: Close inactive PRs from contributors

3 .github/workflows/issue-deduplicator.yml (vendored)

@@ -9,8 +9,7 @@ on:
 jobs:
   gather-duplicates:
     name: Identify potential duplicates
-    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
-    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate'))
+    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-deduplicate') }}
     runs-on: ubuntu-latest
     permissions:
       contents: read

3 .github/workflows/issue-labeler.yml (vendored)

@@ -9,8 +9,7 @@ on:
 jobs:
   gather-labels:
     name: Generate label suggestions
-    # Prevent runs on forks (requires OpenAI API key, wastes Actions minutes)
-    if: github.repository == 'openai/codex' && (github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label'))
+    if: ${{ github.event.action == 'opened' || (github.event.action == 'labeled' && github.event.label.name == 'codex-label') }}
     runs-on: ubuntu-latest
     permissions:
       contents: read

4 .github/workflows/rust-release-prepare.yml (vendored)

@@ -14,8 +14,6 @@ permissions:
 jobs:
   prepare:
-    # Prevent scheduled runs on forks (no secrets, wastes Actions minutes)
-    if: github.repository == 'openai/codex'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6

@@ -43,7 +41,7 @@ jobs:
       curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json

     - name: Open pull request (if changed)
-      uses: peter-evans/create-pull-request@v8
+      uses: peter-evans/create-pull-request@v7
       with:
         commit-message: "Update models.json"
         title: "Update models.json"

(file header not captured)

@@ -77,12 +77,6 @@ If you don’t have the tool:
 - Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
 - Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.

-### Spawning workspace binaries in tests (Cargo vs Buck2)
-
-- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries.
-- Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first.
-- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed.

 ### Integration tests (core)

 - Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests.

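For context, the guidance being removed above describes a concrete test pattern. Below is a minimal sketch of that pattern, spawning a first-party binary through `codex_utils_cargo_bin::cargo_bin`; only the call shape appears in the diff, so the exact signature and return type of `cargo_bin` are assumptions here.

```rust
// Sketch only. Assumes `cargo_bin` returns an absolute path to the built
// binary (the removed bullets say it absolutizes the path first, so a later
// change of working directory in the test cannot break the spawn).
use std::process::Command;

#[test]
fn spawn_first_party_binary() {
    // Preferred over `assert_cmd::Command::cargo_bin(...)` or `escargot`,
    // per the removed guidance.
    let bin = codex_utils_cargo_bin::cargo_bin("codex");

    let output = Command::new(bin)
        .arg("--version")
        .output()
        .expect("failed to spawn codex");

    assert!(output.status.success());
}
```
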
77 README.md

@@ -1,11 +1,13 @@
 <p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>

 <p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
-</br>
-</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
-</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>

 <p align="center">
   <img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
 </p>
+</br>
+If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE.</a>
+</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
+</p>

 ---

@@ -13,19 +15,25 @@ If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="http
 ### Installing and running Codex CLI

-Install globally with your preferred package manager:
+Install globally with your preferred package manager. If you use npm:

 ```shell
-# Install using npm
 npm install -g @openai/codex
 ```

+Alternatively, if you use Homebrew:
+
 ```shell
-# Install using Homebrew
 brew install --cask codex
 ```

-Then simply run `codex` to get started.
+Then simply run `codex` to get started:
+
+```shell
+codex
+```
+
+If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me).

 <details>
 <summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary>

@@ -45,15 +53,60 @@ Each archive contains a single entry with the platform baked into the name (e.g.
 ### Using Codex with your ChatGPT plan

+<p align="center">
+  <img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" />
+</p>
+
 Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt).

-You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key).
+You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243).

-## Docs
+### Model Context Protocol (MCP)

-- [**Codex Documentation**](https://developers.openai.com/codex)
+Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers).

+### Configuration
+
+Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
+
+### Execpolicy
+
+See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute.
+
+### Docs & FAQ
+
+- [**Getting started**](./docs/getting-started.md)
+  - [CLI usage](./docs/getting-started.md#cli-usage)
+  - [Slash Commands](./docs/slash_commands.md)
+  - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input)
+  - [Example prompts](./docs/getting-started.md#example-prompts)
+  - [Custom prompts](./docs/prompts.md)
+  - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd)
+- [**Configuration**](./docs/config.md)
+  - [Example config](./docs/example-config.md)
+- [**Sandbox & approvals**](./docs/sandbox.md)
+- [**Execpolicy quickstart**](./docs/execpolicy.md)
+- [**Authentication**](./docs/authentication.md)
+  - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced)
+  - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine)
+- **Automating Codex**
+  - [GitHub Action](https://github.com/openai/codex-action)
+  - [TypeScript SDK](./sdk/typescript/README.md)
+  - [Non-interactive mode (`codex exec`)](./docs/exec.md)
+- [**Advanced**](./docs/advanced.md)
+  - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging)
+  - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp)
+- [**Zero data retention (ZDR)**](./docs/zdr.md)
 - [**Contributing**](./docs/contributing.md)
-- [**Installing & building**](./docs/install.md)
+- [**Install & build**](./docs/install.md)
+  - [System Requirements](./docs/install.md#system-requirements)
+  - [DotSlash](./docs/install.md#dotslash)
+  - [Build from source](./docs/install.md#build-from-source)
+- [**FAQ**](./docs/faq.md)
 - [**Open source fund**](./docs/open-source-fund.md)

 ---

 ## License

 This repository is licensed under the [Apache-2.0 License](LICENSE).

(file header not captured; Python install script)

@@ -2,7 +2,6 @@
 """Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

 import argparse
-from contextlib import contextmanager
 import json
 import os
 import shutil

@@ -13,7 +12,6 @@ import zipfile
 from dataclasses import dataclass
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from pathlib import Path
-import sys
 from typing import Iterable, Sequence
 from urllib.parse import urlparse
 from urllib.request import urlopen

@@ -79,45 +77,6 @@ RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [
 RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS}
 DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS]

-# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI.
-DOWNLOAD_TIMEOUT_SECS = 60
-
-
-def _gha_enabled() -> bool:
-    # GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs
-    # much easier to scan: groups collapse noisy sections and error annotations surface the
-    # failure in the UI without changing the actual exception/traceback output.
-    return os.environ.get("GITHUB_ACTIONS") == "true"
-
-
-def _gha_escape(value: str) -> str:
-    # Workflow commands require percent/newline escaping.
-    return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
-
-
-def _gha_error(*, title: str, message: str) -> None:
-    # Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just
-    # adds a prominent summary line to the job UI so the root cause is easier to spot.
-    if not _gha_enabled():
-        return
-    print(
-        f"::error title={_gha_escape(title)}::{_gha_escape(message)}",
-        flush=True,
-    )
-
-
-@contextmanager
-def _gha_group(title: str):
-    # Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op
-    # so local output remains unchanged.
-    if _gha_enabled():
-        print(f"::group::{_gha_escape(title)}", flush=True)
-    try:
-        yield
-    finally:
-        if _gha_enabled():
-            print("::endgroup::", flush=True)

 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="Install native Codex binaries.")

@@ -172,20 +131,18 @@ def main() -> int:
     workflow_id = workflow_url.rstrip("/").split("/")[-1]
     print(f"Downloading native artifacts from workflow {workflow_id}...")

-    with _gha_group(f"Download native artifacts from workflow {workflow_id}"):
-        with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
-            artifacts_dir = Path(artifacts_dir_str)
-            _download_artifacts(workflow_id, artifacts_dir)
-            install_binary_components(
-                artifacts_dir,
-                vendor_dir,
-                [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
-            )
+    with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str:
+        artifacts_dir = Path(artifacts_dir_str)
+        _download_artifacts(workflow_id, artifacts_dir)
+        install_binary_components(
+            artifacts_dir,
+            vendor_dir,
+            [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS],
+        )

     if "rg" in components:
-        with _gha_group("Fetch ripgrep binaries"):
-            print("Fetching ripgrep binaries...")
-            fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)
+        print("Fetching ripgrep binaries...")
+        fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST)

     print(f"Installed native dependencies into {vendor_dir}")
     return 0

@@ -246,14 +203,7 @@ def fetch_rg(
     for future in as_completed(future_map):
         target = future_map[future]
-        try:
-            results[target] = future.result()
-        except Exception as exc:
-            _gha_error(
-                title="ripgrep install failed",
-                message=f"target={target} error={exc!r}",
-            )
-            raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc
+        results[target] = future.result()
         print(f" installed ripgrep for {target}")

     return [results[target] for target in targets]

@@ -351,8 +301,6 @@ def _fetch_single_rg(
     url = providers[0]["url"]
     archive_format = platform_info.get("format", "zst")
     archive_member = platform_info.get("path")
-    digest = platform_info.get("digest")
-    expected_size = platform_info.get("size")

     dest_dir = vendor_dir / target / "path"
     dest_dir.mkdir(parents=True, exist_ok=True)

@@ -365,32 +313,10 @@
         tmp_dir = Path(tmp_dir_str)
         archive_filename = os.path.basename(urlparse(url).path)
         download_path = tmp_dir / archive_filename
-        print(
-            f" downloading ripgrep for {target} ({platform_key}) from {url}",
-            flush=True,
-        )
-        try:
-            _download_file(url, download_path)
-        except Exception as exc:
-            _gha_error(
-                title="ripgrep download failed",
-                message=f"target={target} platform={platform_key} url={url} error={exc!r}",
-            )
-            raise RuntimeError(
-                "Failed to download ripgrep "
-                f"(target={target}, platform={platform_key}, format={archive_format}, "
-                f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})."
-            ) from exc
+        _download_file(url, download_path)

         dest.unlink(missing_ok=True)
-        try:
-            extract_archive(download_path, archive_format, archive_member, dest)
-        except Exception as exc:
-            raise RuntimeError(
-                "Failed to extract ripgrep "
-                f"(target={target}, platform={platform_key}, format={archive_format}, "
-                f"member={archive_member!r}, url={url}, archive={download_path})."
-            ) from exc
+        extract_archive(download_path, archive_format, archive_member, dest)

         if not is_windows:
             dest.chmod(0o755)

@@ -400,9 +326,7 @@
 def _download_file(url: str, dest: Path) -> None:
     dest.parent.mkdir(parents=True, exist_ok=True)
-    dest.unlink(missing_ok=True)

-    with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out:
+    with urlopen(url) as response, open(dest, "wb") as out:
         shutil.copyfileobj(response, out)

394 codex-rs/Cargo.lock (generated)

@@ -42,7 +42,7 @@ dependencies = [
 "bitflags 2.10.0",
 "bytes",
 "bytestring",
-"derive_more 2.1.1",
+"derive_more 2.0.1",
 "encoding_rs",
 "foldhash 0.1.5",
 "futures-core",

@@ -137,7 +137,7 @@ dependencies = [
 "bytes",
 "bytestring",
 "cfg-if",
-"derive_more 2.1.1",
+"derive_more 2.0.1",
 "encoding_rs",
 "foldhash 0.1.5",
 "futures-core",

@@ -329,12 +329,12 @@ name = "app_test_support"
 version = "0.0.0"
 dependencies = [
 "anyhow",
+"assert_cmd",
 "base64",
 "chrono",
 "codex-app-server-protocol",
 "codex-core",
 "codex-protocol",
-"codex-utils-cargo-bin",
 "core_test_support",
 "serde",
 "serde_json",

@@ -360,17 +360,11 @@ dependencies = [
 "objc2-foundation",
 "parking_lot",
 "percent-encoding",
-"windows-sys 0.52.0",
+"windows-sys 0.60.2",
 "wl-clipboard-rs",
 "x11rb",
 ]

-[[package]]
-name = "arc-swap"
-version = "1.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"

 [[package]]
 name = "arrayvec"
 version = "0.7.6"

@@ -889,9 +883,9 @@ dependencies = [
 [[package]]
 name = "clap"
-version = "4.5.53"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
+checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931"
 dependencies = [
 "clap_builder",
 "clap_derive",

@@ -899,9 +893,9 @@ dependencies = [
 [[package]]
 name = "clap_builder"
-version = "4.5.53"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
+checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6"
 dependencies = [
 "anstream",
 "anstyle",

@@ -912,18 +906,18 @@ dependencies = [
 [[package]]
 name = "clap_complete"
-version = "4.5.64"
+version = "4.5.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c0da80818b2d95eca9aa614a30783e42f62bf5fdfee24e68cfb960b071ba8d1"
+checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad"
 dependencies = [
 "clap",
 ]

 [[package]]
 name = "clap_derive"
-version = "4.5.49"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671"
+checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
 dependencies = [
 "heck",
 "proc-macro2",

@@ -993,6 +987,7 @@ version = "0.0.0"
 dependencies = [
 "anyhow",
 "app_test_support",
+"assert_cmd",
 "base64",
 "chrono",
 "codex-app-server-protocol",

@@ -1063,7 +1058,6 @@ dependencies = [
 "anyhow",
 "assert_cmd",
 "assert_matches",
-"codex-utils-cargo-bin",
 "pretty_assertions",
 "similar",
 "tempfile",

@@ -1160,8 +1154,6 @@ dependencies = [
 "codex-stdio-to-uds",
 "codex-tui",
 "codex-tui2",
-"codex-utils-absolute-path",
-"codex-utils-cargo-bin",
 "codex-windows-sandbox",
 "ctor 0.5.0",
 "libc",

@@ -1263,7 +1255,6 @@ name = "codex-core"
 version = "0.0.0"
 dependencies = [
 "anyhow",
-"arc-swap",
 "assert_cmd",
 "assert_matches",
 "async-channel",

@@ -1286,7 +1277,6 @@ dependencies = [
 "codex-protocol",
 "codex-rmcp-client",
 "codex-utils-absolute-path",
-"codex-utils-cargo-bin",
 "codex-utils-pty",
 "codex-utils-readiness",
 "codex-utils-string",

@@ -1336,7 +1326,7 @@ dependencies = [
 "tokio",
 "tokio-util",
 "toml 0.9.5",
-"toml_edit 0.24.0+spec-1.1.0",
+"toml_edit",
 "tracing",
 "tracing-subscriber",
 "tracing-test",

@@ -1362,7 +1352,6 @@ dependencies = [
 "codex-core",
 "codex-protocol",
 "codex-utils-absolute-path",
-"codex-utils-cargo-bin",
 "core_test_support",
 "libc",
 "mcp-types",

@@ -1388,11 +1377,11 @@ name = "codex-exec-server"
 version = "0.0.0"
 dependencies = [
 "anyhow",
+"assert_cmd",
 "async-trait",
 "clap",
 "codex-core",
 "codex-execpolicy",
-"codex-utils-cargo-bin",
 "exec_server_test_support",
 "libc",
 "maplit",

@@ -1433,7 +1422,7 @@ dependencies = [
 "allocative",
 "anyhow",
 "clap",
-"derive_more 2.1.1",
+"derive_more 2.0.1",
 "env_logger",
 "log",
 "multimap",

@@ -1454,7 +1443,6 @@ dependencies = [
 "codex-protocol",
 "pretty_assertions",
-"sentry",
 "tracing",
 "tracing-subscriber",
 ]

@@ -1466,7 +1454,6 @@ dependencies = [
 "clap",
 "ignore",
 "nucleo-matcher",
-"pretty_assertions",
 "serde",
 "serde_json",
 "tokio",

@@ -1552,6 +1539,7 @@ name = "codex-mcp-server"
 version = "0.0.0"
 dependencies = [
 "anyhow",
+"assert_cmd",
 "codex-arg0",
 "codex-common",
 "codex-core",

@@ -1610,6 +1598,7 @@ dependencies = [
 "serde_json",
 "strum_macros 0.27.2",
 "tokio",
+"tonic",
 "tracing",
 "tracing-opentelemetry",
 "tracing-subscriber",

@@ -1674,8 +1663,8 @@ dependencies = [
 "axum",
 "codex-keyring-store",
 "codex-protocol",
-"codex-utils-cargo-bin",
 "dirs",
+"escargot",
 "futures",
 "keyring",
 "mcp-types",

@@ -1702,7 +1691,6 @@ version = "0.0.0"
 dependencies = [
 "anyhow",
 "assert_cmd",
-"codex-utils-cargo-bin",
 "pretty_assertions",
 "tempfile",
 "uds_windows",

@@ -1732,7 +1720,7 @@ dependencies = [
 "codex-windows-sandbox",
 "color-eyre",
 "crossterm",
-"derive_more 2.1.1",
+"derive_more 2.0.1",
 "diffy",
 "dirs",
 "dunce",

@@ -1759,7 +1747,6 @@ dependencies = [
 "supports-color 3.0.2",
 "tempfile",
 "textwrap 0.16.2",
-"thiserror 2.0.17",
 "tokio",
 "tokio-stream",
 "tokio-util",

@@ -1774,9 +1761,6 @@ dependencies = [
 "url",
 "uuid",
 "vt100",
-"which",
-"windows-sys 0.52.0",
-"winsplit",
 ]

@@ -1805,7 +1789,7 @@ dependencies = [
 "codex-windows-sandbox",
 "color-eyre",
 "crossterm",
-"derive_more 2.1.1",
+"derive_more 2.0.1",
 "diffy",
 "dirs",
 "dunce",

@@ -1820,7 +1804,6 @@ dependencies = [
 "pulldown-cmark",
 "rand 0.9.2",
 "ratatui",
-"ratatui-core",
 "ratatui-macros",
 "regex-lite",
 "reqwest",

@@ -1842,7 +1825,6 @@ dependencies = [
 "tracing-subscriber",
 "tree-sitter-bash",
 "tree-sitter-highlight",
-"tui-scrollbar",
 "unicode-segmentation",
 "unicode-width 0.2.1",
 "url",

@@ -1871,14 +1853,6 @@ dependencies = [
 "tokio",
 ]

-[[package]]
-name = "codex-utils-cargo-bin"
-version = "0.0.0"
-dependencies = [
-"assert_cmd",
-"thiserror 2.0.17",
-]

 [[package]]
 name = "codex-utils-image"
 version = "0.0.0"

@@ -2007,20 +1981,6 @@ dependencies = [
 "static_assertions",
 ]

-[[package]]
-name = "compact_str"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
-dependencies = [
-"castaway",
-"cfg-if",
-"itoa",
-"rustversion",
-"ryu",
-"static_assertions",
-]

 [[package]]
 name = "concurrent-queue"
 version = "2.5.0"

@@ -2042,18 +2002,6 @@ dependencies = [
 "windows-sys 0.59.0",
 ]

-[[package]]
-name = "const-hex"
-version = "1.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735"
-dependencies = [
-"cfg-if",
-"cpufeatures",
-"proptest",
-"serde_core",
-]

 [[package]]
 name = "convert_case"
 version = "0.6.0"

@@ -2065,9 +2013,9 @@ dependencies = [
 [[package]]
 name = "convert_case"
-version = "0.10.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
+checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7"
 dependencies = [
 "unicode-segmentation",
 ]

@@ -2108,7 +2056,6 @@ dependencies = [
 "codex-core",
 "codex-protocol",
 "codex-utils-absolute-path",
-"codex-utils-cargo-bin",
 "notify",
 "pretty_assertions",
 "regex-lite",

@@ -2454,11 +2401,11 @@ dependencies = [
 [[package]]
 name = "derive_more"
-version = "2.1.1"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
+checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
 dependencies = [
-"derive_more-impl 2.1.1",
+"derive_more-impl 2.0.1",
 ]

@@ -2476,14 +2423,13 @@ dependencies = [
 [[package]]
 name = "derive_more-impl"
-version = "2.1.1"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
+checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
 dependencies = [
-"convert_case 0.10.0",
+"convert_case 0.7.1",
 "proc-macro2",
 "quote",
-"rustc_version",
 "syn 2.0.104",
 "unicode-xid",
 ]

@@ -2599,15 +2545,6 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"

-[[package]]
-name = "document-features"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61"
-dependencies = [
-"litrs",
-]

 [[package]]
 name = "dotenvy"
 version = "0.15.7"

@@ -2781,7 +2718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
 dependencies = [
 "libc",
-"windows-sys 0.52.0",
+"windows-sys 0.60.2",
 ]

@@ -2838,8 +2775,8 @@ name = "exec_server_test_support"
 version = "0.0.0"
 dependencies = [
 "anyhow",
+"assert_cmd",
 "codex-core",
-"codex-utils-cargo-bin",
 "rmcp",
 "serde_json",
 "tokio",

@@ -2889,7 +2826,7 @@ checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78"
 dependencies = [
 "cfg-if",
 "rustix 1.0.8",
-"windows-sys 0.52.0",
+"windows-sys 0.59.0",
 ]

@@ -3774,14 +3711,13 @@ dependencies = [
 [[package]]
 name = "insta"
-version = "1.46.0"
+version = "1.44.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b66886d14d18d420ab5052cbff544fc5d34d0b2cdd35eb5976aaa10a4a472e5"
+checksum = "b5c943d4415edd8153251b6f197de5eb1640e56d84e8d9159bea190421c73698"
 dependencies = [
 "console",
-"once_cell",
 "similar",
 "tempfile",
 ]

@@ -3806,6 +3742,17 @@ dependencies = [
 "rustversion",
 ]

+[[package]]
+name = "io-uring"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
+dependencies = [
+"bitflags 2.10.0",
+"cfg-if",
+"libc",
+]

 [[package]]
 name = "ipnet"
 version = "2.11.0"

@@ -3830,7 +3777,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
 dependencies = [
 "hermit-abi",
 "libc",
-"windows-sys 0.52.0",
+"windows-sys 0.59.0",
 ]

@@ -3934,16 +3881,6 @@ dependencies = [
 "wasm-bindgen",
 ]

-[[package]]
-name = "kasuari"
-version = "0.4.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fe90c1150662e858c7d5f945089b7517b0a80d8bf7ba4b1b5ffc984e7230a5b"
-dependencies = [
-"hashbrown 0.16.0",
-"thiserror 2.0.17",
-]

 [[package]]
 name = "keyring"
 version = "3.6.3"

@@ -4015,9 +3952,9 @@ dependencies = [
 [[package]]
 name = "landlock"
-version = "0.4.4"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49fefd6652c57d68aaa32544a4c0e642929725bdc1fd929367cdeb673ab81088"
+checksum = "b3d2ef408b88e913bfc6594f5e693d57676f6463ded7d8bf994175364320c706"
 dependencies = [
 "enumflags2",
 "libc",

@@ -4089,12 +4026,6 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"

-[[package]]
-name = "litrs"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092"

 [[package]]
 name = "local-waker"
 version = "0.1.4"

@@ -4213,9 +4144,9 @@ name = "mcp_test_support"
 version = "0.0.0"
 dependencies = [
 "anyhow",
+"assert_cmd",
 "codex-core",
 "codex-mcp-server",
-"codex-utils-cargo-bin",
 "core_test_support",
 "mcp-types",
 "os_info",

@@ -4703,9 +4634,9 @@ dependencies = [
 [[package]]
 name = "openssl-sys"
-version = "0.9.111"
+version = "0.9.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321"
+checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
 dependencies = [
 "cc",
 "libc",

@@ -4716,9 +4647,9 @@ dependencies = [
 [[package]]
 name = "opentelemetry"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0"
+checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6"
 dependencies = [
 "futures-core",
 "futures-sink",

@@ -4730,9 +4661,9 @@ dependencies = [
 [[package]]
 name = "opentelemetry-appender-tracing"
-version = "0.31.1"
+version = "0.30.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef6a1ac5ca3accf562b8c306fa8483c85f4390f768185ab775f242f7fe8fdcc2"
+checksum = "e68f63eca5fad47e570e00e893094fc17be959c80c79a7d6ec1abdd5ae6ffc16"
 dependencies = [
 "opentelemetry",
 "tracing",

@@ -4742,9 +4673,9 @@ dependencies = [
 [[package]]
 name = "opentelemetry-http"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d"
+checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d"
 dependencies = [
 "async-trait",
 "bytes",

@@ -4755,9 +4686,9 @@ dependencies = [
 [[package]]
 name = "opentelemetry-otlp"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf"
+checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b"
 dependencies = [
 "http 1.3.1",
 "opentelemetry",

@@ -4775,32 +4706,30 @@ dependencies = [
 [[package]]
 name = "opentelemetry-proto"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f"
+checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc"
 dependencies = [
-"base64",
-"const-hex",
+"hex",
 "opentelemetry",
 "opentelemetry_sdk",
 "prost",
 "serde",
 "serde_json",
 "tonic",
-"tonic-prost",
 ]

 [[package]]
 name = "opentelemetry-semantic-conventions"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846"
+checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2"

 [[package]]
 name = "opentelemetry_sdk"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd"
+checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b"
 dependencies = [
 "futures-channel",
 "futures-executor",

@@ -4808,6 +4737,7 @@ dependencies = [
 "opentelemetry",
 "percent-encoding",
 "rand 0.9.2",
+"serde_json",
 "thiserror 2.0.17",
 "tokio",
 "tokio-stream",

@@ -5163,7 +5093,7 @@ version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983"
 dependencies = [
-"toml_edit 0.23.10+spec-1.0.0",
+"toml_edit",
 ]

@@ -5189,26 +5119,11 @@ dependencies = [
 "windows 0.61.3",
 ]

-[[package]]
-name = "proptest"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40"
-dependencies = [
-"bitflags 2.10.0",
-"num-traits",
-"rand 0.9.2",
-"rand_chacha 0.9.0",
-"rand_xorshift",
-"regex-syntax 0.8.5",
-"unarray",
-]

 [[package]]
 name = "prost"
-version = "0.14.1"
+version = "0.13.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d"
+checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5"
 dependencies = [
 "bytes",
 "prost-derive",

@@ -5216,9 +5131,9 @@ dependencies = [
 [[package]]
 name = "prost-derive"
-version = "0.14.1"
+version = "0.13.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425"
+checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
 dependencies = [
 "anyhow",
 "itertools 0.14.0",

@@ -5331,7 +5246,7 @@ dependencies = [
 "once_cell",
 "socket2 0.6.1",
 "tracing",
-"windows-sys 0.52.0",
+"windows-sys 0.60.2",
 ]

@@ -5418,15 +5333,6 @@ dependencies = [
 "getrandom 0.3.3",
 ]

-[[package]]
-name = "rand_xorshift"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
-dependencies = [
-"rand_core 0.9.3",
-]

 [[package]]
 name = "ratatui"
 version = "0.29.0"

@@ -5434,7 +5340,7 @@ source = "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#
 dependencies = [
 "bitflags 2.10.0",
 "cassowary",
-"compact_str 0.8.1",
+"compact_str",
 "crossterm",
 "indoc",
 "instability",

@@ -5443,27 +5349,7 @@ dependencies = [
 "paste",
 "strum 0.26.3",
 "unicode-segmentation",
-"unicode-truncate 1.1.0",
+"unicode-truncate",
 "unicode-width 0.2.1",
 ]

-[[package]]
-name = "ratatui-core"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ef8dea09a92caaf73bff7adb70b76162e5937524058a7e5bff37869cbbec293"
-dependencies = [
-"bitflags 2.10.0",
-"compact_str 0.9.0",
-"hashbrown 0.16.0",
-"indoc",
-"itertools 0.14.0",
-"kasuari",
-"lru 0.16.2",
-"strum 0.27.2",
-"thiserror 2.0.17",
-"unicode-segmentation",
-"unicode-truncate 2.0.0",
-"unicode-width 0.2.1",
-]

@@ -5552,9 +5438,9 @@ dependencies = [
 [[package]]
 name = "regex-lite"
-version = "0.1.8"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da"
+checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30"

 [[package]]
 name = "regex-syntax"

@@ -5710,7 +5596,7 @@ dependencies = [
 "errno",
 "libc",
 "linux-raw-sys 0.4.15",
-"windows-sys 0.52.0",
+"windows-sys 0.59.0",
 ]

@@ -5723,7 +5609,7 @@ dependencies = [
 "errno",
 "libc",
 "linux-raw-sys 0.9.4",
-"windows-sys 0.52.0",
+"windows-sys 0.60.2",
 ]

@@ -6630,9 +6516,6 @@ name = "strum"
 version = "0.27.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
-dependencies = [
-"strum_macros 0.27.2",
-]

 [[package]]
 name = "strum_macros"

@@ -6840,9 +6723,9 @@ dependencies = [
 [[package]]
 name = "test-log"
-version = "0.2.19"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d53ac171c92a39e4769491c4b4dde7022c60042254b5fc044ae409d34a24d4"
+checksum = "1e33b98a582ea0be1168eba097538ee8dd4bbe0f2b01b22ac92ea30054e5be7b"
 dependencies = [
 "env_logger",
 "test-log-macros",

@@ -6851,9 +6734,9 @@ dependencies = [
 [[package]]
 name = "test-log-macros"
-version = "0.2.19"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be35209fd0781c5401458ab66e4f98accf63553e8fae7425503e92fdd319783b"
+checksum = "451b374529930d7601b1eef8d32bc79ae870b6079b069401709c2a8bf9e75f36"
 dependencies = [
 "proc-macro2",
 "quote",

@@ -7024,26 +6907,29 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 [[package]]
 name = "tokio"
-version = "1.48.0"
+version = "1.47.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
+checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
 dependencies = [
+"backtrace",
 "bytes",
+"io-uring",
 "libc",
 "mio",
 "parking_lot",
 "pin-project-lite",
 "signal-hook-registry",
+"slab",
 "socket2 0.6.1",
 "tokio-macros",
-"windows-sys 0.61.1",
+"windows-sys 0.59.0",
 ]

 [[package]]
 name = "tokio-macros"
-version = "2.6.0"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
 dependencies = [
 "proc-macro2",
 "quote",

@@ -7072,9 +6958,9 @@ dependencies = [
 [[package]]
 name = "tokio-stream"
-version = "0.1.18"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
 dependencies = [
 "futures-core",
 "pin-project-lite",

@@ -7136,30 +7022,18 @@ dependencies = [
 [[package]]
 name = "toml_datetime"
-version = "0.7.5+spec-1.1.0"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347"
+checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533"
 dependencies = [
 "serde_core",
 ]

 [[package]]
 name = "toml_edit"
-version = "0.23.10+spec-1.0.0"
+version = "0.23.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269"
+checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d"
 dependencies = [
 "indexmap 2.12.0",
 "toml_datetime",
 "toml_parser",
 "winnow",
 ]

-[[package]]
-name = "toml_edit"
-version = "0.24.0+spec-1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c740b185920170a6d9191122cafef7010bd6270a3824594bff6784c04d7f09e"
-dependencies = [
-"indexmap 2.12.0",
-"toml_datetime",

@@ -7170,28 +7044,30 @@ dependencies = [
 [[package]]
 name = "toml_parser"
-version = "1.0.6+spec-1.1.0"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44"
+checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e"
 dependencies = [
 "winnow",
 ]

 [[package]]
 name = "toml_writer"
-version = "1.0.6+spec-1.1.0"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607"
+checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2"

 [[package]]
 name = "tonic"
-version = "0.14.2"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
+checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9"
 dependencies = [
+"async-trait",
+"axum",
 "base64",
 "bytes",
 "h2",
 "http 1.3.1",
 "http-body",
 "http-body-util",

@@ -7200,8 +7076,9 @@ dependencies = [
 "hyper-util",
 "percent-encoding",
 "pin-project",
+"prost",
 "rustls-native-certs",
-"sync_wrapper",
+"socket2 0.5.10",
 "tokio",
 "tokio-rustls",
 "tokio-stream",

@@ -7211,17 +7088,6 @@ dependencies = [
 "tracing",
 ]

-[[package]]
-name = "tonic-prost"
-version = "0.14.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67"
-dependencies = [
-"bytes",
-"prost",
-"tonic",
-]

 [[package]]
 name = "tower"
 version = "0.5.2"

@@ -7339,16 +7205,15 @@ dependencies = [
 [[package]]
 name = "tracing-opentelemetry"
-version = "0.32.0"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e"
+checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c"
 dependencies = [
 "js-sys",
 "once_cell",
 "opentelemetry",
 "opentelemetry_sdk",
 "rustversion",
 "smallvec",
-"thiserror 2.0.17",
 "tracing",
 "tracing-core",
 "tracing-log",

@@ -7358,9 +7223,9 @@ dependencies = [
 [[package]]
 name = "tracing-subscriber"
-version = "0.3.22"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
+checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
 dependencies = [
 "matchers",
 "nu-ansi-term",

@@ -7479,16 +7344,6 @@ dependencies = [
 "termcolor",
 ]

-[[package]]
-name = "tui-scrollbar"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c42613099915b2e30e9f144670666e858e2538366f77742e1cf1c2f230efcacd"
-dependencies = [
-"document-features",
-"ratatui-core",
-]

 [[package]]
 name = "typenum"
 version = "1.18.0"

@@ -7515,12 +7370,6 @@ dependencies = [
 "libc",
 ]

-[[package]]
-name = "unarray"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"

 [[package]]
 name = "unicase"
 version = "2.8.1"

@@ -7556,17 +7405,6 @@ dependencies = [
 "unicode-width 0.1.14",
 ]

-[[package]]
-name = "unicode-truncate"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fbf03860ff438702f3910ca5f28f8dac63c1c11e7efb5012b8b175493606330"
-dependencies = [
-"itertools 0.13.0",
-"unicode-segmentation",
-"unicode-width 0.2.1",
-]

 [[package]]
 name = "unicode-width"
 version = "0.1.14"

@@ -8011,7 +7849,7 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
 dependencies = [
-"windows-sys 0.52.0",
+"windows-sys 0.59.0",
 ]

@@ -8532,12 +8370,6 @@ version = "0.0.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"

-[[package]]
-name = "winsplit"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ab703352da6a72f35c39a533526393725640575bb211f61987a2748323ad956"

 [[package]]
 name = "wiremock"
 version = "0.6.5"

codex-rs/Cargo.toml (workspace manifest)

@@ -36,7 +36,6 @@ members = [
 "tui",
 "tui2",
 "utils/absolute-path",
-"utils/cargo-bin",
 "utils/git",
 "utils/cache",
 "utils/image",

@@ -94,7 +93,6 @@ codex-tui = { path = "tui" }
 codex-tui2 = { path = "tui2" }
 codex-utils-absolute-path = { path = "utils/absolute-path" }
 codex-utils-cache = { path = "utils/cache" }
-codex-utils-cargo-bin = { path = "utils/cargo-bin" }
 codex-utils-image = { path = "utils/image" }
 codex-utils-json-to-toml = { path = "utils/json-to-toml" }
 codex-utils-pty = { path = "utils/pty" }

@@ -145,10 +143,10 @@ ignore = "0.4.23"
 image = { version = "^0.25.9", default-features = false }
 include_dir = "0.7.4"
 indexmap = "2.12.0"
-insta = "1.46.0"
+insta = "1.44.3"
 itertools = "0.14.0"
 keyring = { version = "3.6", default-features = false }
-landlock = "0.4.4"
+landlock = "0.4.1"
 lazy_static = "1"
 libc = "0.2.177"
 log = "0.4"

@@ -160,12 +158,12 @@ notify = "8.2.0"
 nucleo-matcher = "0.3.1"
 once_cell = "1.20.2"
 openssl-sys = "*"
-opentelemetry = "0.31.0"
-opentelemetry-appender-tracing = "0.31.0"
-opentelemetry-otlp = "0.31.0"
-opentelemetry-semantic-conventions = "0.31.0"
-opentelemetry_sdk = "0.31.0"
-tracing-opentelemetry = "0.32.0"
+opentelemetry = "0.30.0"
+opentelemetry-appender-tracing = "0.30.0"
+opentelemetry-otlp = "0.30.0"
+opentelemetry-semantic-conventions = "0.30.0"
+opentelemetry_sdk = "0.30.0"
+tracing-opentelemetry = "0.31.0"
 os_info = "3.12.0"
 owo-colors = "4.2.0"
 path-absolutize = "3.1.1"

@@ -176,10 +174,9 @@ pretty_assertions = "1.4.1"
 pulldown-cmark = "0.10"
 rand = "0.9"
 ratatui = "0.29.0"
-ratatui-core = "0.1.0"
 ratatui-macros = "0.6.0"
 regex = "1.12.2"
-regex-lite = "0.1.8"
+regex-lite = "0.1.7"
 reqwest = "0.12"
 rmcp = { version = "0.12.0", default-features = false }
 schemars = "0.8.22"

@@ -201,26 +198,26 @@ strum_macros = "0.27.2"
 supports-color = "3.0.2"
 sys-locale = "0.3.2"
 tempfile = "3.23.0"
-test-log = "0.2.19"
+test-log = "0.2.18"
 textwrap = "0.16.2"
 thiserror = "2.0.17"
 time = "0.3"
 tiny_http = "0.12"
 tokio = "1"
-tokio-stream = "0.1.18"
+tokio-stream = "0.1.17"
 tokio-test = "0.4"
 tokio-util = "0.7.16"
 toml = "0.9.5"
-toml_edit = "0.24.0"
+toml_edit = "0.23.5"
+tonic = "0.13.1"
 tracing = "0.1.43"
 tracing-appender = "0.2.3"
-tracing-subscriber = "0.3.22"
+tracing-subscriber = "0.3.20"
 tracing-test = "0.2.5"
 tree-sitter = "0.25.10"
 tree-sitter-bash = "0.25"
 tree-sitter-highlight = "0.25.10"
 ts-rs = "11"
-tui-scrollbar = "0.2.1"
 uds_windows = "1.1.0"
 unicode-segmentation = "1.12.0"
 unicode-width = "0.2"

(file header not captured)

@@ -15,8 +15,8 @@ You can also install via Homebrew (`brew install --cask codex`) or download a pl
 ## Documentation quickstart

-- First run with Codex? Start with [`docs/getting-started.md`](../docs/getting-started.md) (links to the walkthrough for prompts, keyboard shortcuts, and session management).
-- Want deeper control? See [`docs/config.md`](../docs/config.md) and [`docs/install.md`](../docs/install.md).
+- First run with Codex? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management.
+- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md).

 ## What's new in the Rust CLI

@@ -30,7 +30,7 @@ Codex supports a rich set of configuration options. Note that the Rust CLI uses
 #### MCP client

-Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#connecting-to-mcp-servers) for details.
+Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details.

 #### MCP server (experimental)

(file header not captured)

@@ -384,8 +384,6 @@ pub struct SendUserTurnParams {
     pub model: String,
     pub effort: Option<ReasoningEffort>,
     pub summary: ReasoningSummary,
-    /// Optional JSON Schema used to constrain the final assistant message for this turn.
-    pub output_schema: Option<serde_json::Value>,
 }

 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]

(file header not captured)

@@ -227,8 +227,6 @@ pub enum ConfigLayerSource {
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
     System {
-        /// This is the path to the system config.toml file, though it is not
-        /// guaranteed to exist.
         file: AbsolutePathBuf,
     },

@@ -239,19 +237,9 @@ pub enum ConfigLayerSource {
     #[serde(rename_all = "camelCase")]
     #[ts(rename_all = "camelCase")]
     User {
-        /// This is the path to the user's config.toml file, though it is not
-        /// guaranteed to exist.
         file: AbsolutePathBuf,
     },

-    /// Path to a .codex/ folder within a project. There could be multiple of
-    /// these between `cwd` and the project/repo root.
-    #[serde(rename_all = "camelCase")]
-    #[ts(rename_all = "camelCase")]
-    Project {
-        dot_codex_folder: AbsolutePathBuf,
-    },

     /// Session-layer overrides supplied via `-c`/`--config`.
     SessionFlags,

@@ -259,8 +247,6 @@ pub enum ConfigLayerSource {
     /// as the last layer on top of everything else. This scheme did not quite
     /// work out as intended, but we keep this variant as a "best effort" while
     /// we phase out `managed_config.toml` in favor of `requirements.toml`.
-    #[serde(rename_all = "camelCase")]
-    #[ts(rename_all = "camelCase")]
     LegacyManagedConfigTomlFromFile {
         file: AbsolutePathBuf,
     },

@@ -276,7 +262,6 @@ impl ConfigLayerSource {
             ConfigLayerSource::Mdm { .. } => 0,
             ConfigLayerSource::System { .. } => 10,
             ConfigLayerSource::User { .. } => 20,
-            ConfigLayerSource::Project { .. } => 25,
             ConfigLayerSource::SessionFlags => 30,
             ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => 40,
             ConfigLayerSource::LegacyManagedConfigTomlFromMdm => 50,

@@ -1274,8 +1259,6 @@ pub struct Turn {
|
||||
pub struct TurnError {
|
||||
pub message: String,
|
||||
pub codex_error_info: Option<CodexErrorInfo>,
|
||||
#[serde(default)]
|
||||
pub additional_details: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -1319,8 +1302,6 @@ pub struct TurnStartParams {
|
||||
pub effort: Option<ReasoningEffort>,
|
||||
/// Override the reasoning summary for this turn and subsequent turns.
|
||||
pub summary: Option<ReasoningSummary>,
|
||||
/// Optional JSON Schema used to constrain the final assistant message for this turn.
|
||||
pub output_schema: Option<JsonValue>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
|
||||
@@ -13,7 +13,6 @@ use std::time::Duration;
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::bail;
|
||||
use clap::ArgAction;
|
||||
use clap::Parser;
|
||||
use clap::Subcommand;
|
||||
use codex_app_server_protocol::AddConversationListenerParams;
|
||||
@@ -66,19 +65,6 @@ struct Cli {
|
||||
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
|
||||
codex_bin: String,
|
||||
|
||||
/// Forwarded to the `codex` CLI as `--config key=value`. Repeatable.
|
||||
///
|
||||
/// Example:
|
||||
/// `--config 'model_providers.mock.base_url="http://localhost:4010/v2"'`
|
||||
#[arg(
|
||||
short = 'c',
|
||||
long = "config",
|
||||
value_name = "key=value",
|
||||
action = ArgAction::Append,
|
||||
global = true
|
||||
)]
|
||||
config_overrides: Vec<String>,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: CliCommand,
|
||||
}
|
||||
@@ -130,42 +116,29 @@ enum CliCommand {
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let Cli {
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
command,
|
||||
} = Cli::parse();
|
||||
let Cli { codex_bin, command } = Cli::parse();
|
||||
|
||||
match command {
|
||||
CliCommand::SendMessage { user_message } => {
|
||||
send_message(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::SendMessageV2 { user_message } => {
|
||||
send_message_v2(&codex_bin, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::SendMessage { user_message } => send_message(codex_bin, user_message),
|
||||
CliCommand::SendMessageV2 { user_message } => send_message_v2(codex_bin, user_message),
|
||||
CliCommand::TriggerCmdApproval { user_message } => {
|
||||
trigger_cmd_approval(&codex_bin, &config_overrides, user_message)
|
||||
trigger_cmd_approval(codex_bin, user_message)
|
||||
}
|
||||
CliCommand::TriggerPatchApproval { user_message } => {
|
||||
trigger_patch_approval(&codex_bin, &config_overrides, user_message)
|
||||
trigger_patch_approval(codex_bin, user_message)
|
||||
}
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(&codex_bin, &config_overrides),
|
||||
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
|
||||
CliCommand::SendFollowUpV2 {
|
||||
first_message,
|
||||
follow_up_message,
|
||||
} => send_follow_up_v2(
|
||||
&codex_bin,
|
||||
&config_overrides,
|
||||
first_message,
|
||||
follow_up_message,
|
||||
),
|
||||
CliCommand::TestLogin => test_login(&codex_bin, &config_overrides),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(&codex_bin, &config_overrides),
|
||||
} => send_follow_up_v2(codex_bin, first_message, follow_up_message),
|
||||
CliCommand::TestLogin => test_login(codex_bin),
|
||||
CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
|
||||
}
|
||||
}
|
||||
|
||||
fn send_message(codex_bin: &str, config_overrides: &[String], user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
fn send_message(codex_bin: String, user_message: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -186,61 +159,46 @@ fn send_message(codex_bin: &str, config_overrides: &[String], user_message: Stri
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn send_message_v2(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, config_overrides, user_message, None, None)
|
||||
fn send_message_v2(codex_bin: String, user_message: String) -> Result<()> {
|
||||
send_message_v2_with_policies(codex_bin, user_message, None, None)
|
||||
}
|
||||
|
||||
fn trigger_cmd_approval(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: Option<String>,
|
||||
) -> Result<()> {
|
||||
fn trigger_cmd_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
|
||||
let default_prompt =
|
||||
"Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
|
||||
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
|
||||
send_message_v2_with_policies(
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
message,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly),
|
||||
)
|
||||
}
|
||||
|
||||
fn trigger_patch_approval(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
user_message: Option<String>,
|
||||
) -> Result<()> {
|
||||
fn trigger_patch_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
|
||||
let default_prompt =
|
||||
"Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
|
||||
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
|
||||
send_message_v2_with_policies(
|
||||
codex_bin,
|
||||
config_overrides,
|
||||
message,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly),
|
||||
)
|
||||
}
|
||||
|
||||
fn no_trigger_cmd_approval(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
fn no_trigger_cmd_approval(codex_bin: String) -> Result<()> {
|
||||
let prompt = "Run `touch should_not_trigger_approval.txt`";
|
||||
send_message_v2_with_policies(codex_bin, config_overrides, prompt.to_string(), None, None)
|
||||
send_message_v2_with_policies(codex_bin, prompt.to_string(), None, None)
|
||||
}
|
||||
|
||||
fn send_message_v2_with_policies(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
codex_bin: String,
|
||||
user_message: String,
|
||||
approval_policy: Option<AskForApproval>,
|
||||
sandbox_policy: Option<SandboxPolicy>,
|
||||
) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -264,12 +222,11 @@ fn send_message_v2_with_policies(
|
||||
}
|
||||
|
||||
fn send_follow_up_v2(
|
||||
codex_bin: &str,
|
||||
config_overrides: &[String],
|
||||
codex_bin: String,
|
||||
first_message: String,
|
||||
follow_up_message: String,
|
||||
) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -302,8 +259,8 @@ fn send_follow_up_v2(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_login(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
fn test_login(codex_bin: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -332,8 +289,8 @@ fn test_login(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_account_rate_limits(codex_bin: &str, config_overrides: &[String]) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin, config_overrides)?;
|
||||
fn get_account_rate_limits(codex_bin: String) -> Result<()> {
|
||||
let mut client = CodexClient::spawn(codex_bin)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
@@ -352,12 +309,8 @@ struct CodexClient {
|
||||
}
|
||||
|
||||
impl CodexClient {
|
||||
fn spawn(codex_bin: &str, config_overrides: &[String]) -> Result<Self> {
|
||||
let mut cmd = Command::new(codex_bin);
|
||||
for override_kv in config_overrides {
|
||||
cmd.arg("--config").arg(override_kv);
|
||||
}
|
||||
let mut codex_app_server = cmd
|
||||
fn spawn(codex_bin: String) -> Result<Self> {
|
||||
let mut codex_app_server = Command::new(&codex_bin)
|
||||
.arg("app-server")
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
|
||||
@@ -48,6 +48,7 @@ uuid = { workspace = true, features = ["serde", "v7"] }
|
||||
|
||||
[dev-dependencies]
|
||||
app_test_support = { workspace = true }
|
||||
assert_cmd = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
core_test_support = { workspace = true }
|
||||
mcp-types = { workspace = true }
|
||||
|
||||
@@ -82,7 +82,7 @@ Example (from OpenAI's official VSCode extension):
|
||||
- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
|
||||
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
|
||||
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
|
||||
- `config/read` — fetch the effective config on disk after resolving config layering (thread-agnostic; does not include in-repo `.codex/` layers).
|
||||
- `config/read` — fetch the effective config on disk after resolving config layering.
|
||||
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
|
||||
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
|
||||
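As a rough illustration of how one of these methods is called over JSON-RPC (the framing matches the `turn/start` example later in this document, but the `params` field names in this sketch are assumptions; the authoritative schemas live in the app-server protocol crate):

```json
{ "method": "config/value/write", "id": 12, "params": {
  "key": "model",
  "value": "gpt-5.1-codex"
} }
```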

@@ -162,7 +162,7 @@ Turns attach user input (text or images) to a thread and trigger Codex generatio
- `{"type":"image","url":"https://…png"}`
- `{"type":"localImage","path":"/tmp/screenshot.png"}`

You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread. `outputSchema` applies only to the current turn.
You can optionally specify config overrides on the new turn. If specified, these settings become the default for subsequent turns on the same thread.

```json
{ "method": "turn/start", "id": 30, "params": {
@@ -178,14 +178,7 @@ You can optionally specify config overrides on the new turn. If specified, these
},
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
"properties": { "answer": { "type": "string" } },
"required": ["answer"],
"additionalProperties": false
}
"summary": "concise"
} }
{ "id": 30, "result": { "turn": {
"id": "turn_456",
@@ -309,7 +302,7 @@ Event notifications are the server-initiated event stream for thread lifecycles,
The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`.

- `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo?, additionalDetails? } }`.
- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`.
- `turn/diff/updated` — `{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items.
- `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`.
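A sketch of that per-item lifecycle on the wire, with illustrative ids and an assumed minimal item shape (any item-specific deltas would stream between these two notifications; the exact item fields are defined by the protocol crate, not this excerpt):

```json
{ "method": "item/started", "params": { "threadId": "thread_123", "turnId": "turn_456", "item": { "id": "item_1", "type": "agentMessage" } } }
{ "method": "item/completed", "params": { "threadId": "thread_123", "turnId": "turn_456", "item": { "id": "item_1", "type": "agentMessage", "text": "Done" } } }
```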

@@ -359,7 +352,7 @@ There are additional item-specific events:

### Errors

`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo?, additionalDetails? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification.
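For illustration, an `error` notification under the new payload might look like the sketch below; the envelope field names are assumptions modeled on the notification shapes above, while the `"boom"` message and the `internalServerError` variant are taken from the handler and test changes later in this diff:

```json
{ "method": "error", "params": {
  "threadId": "thread_123",
  "turnId": "turn_456",
  "error": { "message": "boom", "codexErrorInfo": "internalServerError" }
} }
```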

`codexErrorInfo` maps to the `CodexErrorInfo` enum. Common values:

@@ -340,7 +340,6 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: None,
};
handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await;
outgoing
@@ -358,7 +357,6 @@ pub(crate) async fn apply_bespoke_event_handling(
let turn_error = TurnError {
message: ev.message,
codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from),
additional_details: ev.additional_details,
};
outgoing
.send_server_notification(ServerNotification::Error(ErrorNotification {
@@ -1342,7 +1340,6 @@ mod tests {
TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
},
&turn_summary_store,
)
@@ -1354,7 +1351,6 @@ mod tests {
Some(TurnError {
message: "boom".to_string(),
codex_error_info: Some(V2CodexErrorInfo::InternalServerError),
additional_details: None,
})
);
Ok(())
@@ -1402,7 +1398,6 @@ mod tests {
TurnError {
message: "oops".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1444,7 +1439,6 @@ mod tests {
TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
},
&turn_summary_store,
)
@@ -1473,7 +1467,6 @@ mod tests {
Some(TurnError {
message: "bad".to_string(),
codex_error_info: Some(V2CodexErrorInfo::Other),
additional_details: None,
})
);
}
@@ -1698,7 +1691,6 @@ mod tests {
TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
},
&turn_summary_store,
)
@@ -1718,7 +1710,6 @@ mod tests {
TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
},
&turn_summary_store,
)
@@ -1755,7 +1746,6 @@ mod tests {
Some(TurnError {
message: "a1".to_string(),
codex_error_info: Some(V2CodexErrorInfo::BadRequest),
additional_details: None,
})
);
}
@@ -1776,7 +1766,6 @@ mod tests {
Some(TurnError {
message: "b1".to_string(),
codex_error_info: None,
additional_details: None,
})
);
}
@@ -1,5 +1,4 @@
use crate::bespoke_event_handling::apply_bespoke_event_handling;
use crate::config_api::ConfigApi;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::fuzzy_file_search::run_fuzzy_file_search;
@@ -156,6 +155,7 @@ use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
use codex_protocol::user_input::UserInput as CoreInputItem;
use codex_rmcp_client::perform_oauth_login_return_url;
use codex_utils_json_to_toml::json_to_toml;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsStr;
@@ -215,7 +215,7 @@ pub(crate) struct CodexMessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
config_api: ConfigApi,
cli_overrides: Vec<(String, TomlValue)>,
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
// Queue of pending interrupt requests per conversation. We reply when TurnAborted arrives.
@@ -265,14 +265,13 @@ impl CodexMessageProcessor {
cli_overrides: Vec<(String, TomlValue)>,
feedback: CodexFeedback,
) -> Self {
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides.clone());
Self {
auth_manager,
conversation_manager,
outgoing,
codex_linux_sandbox_exe,
config,
config_api,
cli_overrides,
conversation_listeners: HashMap::new(),
active_login: Arc::new(Mutex::new(None)),
pending_interrupts: Arc::new(Mutex::new(HashMap::new())),
@@ -283,7 +282,13 @@ impl CodexMessageProcessor {
}

async fn load_latest_config(&self) -> Result<Config, JSONRPCErrorError> {
self.config_api.load_latest_thread_agnostic_config().await
Config::load_with_cli_overrides(self.cli_overrides.clone())
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to reload config: {err}"),
data: None,
})
}

fn review_request_from_target(
@@ -1181,22 +1186,10 @@ impl CodexMessageProcessor {
arg0: None,
};

let requested_policy = params.sandbox_policy.map(|policy| policy.to_core());
let effective_policy = match requested_policy {
Some(policy) => match self.config.sandbox_policy.can_set(&policy) {
Ok(()) => policy,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("invalid sandbox policy: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
},
None => self.config.sandbox_policy.get().clone(),
};
let effective_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or_else(|| self.config.sandbox_policy.clone());

let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
let outgoing = self.outgoing.clone();
@@ -1273,20 +1266,18 @@ impl CodexMessageProcessor {
);
}

let config =
match derive_config_from_params(&self.config_api, overrides, Some(cli_overrides)).await
{
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let config = match derive_config_from_params(overrides, Some(cli_overrides)).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

match self.conversation_manager.new_conversation(config).await {
Ok(conversation_id) => {
@@ -1325,19 +1316,18 @@ impl CodexMessageProcessor {
params.developer_instructions,
);

let config =
match derive_config_from_params(&self.config_api, overrides, params.config).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};
let config = match derive_config_from_params(overrides, params.config).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("error deriving config: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
};

match self.conversation_manager.new_conversation(config).await {
Ok(new_conv) => {
@@ -1565,7 +1555,7 @@ impl CodexMessageProcessor {
base_instructions,
developer_instructions,
);
match derive_config_from_params(&self.config_api, overrides, cli_overrides).await {
match derive_config_from_params(overrides, cli_overrides).await {
Ok(config) => config,
Err(err) => {
let error = JSONRPCErrorError {
@@ -1990,6 +1980,16 @@ impl CodexMessageProcessor {
}
};

if !config.features.enabled(Feature::RmcpClient) {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "OAuth login is only supported when [features].rmcp_client is true in config.toml".to_string(),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}

let McpServerOauthLoginParams {
name,
scopes,
@@ -2226,7 +2226,7 @@ impl CodexMessageProcessor {
..Default::default()
};

derive_config_from_params(&self.config_api, overrides, Some(cli_overrides)).await
derive_config_from_params(overrides, Some(cli_overrides)).await
}
None => Ok(self.config.as_ref().clone()),
};
@@ -2577,7 +2577,6 @@ impl CodexMessageProcessor {
let _ = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: None,
})
.await;

@@ -2597,7 +2596,6 @@ impl CodexMessageProcessor {
model,
effort,
summary,
output_schema,
} = params;

let Ok(conversation) = self
@@ -2632,7 +2630,7 @@ impl CodexMessageProcessor {
model,
effort,
summary,
final_output_json_schema: output_schema,
final_output_json_schema: None,
})
.await;

@@ -2741,7 +2739,6 @@ impl CodexMessageProcessor {
let turn_id = conversation
.submit(Op::UserInput {
items: mapped_items,
final_output_json_schema: params.output_schema,
})
.await;

@@ -3342,13 +3339,16 @@ fn errors_to_info(
}

async fn derive_config_from_params(
config_api: &ConfigApi,
overrides: ConfigOverrides,
cli_overrides: Option<HashMap<String, serde_json::Value>>,
) -> std::io::Result<Config> {
config_api
.load_thread_agnostic_config(overrides, cli_overrides)
.await
let cli_overrides = cli_overrides
.unwrap_or_default()
.into_iter()
.map(|(k, v)| (k, json_to_toml(v)))
.collect();

Config::load_with_cli_overrides_and_harness_overrides(cli_overrides, overrides).await
}

async fn read_summary_from_rollout(
@@ -7,28 +7,21 @@ use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_utils_json_to_toml::json_to_toml;
use serde_json::json;
use std::path::PathBuf;
use toml::Value as TomlValue;

#[derive(Clone)]
pub(crate) struct ConfigApi {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
service: ConfigService,
}

impl ConfigApi {
pub(crate) fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self {
Self {
service: ConfigService::new(codex_home.clone(), cli_overrides.clone()),
codex_home,
cli_overrides,
service: ConfigService::new(codex_home, cli_overrides),
}
}

@@ -39,30 +32,6 @@ impl ConfigApi {
self.service.read(params).await.map_err(map_error)
}

pub(crate) async fn load_thread_agnostic_config(
&self,
overrides: codex_core::config::ConfigOverrides,
request_cli_overrides: Option<std::collections::HashMap<String, serde_json::Value>>,
) -> std::io::Result<Config> {
// Apply the app server's startup `--config` overrides, then apply request-scoped overrides
// with higher precedence.
let mut merged_cli_overrides = self.cli_overrides.clone();
merged_cli_overrides.extend(
request_cli_overrides
.unwrap_or_default()
.into_iter()
.map(|(k, v)| (k, json_to_toml(v))),
);

ConfigBuilder::default()
.codex_home(self.codex_home.clone())
.cli_overrides(merged_cli_overrides)
.harness_overrides(overrides)
.thread_agnostic()
.build()
.await
}

pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
@@ -76,18 +45,6 @@ impl ConfigApi {
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
self.service.batch_write(params).await.map_err(map_error)
}

pub(crate) async fn load_latest_thread_agnostic_config(
&self,
) -> Result<Config, JSONRPCErrorError> {
self.load_thread_agnostic_config(codex_core::config::ConfigOverrides::default(), None)
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to reload config: {err}"),
data: None,
})
}
}

fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
@@ -1,5 +1,6 @@
use std::num::NonZero;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
@@ -62,7 +63,11 @@ pub(crate) async fn run_fuzzy_file_search(
Ok(Ok((root, res))) => {
for m in res.matches {
let path = m.path;
let file_name = file_search::file_name_from_path(&path);
//TODO(shijie): Move file name generation to file_search lib.
let file_name = Path::new(&path)
.file_name()
.map(|name| name.to_string_lossy().into_owned())
.unwrap_or_else(|| path.clone());
let result = FuzzyFileSearchResult {
root: root.clone(),
path,

@@ -17,11 +17,13 @@ use tokio::io::BufReader;
use tokio::io::{self};
use tokio::sync::mpsc;
use toml::Value as TomlValue;
use tracing::Level;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

@@ -101,8 +103,11 @@ pub async fn run_main(
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL)
.with_filter(EnvFilter::from_default_env());

let feedback_layer = feedback.logger_layer();
let feedback_metadata_layer = feedback.metadata_layer();
let feedback_layer = tracing_subscriber::fmt::layer()
.with_writer(feedback.make_writer())
.with_ansi(false)
.with_target(false)
.with_filter(Targets::new().with_default(Level::TRACE));

let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());

@@ -111,7 +116,6 @@ pub async fn run_main(
let _ = tracing_subscriber::registry()
.with(stderr_fmt)
.with(feedback_layer)
.with(feedback_metadata_layer)
.with(otel_logger_layer)
.with(otel_tracing_layer)
.try_init();

@@ -9,12 +9,12 @@ path = "lib.rs"

[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [

@@ -11,6 +11,7 @@ use tokio::process::ChildStdin;
use tokio::process::ChildStdout;

use anyhow::Context;
use assert_cmd::prelude::*;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
@@ -48,6 +49,7 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use std::process::Command as StdCommand;
use tokio::process::Command;

pub struct McpProcess {
@@ -76,8 +78,12 @@ impl McpProcess {
codex_home: &Path,
env_overrides: &[(&str, Option<&str>)],
) -> anyhow::Result<Self> {
let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
.context("should find binary for codex-app-server")?;
// Use assert_cmd to locate the binary path and then switch to tokio::process::Command
let std_cmd = StdCommand::cargo_bin("codex-app-server")
.context("should find binary for codex-mcp-server")?;

let program = std_cmd.get_program().to_owned();

let mut cmd = Command::new(program);

cmd.stdin(Stdio::piped());

@@ -1,10 +1,12 @@
use chrono::DateTime;
use chrono::Utc;
use codex_core::models_manager::model_presets::all_model_presets;
use codex_core::openai_models::model_presets::all_model_presets;
use codex_protocol::openai_models::ClientVersion;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicyConfig;
use serde_json::json;
use std::path::Path;
@@ -23,6 +25,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
} else {
ModelVisibility::Hide
},
minimal_client_version: ClientVersion(0, 1, 0),
supported_in_api: true,
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
@@ -34,6 +37,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
context_window: None,
reasoning_summary_format: ReasoningSummaryFormat::None,
experimental_supported_tools: Vec::new(),
}
}

@@ -305,7 +305,6 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
// Acknowledge sendUserTurn
@@ -419,7 +418,6 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(
@@ -445,7 +443,6 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
model: model.clone(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
timeout(

@@ -7,7 +7,6 @@ mod fuzzy_file_search;
mod interrupt;
mod list_resume;
mod login;
mod output_schema;
mod send_message;
mod set_default_model;
mod user_agent;
@@ -1,282 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn send_user_turn_accepts_output_schema_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let new_conv_id = mcp
.send_new_conversation_request(NewConversationParams {
..Default::default()
})
.await?;
let new_conv_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
)
.await??;
let NewConversationResponse {
conversation_id, ..
} = to_response::<NewConversationResponse>(new_conv_resp)?;

let listener_id = mcp
.send_add_conversation_listener_request(AddConversationListenerParams {
conversation_id,
experimental_raw_events: false,
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(listener_id)),
)
.await??;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let send_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: Some(output_schema.clone()),
})
.await?;
let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload1 = response_mock1.single_request().body_json();
assert_eq!(
payload1.pointer("/text/format"),
Some(&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
}))
);

let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let response_mock2 = responses::mount_sse_once(&server, body2).await;

let send_turn_id_2 = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "Hello again".to_string(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
model: "mock-model".to_string(),
effort: Some(ReasoningEffort::Medium),
summary: ReasoningSummary::Auto,
output_schema: None,
})
.await?;
let _send_turn_resp_2: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id_2)),
)
.await??,
)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;

let payload2 = response_mock2.single_request().body_json();
assert_eq!(payload2.pointer("/text/format"), None);

Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
@@ -18,7 +18,6 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -74,7 +73,8 @@ sandbox_mode = "workspace-write"
}
);
let layers = layers.expect("layers present");
assert_layers_user_then_optional_system(&layers, user_file)?;
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });

Ok(())
}
@@ -136,7 +136,8 @@ view_image = false
);

let layers = layers.expect("layers present");
assert_layers_user_then_optional_system(&layers, user_file)?;
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });

Ok(())
}
@@ -256,7 +257,12 @@ writable_roots = [{}]
);

let layers = layers.expect("layers present");
assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?;
assert_eq!(layers.len(), 2);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });

Ok(())
}
@@ -427,50 +433,3 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {

Ok(())
}

fn assert_layers_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 2);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[1].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 1);
assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}

fn assert_layers_managed_user_then_optional_system(
layers: &[codex_app_server_protocol::ConfigLayer],
managed_file: AbsolutePathBuf,
user_file: AbsolutePathBuf,
) -> Result<()> {
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?;
assert_eq!(layers.len(), 3);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
assert_eq!(
layers[2].name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 2);
assert_eq!(
layers[0].name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file });
}
Ok(())
}
@@ -1,7 +1,6 @@
mod account;
mod config_rpc;
mod model_list;
mod output_schema;
mod rate_limits;
mod review;
mod thread_archive;

@@ -1,231 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

#[tokio::test]
async fn turn_start_accepts_output_schema_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let request = response_mock.single_request();
let payload = request.body_json();
let text = payload.get("text").expect("request missing text field");
let format = text
.get("format")
.expect("request missing text.format field");
assert_eq!(
format,
&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
})
);

Ok(())
}

#[tokio::test]
async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
skip_if_no_network!(Ok(()));

let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock1 = responses::mount_sse_once(&server, body1).await;

let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;

let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;

let output_schema = serde_json::json!({
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
});

let turn_req_1 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
})
.await?;
let turn_resp_1: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_1)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_1)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let payload1 = response_mock1.single_request().body_json();
assert_eq!(
payload1.pointer("/text/format"),
Some(&serde_json::json!({
"name": "codex_output_schema",
"type": "json_schema",
"strict": true,
"schema": output_schema,
}))
);

let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let response_mock2 = responses::mount_sse_once(&server, body2).await;

let turn_req_2 = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello again".to_string(),
}],
output_schema: None,
..Default::default()
})
.await?;
let turn_resp_2: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req_2)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp_2)?;

timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;

let payload2 = response_mock2.single_request().body_json();
assert_eq!(payload2.pointer("/text/format"), None);

Ok(())
}

fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"

model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
@@ -540,7 +540,6 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
model: Some("mock-model".to_string()),
|
||||
effort: Some(ReasoningEffort::Medium),
|
||||
summary: Some(ReasoningSummary::Auto),
|
||||
output_schema: None,
|
||||
})
|
||||
.await?;
|
||||
timeout(
|
||||
@@ -567,7 +566,6 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
|
||||
model: Some("mock-model".to_string()),
|
||||
effort: Some(ReasoningEffort::Medium),
|
||||
summary: Some(ReasoningSummary::Auto),
|
||||
output_schema: None,
|
||||
})
|
||||
.await?;
|
||||
timeout(
|
||||
|
||||
@@ -25,6 +25,5 @@ tree-sitter-bash = { workspace = true }
|
||||
[dev-dependencies]
|
||||
assert_cmd = { workspace = true }
|
||||
assert_matches = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -1,13 +1,8 @@
use assert_cmd::Command;
use assert_cmd::prelude::*;
use std::fs;
use std::process::Command;
use tempfile::tempdir;

fn apply_patch_command() -> anyhow::Result<Command> {
Ok(Command::new(codex_utils_cargo_bin::cargo_bin(
"apply_patch",
)?))
}

#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
let tmp = tempdir()?;
@@ -21,7 +16,8 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
);
apply_patch_command()?
Command::cargo_bin("apply_patch")
.expect("should find apply_patch binary")
.arg(add_patch)
.current_dir(tmp.path())
.assert()
@@ -38,7 +34,8 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
);
apply_patch_command()?
Command::cargo_bin("apply_patch")
.expect("should find apply_patch binary")
.arg(update_patch)
.current_dir(tmp.path())
.assert()
@@ -62,9 +59,10 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+hello
*** End Patch"#
);
apply_patch_command()?
.current_dir(tmp.path())
.write_stdin(add_patch)
let mut cmd =
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
cmd.current_dir(tmp.path());
cmd.write_stdin(add_patch)
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nA {file}\n"));
@@ -79,9 +77,10 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> {
+world
*** End Patch"#
);
apply_patch_command()?
.current_dir(tmp.path())
.write_stdin(update_patch)
let mut cmd =
assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary");
cmd.current_dir(tmp.path());
cmd.write_stdin(update_patch)
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nM {file}\n"));

@@ -1,3 +1,4 @@
|
||||
use assert_cmd::prelude::*;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fs;
|
||||
@@ -8,8 +9,7 @@ use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
|
||||
let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios");
|
||||
for scenario in fs::read_dir(scenarios_dir)? {
|
||||
for scenario in fs::read_dir("tests/fixtures/scenarios")? {
|
||||
let scenario = scenario?;
|
||||
let path = scenario.path();
|
||||
if path.is_dir() {
|
||||
@@ -36,7 +36,7 @@ fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> {
|
||||
// Run apply_patch in the temporary directory. We intentionally do not assert
|
||||
// on the exit status here; the scenarios are specified purely in terms of
|
||||
// final filesystem state, which we compare below.
|
||||
Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?)
|
||||
Command::cargo_bin("apply_patch")?
|
||||
.arg(patch)
|
||||
.current_dir(tmp.path())
|
||||
.output()?;
|
||||
@@ -82,15 +82,11 @@ fn snapshot_dir_recursive(
|
||||
continue;
|
||||
};
|
||||
let rel = stripped.to_path_buf();
|
||||
|
||||
// Under Buck2, files in `__srcs` are often materialized as symlinks.
|
||||
// Use `metadata()` (follows symlinks) so our fixture snapshots work
|
||||
// under both Cargo and Buck2.
|
||||
let metadata = fs::metadata(&path)?;
|
||||
if metadata.is_dir() {
|
||||
let file_type = entry.file_type()?;
|
||||
if file_type.is_dir() {
|
||||
entries.insert(rel.clone(), Entry::Dir);
|
||||
snapshot_dir_recursive(base, &path, entries)?;
|
||||
} else if metadata.is_file() {
|
||||
} else if file_type.is_file() {
|
||||
let contents = fs::read(&path)?;
|
||||
entries.insert(rel, Entry::File(contents));
|
||||
}
|
||||
@@ -102,14 +98,12 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> {
|
||||
for entry in fs::read_dir(src)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
let file_type = entry.file_type()?;
|
||||
let dest_path = dst.join(entry.file_name());
|
||||
|
||||
// See note in `snapshot_dir_recursive` about Buck2 symlink trees.
|
||||
let metadata = fs::metadata(&path)?;
|
||||
if metadata.is_dir() {
|
||||
if file_type.is_dir() {
|
||||
fs::create_dir_all(&dest_path)?;
|
||||
copy_dir_recursive(&path, &dest_path)?;
|
||||
} else if metadata.is_file() {
|
||||
} else if file_type.is_file() {
|
||||
if let Some(parent) = dest_path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
|
||||
@@ -5,13 +5,13 @@ use std::path::Path;
|
||||
use tempfile::tempdir;
|
||||
|
||||
fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result<assert_cmd::assert::Assert> {
|
||||
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
|
||||
let mut cmd = Command::cargo_bin("apply_patch")?;
|
||||
cmd.current_dir(dir);
|
||||
Ok(cmd.arg(patch).assert())
|
||||
}
|
||||
|
||||
fn apply_patch_command(dir: &Path) -> anyhow::Result<Command> {
|
||||
let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?);
|
||||
let mut cmd = Command::cargo_bin("apply_patch")?;
|
||||
cmd.current_dir(dir);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -37,13 +37,13 @@ codex-rmcp-client = { workspace = true }
|
||||
codex-stdio-to-uds = { workspace = true }
|
||||
codex-tui = { workspace = true }
|
||||
codex-tui2 = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
ctor = { workspace = true }
|
||||
libc = { workspace = true }
|
||||
owo-colors = { workspace = true }
|
||||
regex-lite = { workspace = true }
|
||||
regex-lite = { workspace = true}
|
||||
serde_json = { workspace = true }
|
||||
supports-color = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"io-std",
|
||||
"macros",
|
||||
@@ -51,7 +51,6 @@ tokio = { workspace = true, features = [
|
||||
"rt-multi-thread",
|
||||
"signal",
|
||||
] }
|
||||
toml = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
@@ -60,7 +59,6 @@ codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows-
|
||||
[dev-dependencies]
|
||||
assert_cmd = { workspace = true }
|
||||
assert_matches = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
predicates = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -140,7 +140,7 @@ async fn run_command_under_sandbox(
|
||||
use codex_windows_sandbox::run_windows_sandbox_capture;
|
||||
use codex_windows_sandbox::run_windows_sandbox_capture_elevated;
|
||||
|
||||
let policy_str = serde_json::to_string(config.sandbox_policy.get())?;
|
||||
let policy_str = serde_json::to_string(&config.sandbox_policy)?;
|
||||
|
||||
let sandbox_cwd = sandbox_policy_cwd.clone();
|
||||
let cwd_clone = cwd.clone();
|
||||
@@ -216,7 +216,7 @@ async fn run_command_under_sandbox(
|
||||
spawn_command_under_seatbelt(
|
||||
command,
|
||||
cwd,
|
||||
config.sandbox_policy.get(),
|
||||
&config.sandbox_policy,
|
||||
sandbox_policy_cwd.as_path(),
|
||||
stdio_policy,
|
||||
env,
|
||||
@@ -232,7 +232,7 @@ async fn run_command_under_sandbox(
|
||||
codex_linux_sandbox_exe,
|
||||
command,
|
||||
cwd,
|
||||
config.sandbox_policy.get(),
|
||||
&config.sandbox_policy,
|
||||
sandbox_policy_cwd.as_path(),
|
||||
stdio_policy,
|
||||
env,
|
||||
|
||||
@@ -44,7 +44,6 @@ use codex_core::features::Feature;
|
||||
use codex_core::features::FeatureOverrides;
|
||||
use codex_core::features::Features;
|
||||
use codex_core::features::is_known_feature_key;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
|
||||
/// Codex CLI
|
||||
///
|
||||
@@ -688,13 +687,7 @@ async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> {
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
|
||||
|
||||
let codex_home = find_codex_home()?;
|
||||
let cwd = cli.cwd.clone();
|
||||
let config_cwd = match cwd.as_deref() {
|
||||
Some(path) => AbsolutePathBuf::from_absolute_path(path)?,
|
||||
None => AbsolutePathBuf::current_dir()?,
|
||||
};
|
||||
let config_toml =
|
||||
load_config_as_toml_with_cli_overrides(&codex_home, &config_cwd, cli_kv_overrides).await?;
|
||||
let config_toml = load_config_as_toml_with_cli_overrides(&codex_home, cli_kv_overrides).await?;
|
||||
let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?;
|
||||
let overrides = FeatureOverrides::default();
|
||||
let features = Features::from_config(&config_toml, &config_profile, overrides);
|
||||
|
||||
@@ -13,12 +13,15 @@ use codex_core::config::find_codex_home;
|
||||
use codex_core::config::load_global_mcp_servers;
|
||||
use codex_core::config::types::McpServerConfig;
|
||||
use codex_core::config::types::McpServerTransportConfig;
|
||||
use codex_core::features::Feature;
|
||||
use codex_core::mcp::auth::compute_auth_statuses;
|
||||
use codex_core::protocol::McpAuthStatus;
|
||||
use codex_rmcp_client::delete_oauth_tokens;
|
||||
use codex_rmcp_client::perform_oauth_login;
|
||||
use codex_rmcp_client::supports_oauth_login;
|
||||
|
||||
/// [experimental] Launch Codex as an MCP server or manage configured MCP servers.
|
||||
///
|
||||
/// Subcommands:
|
||||
/// - `serve` — run the MCP server on stdio
|
||||
/// - `list` — list configured servers (with `--json`)
|
||||
@@ -36,11 +39,24 @@ pub struct McpCli {
|
||||
|
||||
#[derive(Debug, clap::Subcommand)]
|
||||
pub enum McpSubcommand {
|
||||
/// [experimental] List configured MCP servers.
|
||||
List(ListArgs),
|
||||
|
||||
/// [experimental] Show details for a configured MCP server.
|
||||
Get(GetArgs),
|
||||
|
||||
/// [experimental] Add a global MCP server entry.
|
||||
Add(AddArgs),
|
||||
|
||||
/// [experimental] Remove a global MCP server entry.
|
||||
Remove(RemoveArgs),
|
||||
|
||||
/// [experimental] Authenticate with a configured MCP server via OAuth.
|
||||
/// Requires features.rmcp_client = true in config.toml.
|
||||
Login(LoginArgs),
|
||||
|
||||
/// [experimental] Remove stored OAuth credentials for a server.
|
||||
/// Requires features.rmcp_client = true in config.toml.
|
||||
Logout(LogoutArgs),
|
||||
}
|
||||
|
||||
@@ -266,17 +282,24 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
|
||||
{
|
||||
match supports_oauth_login(&url).await {
|
||||
Ok(true) => {
|
||||
println!("Detected OAuth support. Starting OAuth flow…");
|
||||
perform_oauth_login(
|
||||
&name,
|
||||
&url,
|
||||
config.mcp_oauth_credentials_store_mode,
|
||||
http_headers.clone(),
|
||||
env_http_headers.clone(),
|
||||
&Vec::new(),
|
||||
)
|
||||
.await?;
|
||||
println!("Successfully logged in.");
|
||||
if !config.features.enabled(Feature::RmcpClient) {
|
||||
println!(
|
||||
"MCP server supports login. Add `features.rmcp_client = true` \
|
||||
to your config.toml and run `codex mcp login {name}` to login."
|
||||
);
|
||||
} else {
|
||||
println!("Detected OAuth support. Starting OAuth flow…");
|
||||
perform_oauth_login(
|
||||
&name,
|
||||
&url,
|
||||
config.mcp_oauth_credentials_store_mode,
|
||||
http_headers.clone(),
|
||||
env_http_headers.clone(),
|
||||
&Vec::new(),
|
||||
)
|
||||
.await?;
|
||||
println!("Successfully logged in.");
|
||||
}
|
||||
}
|
||||
Ok(false) => {}
|
||||
Err(_) => println!(
|
||||
@@ -329,6 +352,12 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
|
||||
.await
|
||||
.context("failed to load configuration")?;
|
||||
|
||||
if !config.features.enabled(Feature::RmcpClient) {
|
||||
bail!(
|
||||
"OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details."
|
||||
);
|
||||
}
|
||||
|
||||
let LoginArgs { name, scopes } = login_args;
|
||||
|
||||
let Some(server) = config.mcp_servers.get(&name) else {
|
||||
|
||||
@@ -24,7 +24,7 @@ prefix_rule(
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let output = Command::new(codex_utils_cargo_bin::cargo_bin("codex")?)
|
||||
let output = Command::cargo_bin("codex")?
|
||||
.env("CODEX_HOME", codex_home.path())
|
||||
.args([
|
||||
"execpolicy",
|
||||
|
||||
@@ -8,7 +8,7 @@ use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
|
||||
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
|
||||
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
|
||||
cmd.env("CODEX_HOME", codex_home);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use serde_json::json;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
|
||||
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
|
||||
let mut cmd = assert_cmd::Command::cargo_bin("codex")?;
|
||||
cmd.env("CODEX_HOME", codex_home);
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -59,7 +59,6 @@ pub enum ResponseEvent {
|
||||
summary_index: i64,
|
||||
},
|
||||
RateLimits(RateLimitSnapshot),
|
||||
ModelsEtag(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
|
||||
@@ -152,9 +152,6 @@ impl Stream for AggregatedStream {
|
||||
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
|
||||
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
|
||||
}
|
||||
Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))) => {
|
||||
return Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag))));
|
||||
}
|
||||
Poll::Ready(Some(Ok(ResponseEvent::Completed {
|
||||
response_id,
|
||||
token_usage,
|
||||
|
||||
@@ -101,7 +101,7 @@ mod tests {
|
||||
struct CapturingTransport {
|
||||
last_request: Arc<Mutex<Option<Request>>>,
|
||||
body: Arc<ModelsResponse>,
|
||||
etag: Option<String>,
|
||||
response_etag: Arc<Option<String>>,
|
||||
}
|
||||
|
||||
impl Default for CapturingTransport {
|
||||
@@ -109,7 +109,7 @@ mod tests {
|
||||
Self {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(ModelsResponse { models: Vec::new() }),
|
||||
etag: None,
|
||||
response_etag: Arc::new(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -120,7 +120,7 @@ mod tests {
|
||||
*self.last_request.lock().unwrap() = Some(req);
|
||||
let body = serde_json::to_vec(&*self.body).unwrap();
|
||||
let mut headers = HeaderMap::new();
|
||||
if let Some(etag) = &self.etag {
|
||||
if let Some(etag) = self.response_etag.as_ref().as_deref() {
|
||||
headers.insert(ETAG, etag.parse().unwrap());
|
||||
}
|
||||
Ok(Response {
|
||||
@@ -169,7 +169,7 @@ mod tests {
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: None,
|
||||
response_etag: Arc::new(None),
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -178,7 +178,7 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let (models, _) = client
|
||||
let (models, _etag) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
@@ -223,6 +223,7 @@ mod tests {
|
||||
"truncation_policy": {"mode": "bytes", "limit": 10_000},
|
||||
"supports_parallel_tool_calls": false,
|
||||
"context_window": null,
|
||||
"reasoning_summary_format": "none",
|
||||
"experimental_supported_tools": [],
|
||||
}))
|
||||
.unwrap(),
|
||||
@@ -232,7 +233,7 @@ mod tests {
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: None,
|
||||
response_etag: Arc::new(None),
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -241,7 +242,7 @@ mod tests {
|
||||
DummyAuth,
|
||||
);
|
||||
|
||||
let (models, _) = client
|
||||
let (models, _etag) = client
|
||||
.list_models("0.99.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
@@ -259,7 +260,7 @@ mod tests {
|
||||
let transport = CapturingTransport {
|
||||
last_request: Arc::new(Mutex::new(None)),
|
||||
body: Arc::new(response),
|
||||
etag: Some("\"abc\"".to_string()),
|
||||
response_etag: Arc::new(Some("\"abc\"".to_string())),
|
||||
};
|
||||
|
||||
let client = ModelsClient::new(
|
||||
@@ -274,6 +275,6 @@ mod tests {
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(models.len(), 0);
|
||||
assert_eq!(etag, Some("\"abc\"".to_string()));
|
||||
assert_eq!(etag.as_deref(), Some("\"abc\""));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,16 +204,24 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
call_id,
|
||||
..
|
||||
} => {
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": name,
|
||||
"arguments": arguments,
|
||||
}
|
||||
}]
|
||||
});
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
}
|
||||
ResponseItem::LocalShellCall {
|
||||
id,
|
||||
@@ -221,14 +229,22 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
status,
|
||||
action,
|
||||
} => {
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
let tool_call = json!({
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id.clone().unwrap_or_default(),
|
||||
"type": "local_shell_call",
|
||||
"status": status,
|
||||
"action": action,
|
||||
}]
|
||||
});
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
if let Some(reasoning) = reasoning_by_anchor_index.get(&idx)
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
}
|
||||
ResponseItem::FunctionCallOutput { call_id, output } => {
|
||||
let content_value = if let Some(items) = &output.content_items {
|
||||
@@ -261,16 +277,18 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
input,
|
||||
status: _,
|
||||
} => {
|
||||
let tool_call = json!({
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
});
|
||||
let reasoning = reasoning_by_anchor_index.get(&idx).map(String::as_str);
|
||||
push_tool_call_message(&mut messages, tool_call, reasoning);
|
||||
messages.push(json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [{
|
||||
"id": id,
|
||||
"type": "custom",
|
||||
"custom": {
|
||||
"name": name,
|
||||
"input": input,
|
||||
}
|
||||
}]
|
||||
}));
|
||||
}
|
||||
ResponseItem::CustomToolCallOutput { call_id, output } => {
|
||||
messages.push(json!({
|
||||
@@ -310,50 +328,11 @@ impl<'a> ChatRequestBuilder<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn push_tool_call_message(messages: &mut Vec<Value>, tool_call: Value, reasoning: Option<&str>) {
|
||||
// Chat Completions requires that tool calls are grouped into a single assistant message
|
||||
// (with `tool_calls: [...]`) followed by tool role responses.
|
||||
if let Some(Value::Object(obj)) = messages.last_mut()
|
||||
&& obj.get("role").and_then(Value::as_str) == Some("assistant")
|
||||
&& obj.get("content").is_some_and(Value::is_null)
|
||||
&& let Some(tool_calls) = obj.get_mut("tool_calls").and_then(Value::as_array_mut)
|
||||
{
|
||||
tool_calls.push(tool_call);
|
||||
if let Some(reasoning) = reasoning {
|
||||
if let Some(Value::String(existing)) = obj.get_mut("reasoning") {
|
||||
if !existing.is_empty() {
|
||||
existing.push('\n');
|
||||
}
|
||||
existing.push_str(reasoning);
|
||||
} else {
|
||||
obj.insert(
|
||||
"reasoning".to_string(),
|
||||
Value::String(reasoning.to_string()),
|
||||
);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let mut msg = json!({
|
||||
"role": "assistant",
|
||||
"content": null,
|
||||
"tool_calls": [tool_call],
|
||||
});
|
||||
if let Some(reasoning) = reasoning
|
||||
&& let Some(obj) = msg.as_object_mut()
|
||||
{
|
||||
obj.insert("reasoning".to_string(), json!(reasoning));
|
||||
}
|
||||
messages.push(msg);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::provider::RetryConfig;
|
||||
use crate::provider::WireApi;
|
||||
use codex_protocol::models::FunctionCallOutputPayload;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use codex_protocol::protocol::SubAgentSource;
|
||||
use http::HeaderValue;
|
||||
@@ -406,89 +385,4 @@ mod tests {
|
||||
Some(&HeaderValue::from_static("review"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn groups_consecutive_tool_calls_into_a_single_assistant_message() {
|
||||
let prompt_input = vec![
|
||||
ResponseItem::Message {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![ContentItem::InputText {
|
||||
text: "read these".to_string(),
|
||||
}],
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"a.txt"}"#.to_string(),
|
||||
call_id: "call-a".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"b.txt"}"#.to_string(),
|
||||
call_id: "call-b".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCall {
|
||||
id: None,
|
||||
name: "read_file".to_string(),
|
||||
arguments: r#"{"path":"c.txt"}"#.to_string(),
|
||||
call_id: "call-c".to_string(),
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-a".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "A".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-b".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "B".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
ResponseItem::FunctionCallOutput {
|
||||
call_id: "call-c".to_string(),
|
||||
output: FunctionCallOutputPayload {
|
||||
content: "C".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
|
||||
.build(&provider())
|
||||
.expect("request");
|
||||
|
||||
let messages = req
|
||||
.body
|
||||
.get("messages")
|
||||
.and_then(|v| v.as_array())
|
||||
.expect("messages array");
|
||||
// system + user + assistant(tool_calls=[...]) + 3 tool outputs
|
||||
assert_eq!(messages.len(), 6);
|
||||
|
||||
assert_eq!(messages[0]["role"], "system");
|
||||
assert_eq!(messages[1]["role"], "user");
|
||||
|
||||
let tool_calls_msg = &messages[2];
|
||||
assert_eq!(tool_calls_msg["role"], "assistant");
|
||||
assert_eq!(tool_calls_msg["content"], serde_json::Value::Null);
|
||||
let tool_calls = tool_calls_msg["tool_calls"]
|
||||
.as_array()
|
||||
.expect("tool_calls array");
|
||||
assert_eq!(tool_calls.len(), 3);
|
||||
assert_eq!(tool_calls[0]["id"], "call-a");
|
||||
assert_eq!(tool_calls[1]["id"], "call-b");
|
||||
assert_eq!(tool_calls[2]["id"], "call-c");
|
||||
|
||||
assert_eq!(messages[3]["role"], "tool");
|
||||
assert_eq!(messages[3]["tool_call_id"], "call-a");
|
||||
assert_eq!(messages[4]["role"], "tool");
|
||||
assert_eq!(messages[4]["tool_call_id"], "call-b");
|
||||
assert_eq!(messages[5]["role"], "tool");
|
||||
assert_eq!(messages[5]["tool_call_id"], "call-c");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,21 +30,6 @@ pub(crate) fn spawn_chat_stream(
|
||||
ResponseStream { rx_event }
|
||||
}
|
||||
|
||||
/// Processes Server-Sent Events from the legacy Chat Completions streaming API.
|
||||
///
|
||||
/// The upstream protocol terminates a streaming response with a final sentinel event
|
||||
/// (`data: [DONE]`). Historically, some of our test stubs have emitted `data: DONE`
|
||||
/// (without brackets) instead.
|
||||
///
|
||||
/// `eventsource_stream` delivers these sentinels as regular events rather than signaling
|
||||
/// end-of-stream. If we try to parse them as JSON, we log and skip them, then keep
|
||||
/// polling for more events.
|
||||
///
|
||||
/// On servers that keep the HTTP connection open after emitting the sentinel (notably
|
||||
/// wiremock on Windows), skipping the sentinel means we never emit `ResponseEvent::Completed`.
|
||||
/// Higher-level workflows/tests that wait for completion before issuing subsequent model
|
||||
/// calls will then stall, which shows up as "expected N requests, got 1" verification
|
||||
/// failures in the mock server.
|
||||
pub async fn process_chat_sse<S>(
|
||||
stream: S,
|
||||
tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>,
|
||||
@@ -72,31 +57,6 @@ pub async fn process_chat_sse<S>(
|
||||
let mut reasoning_item: Option<ResponseItem> = None;
|
||||
let mut completed_sent = false;
|
||||
|
||||
async fn flush_and_complete(
|
||||
tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>,
|
||||
reasoning_item: &mut Option<ResponseItem>,
|
||||
assistant_item: &mut Option<ResponseItem>,
|
||||
) {
|
||||
if let Some(reasoning) = reasoning_item.take() {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Some(assistant) = assistant_item.take() {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
|
||||
.await;
|
||||
}
|
||||
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::Completed {
|
||||
response_id: String::new(),
|
||||
token_usage: None,
|
||||
}))
|
||||
.await;
|
||||
}
|
||||
|
||||
loop {
|
||||
let start = Instant::now();
|
||||
let response = timeout(idle_timeout, stream.next()).await;
|
||||
@@ -110,8 +70,24 @@ pub async fn process_chat_sse<S>(
|
||||
return;
|
||||
}
|
||||
Ok(None) => {
|
||||
if let Some(reasoning) = reasoning_item {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(reasoning)))
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Some(assistant) = assistant_item {
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::OutputItemDone(assistant)))
|
||||
.await;
|
||||
}
|
||||
if !completed_sent {
|
||||
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
|
||||
let _ = tx_event
|
||||
.send(Ok(ResponseEvent::Completed {
|
||||
response_id: String::new(),
|
||||
token_usage: None,
|
||||
}))
|
||||
.await;
|
||||
}
|
||||
return;
|
||||
}
|
||||
@@ -125,25 +101,16 @@ pub async fn process_chat_sse<S>(
|
||||
|
||||
trace!("SSE event: {}", sse.data);
|
||||
|
||||
let data = sse.data.trim();
|
||||
|
||||
if data.is_empty() {
|
||||
if sse.data.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if data == "[DONE]" || data == "DONE" {
|
||||
if !completed_sent {
|
||||
flush_and_complete(&tx_event, &mut reasoning_item, &mut assistant_item).await;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let value: serde_json::Value = match serde_json::from_str(data) {
|
||||
let value: serde_json::Value = match serde_json::from_str(&sse.data) {
|
||||
Ok(val) => val,
|
||||
Err(err) => {
|
||||
debug!(
|
||||
"Failed to parse ChatCompletions SSE event: {err}, data: {}",
|
||||
data
|
||||
&sse.data
|
||||
);
|
||||
continue;
|
||||
}
|
||||
@@ -395,16 +362,6 @@ mod tests {
|
||||
body
|
||||
}
|
||||
|
||||
/// Regression test: the stream should complete when we see a `[DONE]` sentinel.
|
||||
///
|
||||
/// This is important for tests/mocks that don't immediately close the underlying
|
||||
/// connection after emitting the sentinel.
|
||||
#[tokio::test]
|
||||
async fn completes_on_done_sentinel_without_json() {
|
||||
let events = collect_events("event: message\ndata: [DONE]\n\n").await;
|
||||
assert_matches!(&events[..], [ResponseEvent::Completed { .. }]);
|
||||
}
|
||||
|
||||
async fn collect_events(body: &str) -> Vec<ResponseEvent> {
|
||||
let reader = ReaderStream::new(std::io::Cursor::new(body.to_string()))
|
||||
.map_err(|err| codex_client::TransportError::Network(err.to_string()));
|
||||
|
||||
@@ -51,19 +51,11 @@ pub fn spawn_response_stream(
|
||||
telemetry: Option<Arc<dyn SseTelemetry>>,
|
||||
) -> ResponseStream {
|
||||
let rate_limits = parse_rate_limit(&stream_response.headers);
|
||||
let models_etag = stream_response
|
||||
.headers
|
||||
.get("X-Models-Etag")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(ToString::to_string);
|
||||
let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600);
|
||||
tokio::spawn(async move {
|
||||
if let Some(snapshot) = rate_limits {
|
||||
let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await;
|
||||
}
|
||||
if let Some(etag) = models_etag {
|
||||
let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
|
||||
}
|
||||
process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
|
||||
});
|
||||
|
||||
|
||||
@@ -4,12 +4,14 @@ use codex_api::provider::Provider;
|
||||
use codex_api::provider::RetryConfig;
|
||||
use codex_api::provider::WireApi;
|
||||
use codex_client::ReqwestTransport;
|
||||
use codex_protocol::openai_models::ClientVersion;
|
||||
use codex_protocol::openai_models::ConfigShellToolType;
|
||||
use codex_protocol::openai_models::ModelInfo;
|
||||
use codex_protocol::openai_models::ModelVisibility;
|
||||
use codex_protocol::openai_models::ModelsResponse;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::openai_models::ReasoningEffortPreset;
|
||||
use codex_protocol::openai_models::ReasoningSummaryFormat;
|
||||
use codex_protocol::openai_models::TruncationPolicyConfig;
|
||||
use http::HeaderMap;
|
||||
use http::Method;
|
||||
@@ -73,6 +75,7 @@ async fn models_client_hits_models_endpoint() {
|
||||
],
|
||||
shell_type: ConfigShellToolType::ShellCommand,
|
||||
visibility: ModelVisibility::List,
|
||||
minimal_client_version: ClientVersion(0, 1, 0),
|
||||
supported_in_api: true,
|
||||
priority: 1,
|
||||
upgrade: None,
|
||||
@@ -84,6 +87,7 @@ async fn models_client_hits_models_endpoint() {
|
||||
truncation_policy: TruncationPolicyConfig::bytes(10_000),
|
||||
supports_parallel_tool_calls: false,
|
||||
context_window: None,
|
||||
reasoning_summary_format: ReasoningSummaryFormat::None,
|
||||
experimental_supported_tools: Vec::new(),
|
||||
}],
|
||||
};
|
||||
@@ -101,7 +105,7 @@ async fn models_client_hits_models_endpoint() {
|
||||
let transport = ReqwestTransport::new(reqwest::Client::new());
|
||||
let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);
|
||||
|
||||
let (models, _) = client
|
||||
let (models, _etag) = client
|
||||
.list_models("0.1.0", HeaderMap::new())
|
||||
.await
|
||||
.expect("models request should succeed");
|
||||
|
||||
@@ -69,15 +69,6 @@ impl ReqwestTransport {
|
||||
#[async_trait]
|
||||
impl HttpTransport for ReqwestTransport {
|
||||
async fn execute(&self, req: Request) -> Result<Response, TransportError> {
|
||||
if enabled!(Level::TRACE) {
|
||||
trace!(
|
||||
"{} to {}: {}",
|
||||
req.method,
|
||||
req.url,
|
||||
req.body.as_ref().unwrap_or_default()
|
||||
);
|
||||
}
|
||||
|
||||
let builder = self.build(req)?;
|
||||
let resp = builder.send().await.map_err(Self::map_error)?;
|
||||
let status = resp.status();
|
||||
|
||||
@@ -10,10 +10,7 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta
|
||||
("model", model.to_string()),
|
||||
("provider", config.model_provider_id.clone()),
|
||||
("approval", config.approval_policy.value().to_string()),
|
||||
(
|
||||
"sandbox",
|
||||
summarize_sandbox_policy(config.sandbox_policy.get()),
|
||||
),
|
||||
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
|
||||
];
|
||||
if config.model_provider.wire_api == WireApi::Responses {
|
||||
let reasoning_effort = config
|
||||
|
||||
@@ -16,7 +16,6 @@ workspace = true
|
||||
anyhow = { workspace = true }
|
||||
async-channel = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
arc-swap = "1.7.1"
|
||||
base64 = { workspace = true }
|
||||
chardetng = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
@@ -123,7 +122,6 @@ assert_cmd = { workspace = true }
|
||||
assert_matches = { workspace = true }
|
||||
codex-arg0 = { workspace = true }
|
||||
codex-core = { path = ".", features = ["deterministic_process_ids"] }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
core_test_support = { workspace = true }
|
||||
ctor = { workspace = true }
|
||||
escargot = { workspace = true }
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -67,6 +67,11 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
|
||||
status,
|
||||
request_id: extract_request_id(headers.as_ref()),
|
||||
})
|
||||
} else if status == http::StatusCode::PRECONDITION_FAILED
|
||||
&& body_text
|
||||
.contains("Models catalog has changed. Please refresh your models list.")
|
||||
{
|
||||
CodexErr::OutdatedModels
|
||||
} else {
|
||||
CodexErr::UnexpectedStatus(UnexpectedResponseError {
|
||||
status,
|
||||
|
||||
@@ -46,7 +46,6 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<V
|
||||
"string_content",
|
||||
"raw_string",
|
||||
"number",
|
||||
"concatenation",
|
||||
];
|
||||
// Allow only safe punctuation / operator tokens; anything else causes reject.
|
||||
const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"];
|
||||
@@ -159,48 +158,6 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
|
||||
return None;
|
||||
}
|
||||
}
|
||||
"concatenation" => {
|
||||
// Handle concatenated arguments like -g"*.py"
|
||||
let mut concatenated = String::new();
|
||||
let mut concat_cursor = child.walk();
|
||||
for part in child.named_children(&mut concat_cursor) {
|
||||
match part.kind() {
|
||||
"word" | "number" => {
|
||||
concatenated
|
||||
.push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str());
|
||||
}
|
||||
"string" => {
|
||||
if part.child_count() == 3
|
||||
&& part.child(0)?.kind() == "\""
|
||||
&& part.child(1)?.kind() == "string_content"
|
||||
&& part.child(2)?.kind() == "\""
|
||||
{
|
||||
concatenated.push_str(
|
||||
part.child(1)?
|
||||
.utf8_text(src.as_bytes())
|
||||
.ok()?
|
||||
.to_owned()
|
||||
.as_str(),
|
||||
);
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
"raw_string" => {
|
||||
let raw_string = part.utf8_text(src.as_bytes()).ok()?;
|
||||
let stripped = raw_string
|
||||
.strip_prefix('\'')
|
||||
.and_then(|s| s.strip_suffix('\''))?;
|
||||
concatenated.push_str(stripped);
|
||||
}
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
if concatenated.is_empty() {
|
||||
return None;
|
||||
}
|
||||
words.push(concatenated);
|
||||
}
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
@@ -299,47 +256,4 @@ mod tests {
|
||||
let parsed = parse_shell_lc_plain_commands(&command).unwrap();
|
||||
assert_eq!(parsed, vec![vec!["ls".to_string()]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn accepts_concatenated_flag_and_value() {
|
||||
// Test case: -g"*.py" (flag directly concatenated with quoted value)
|
||||
let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap();
|
||||
assert_eq!(
|
||||
cmds,
|
||||
vec![vec![
|
||||
"rg".to_string(),
|
||||
"-n".to_string(),
|
||||
"foo".to_string(),
|
||||
"-g*.py".to_string(),
|
||||
]]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn accepts_concatenated_flag_with_single_quotes() {
|
||||
let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap();
|
||||
assert_eq!(
|
||||
cmds,
|
||||
vec![vec![
|
||||
"grep".to_string(),
|
||||
"-n".to_string(),
|
||||
"pattern".to_string(),
|
||||
"-g*.txt".to_string(),
|
||||
]]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_concatenation_with_variable_substitution() {
|
||||
// Environment variables in concatenated strings should be rejected
|
||||
assert!(parse_seq("rg -g\"$VAR\" pattern").is_none());
|
||||
assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_concatenation_with_command_substitution() {
|
||||
// Command substitution in concatenated strings should be rejected
|
||||
assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none());
|
||||
assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ use http::StatusCode as HttpStatusCode;
|
||||
use reqwest::StatusCode;
|
||||
use serde_json::Value;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::warn;
|
||||
|
||||
@@ -49,15 +50,16 @@ use crate::features::FEATURES;
|
||||
use crate::flags::CODEX_RS_SSE_FIXTURE;
|
||||
use crate::model_provider_info::ModelProviderInfo;
|
||||
use crate::model_provider_info::WireApi;
|
||||
use crate::models_manager::model_family::ModelFamily;
|
||||
use crate::openai_models::model_family::ModelFamily;
|
||||
use crate::tools::spec::create_tools_json_for_chat_completions_api;
|
||||
use crate::tools::spec::create_tools_json_for_responses_api;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug)]
|
||||
pub struct ModelClient {
|
||||
config: Arc<Config>,
|
||||
auth_manager: Option<Arc<AuthManager>>,
|
||||
model_family: ModelFamily,
|
||||
model_family: RwLock<ModelFamily>,
|
||||
models_etag: RwLock<Option<String>>,
|
||||
otel_manager: OtelManager,
|
||||
provider: ModelProviderInfo,
|
||||
conversation_id: ConversationId,
|
||||
@@ -72,6 +74,7 @@ impl ModelClient {
|
||||
config: Arc<Config>,
|
||||
auth_manager: Option<Arc<AuthManager>>,
|
||||
model_family: ModelFamily,
|
||||
models_etag: Option<String>,
|
||||
otel_manager: OtelManager,
|
||||
provider: ModelProviderInfo,
|
||||
effort: Option<ReasoningEffortConfig>,
|
||||
@@ -82,7 +85,8 @@ impl ModelClient {
|
||||
Self {
|
||||
config,
|
||||
auth_manager,
|
||||
model_family,
|
||||
model_family: RwLock::new(model_family),
|
||||
models_etag: RwLock::new(models_etag),
|
||||
otel_manager,
|
||||
provider,
|
||||
conversation_id,
|
||||
@@ -92,8 +96,8 @@ impl ModelClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_model_context_window(&self) -> Option<i64> {
|
||||
let model_family = self.get_model_family();
|
||||
pub async fn get_model_context_window(&self) -> Option<i64> {
|
||||
let model_family = self.get_model_family().await;
|
||||
let effective_context_window_percent = model_family.effective_context_window_percent;
|
||||
model_family
|
||||
.context_window
|
||||
@@ -146,7 +150,7 @@ impl ModelClient {
|
||||
}
|
||||
|
||||
let auth_manager = self.auth_manager.clone();
|
||||
let model_family = self.get_model_family();
|
||||
let model_family = self.get_model_family().await;
|
||||
let instructions = prompt.get_full_instructions(&model_family).into_owned();
|
||||
let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
|
||||
let api_prompt = build_api_prompt(prompt, instructions, tools_json);
|
||||
@@ -167,7 +171,7 @@ impl ModelClient {
|
||||
|
||||
let stream_result = client
|
||||
.stream_prompt(
|
||||
&self.get_model(),
|
||||
&self.get_model().await,
|
||||
&api_prompt,
|
||||
Some(conversation_id.clone()),
|
||||
Some(session_source.clone()),
|
||||
@@ -200,7 +204,7 @@ impl ModelClient {
|
||||
}
|
||||
|
||||
let auth_manager = self.auth_manager.clone();
|
||||
let model_family = self.get_model_family();
|
||||
let model_family = self.get_model_family().await;
|
||||
let instructions = prompt.get_full_instructions(&model_family).into_owned();
|
||||
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
|
||||
|
||||
@@ -262,11 +266,14 @@ impl ModelClient {
|
||||
store_override: None,
|
||||
conversation_id: Some(conversation_id.clone()),
|
||||
session_source: Some(session_source.clone()),
|
||||
extra_headers: beta_feature_headers(&self.config),
|
||||
extra_headers: beta_feature_headers(
|
||||
&self.config,
|
||||
self.get_models_etag().await.clone(),
|
||||
),
|
||||
};
|
||||
|
||||
let stream_result = client
|
||||
.stream_prompt(&self.get_model(), &api_prompt, options)
|
||||
.stream_prompt(&self.get_model().await, &api_prompt, options)
|
||||
.await;
|
||||
|
||||
match stream_result {
|
||||
@@ -297,13 +304,25 @@ impl ModelClient {
|
||||
}
|
||||
|
||||
/// Returns the currently configured model slug.
|
||||
pub fn get_model(&self) -> String {
|
||||
self.get_model_family().get_model_slug().to_string()
|
||||
pub async fn get_model(&self) -> String {
|
||||
self.get_model_family().await.get_model_slug().to_string()
|
||||
}
|
||||
|
||||
/// Returns the currently configured model family.
|
||||
pub fn get_model_family(&self) -> ModelFamily {
|
||||
self.model_family.clone()
|
||||
pub async fn get_model_family(&self) -> ModelFamily {
|
||||
self.model_family.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_models_etag(&self) -> Option<String> {
|
||||
self.models_etag.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn update_models_etag(&self, etag: Option<String>) {
|
||||
*self.models_etag.write().await = etag;
|
||||
}
|
||||
|
||||
pub async fn update_model_family(&self, model_family: ModelFamily) {
|
||||
*self.model_family.write().await = model_family;
|
||||
}
|
||||
|
||||
/// Returns the current reasoning effort setting.
|
||||
@@ -340,10 +359,10 @@ impl ModelClient {
|
||||
.with_telemetry(Some(request_telemetry));
|
||||
|
||||
let instructions = prompt
|
||||
.get_full_instructions(&self.get_model_family())
|
||||
.get_full_instructions(&self.get_model_family().await)
|
||||
.into_owned();
|
||||
let payload = ApiCompactionInput {
|
||||
model: &self.get_model(),
|
||||
model: &self.get_model().await,
|
||||
input: &prompt.input,
|
||||
instructions: &instructions,
|
||||
};
|
||||
@@ -398,7 +417,7 @@ fn build_api_prompt(prompt: &Prompt, instructions: String, tools_json: Vec<Value
|
||||
}
|
||||
}
|
||||
|
||||
fn beta_feature_headers(config: &Config) -> ApiHeaderMap {
|
||||
fn beta_feature_headers(config: &Config, models_etag: Option<String>) -> ApiHeaderMap {
|
||||
let enabled = FEATURES
|
||||
.iter()
|
||||
.filter_map(|spec| {
|
||||
@@ -416,6 +435,11 @@ fn beta_feature_headers(config: &Config) -> ApiHeaderMap {
|
||||
{
|
||||
headers.insert("x-codex-beta-features", header_value);
|
||||
}
|
||||
if let Some(etag) = models_etag
|
||||
&& let Ok(header_value) = HeaderValue::from_str(&etag)
|
||||
{
|
||||
headers.insert("X-If-Models-Match", header_value);
|
||||
}
|
||||
headers
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use crate::client_common::tools::ToolSpec;
|
||||
use crate::codex::Session;
|
||||
use crate::codex::TurnContext;
|
||||
use crate::error::Result;
|
||||
use crate::models_manager::model_family::ModelFamily;
|
||||
use crate::features::Feature;
|
||||
use crate::openai_models::model_family::ModelFamily;
|
||||
use crate::tools::ToolRouter;
|
||||
pub use codex_api::common::ResponseEvent;
|
||||
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
@@ -44,6 +48,28 @@ pub struct Prompt {
|
||||
}
|
||||
|
||||
impl Prompt {
|
||||
pub(crate) async fn new(
|
||||
sess: &Session,
|
||||
turn_context: &TurnContext,
|
||||
router: &ToolRouter,
|
||||
input: &[ResponseItem],
|
||||
) -> Prompt {
|
||||
let model_supports_parallel = turn_context
|
||||
.client
|
||||
.get_model_family()
|
||||
.await
|
||||
.supports_parallel_tool_calls;
|
||||
|
||||
Prompt {
|
||||
input: input.to_vec(),
|
||||
tools: router.specs(),
|
||||
parallel_tool_calls: model_supports_parallel
|
||||
&& sess.enabled(Feature::ParallelToolCalls),
|
||||
base_instructions_override: turn_context.base_instructions.clone(),
|
||||
output_schema: turn_context.final_output_json_schema.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
|
||||
let base = self
|
||||
.base_instructions_override
|
||||
@@ -259,7 +285,7 @@ mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use crate::config::test_config;
|
||||
use crate::models_manager::manager::ModelsManager;
|
||||
use crate::openai_models::models_manager::ModelsManager;
|
||||
|
||||
use super::*;
|
||||
|
||||
|
||||
@@ -13,11 +13,11 @@ use crate::compact;
|
||||
use crate::compact::run_inline_auto_compact_task;
|
||||
use crate::compact::should_use_remote_compact_task;
|
||||
use crate::compact_remote::run_inline_remote_auto_compact_task;
|
||||
use crate::exec_policy::ExecPolicyManager;
|
||||
use crate::exec_policy::load_exec_policy_for_features;
|
||||
use crate::features::Feature;
|
||||
use crate::features::Features;
|
||||
use crate::models_manager::manager::ModelsManager;
|
||||
use crate::models_manager::model_family::ModelFamily;
|
||||
use crate::openai_models::model_family::ModelFamily;
|
||||
use crate::openai_models::models_manager::ModelsManager;
|
||||
use crate::parse_command::parse_command;
|
||||
use crate::parse_turn_item;
|
||||
use crate::stream_events_utils::HandleOutputCtx;
|
||||
@@ -78,6 +78,7 @@ use crate::client_common::ResponseEvent;
|
||||
use crate::compact::collect_user_messages;
|
||||
use crate::config::Config;
|
||||
use crate::config::Constrained;
|
||||
use crate::config::ConstraintError;
|
||||
use crate::config::ConstraintResult;
|
||||
use crate::config::GhostSnapshotConfig;
|
||||
use crate::config::types::ShellEnvironmentPolicy;
|
||||
@@ -88,7 +89,6 @@ use crate::error::Result as CodexResult;
|
||||
#[cfg(test)]
|
||||
use crate::exec::StreamOutput;
|
||||
use crate::exec_policy::ExecPolicyUpdateError;
|
||||
use crate::feedback_tags;
|
||||
use crate::mcp::auth::compute_auth_statuses;
|
||||
use crate::mcp_connection_manager::McpConnectionManager;
|
||||
use crate::model_provider_info::CHAT_WIRE_API_DEPRECATION_SUMMARY;
|
||||
@@ -149,6 +149,7 @@ use crate::user_instructions::UserInstructions;
|
||||
use crate::user_notification::UserNotification;
|
||||
use crate::util::backoff;
|
||||
use codex_async_utils::OrCancelExt;
|
||||
use codex_execpolicy::Policy as ExecPolicy;
|
||||
use codex_otel::otel_manager::OtelManager;
|
||||
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
|
||||
use codex_protocol::models::ContentItem;
|
||||
@@ -241,15 +242,14 @@ impl Codex {
|
||||
)
|
||||
.await;
|
||||
|
||||
let exec_policy = ExecPolicyManager::load(&config.features, &config.config_layer_stack)
|
||||
let exec_policy = load_exec_policy_for_features(&config.features, &config.codex_home)
|
||||
.await
|
||||
.map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?;
|
||||
let exec_policy = Arc::new(RwLock::new(exec_policy));
|
||||
|
||||
let config = Arc::new(config);
|
||||
if config.features.enabled(Feature::RemoteModels)
|
||||
&& let Err(err) = models_manager
|
||||
.refresh_available_models_with_cache(&config)
|
||||
.await
|
||||
&& let Err(err) = models_manager.try_refresh_available_models(&config).await
|
||||
{
|
||||
error!("failed to refresh available models: {err:?}");
|
||||
}
|
||||
@@ -267,6 +267,7 @@ impl Codex {
|
||||
sandbox_policy: config.sandbox_policy.clone(),
|
||||
cwd: config.cwd.clone(),
|
||||
original_config_do_not_use: Arc::clone(&config),
|
||||
exec_policy,
|
||||
session_source,
|
||||
};
|
||||
|
||||
@@ -278,7 +279,6 @@ impl Codex {
|
||||
config.clone(),
|
||||
auth_manager.clone(),
|
||||
models_manager.clone(),
|
||||
exec_policy,
|
||||
tx_event.clone(),
|
||||
conversation_history,
|
||||
session_source_clone,
|
||||
@@ -372,6 +372,7 @@ pub(crate) struct TurnContext {
|
||||
pub(crate) final_output_json_schema: Option<Value>,
|
||||
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
pub(crate) tool_call_gate: Arc<ReadinessFlag>,
|
||||
pub(crate) exec_policy: Arc<RwLock<ExecPolicy>>,
|
||||
pub(crate) truncation_policy: TruncationPolicy,
|
||||
}
|
||||
|
||||
@@ -415,7 +416,7 @@ pub(crate) struct SessionConfiguration {
|
||||
/// When to escalate for approval for execution
|
||||
approval_policy: Constrained<AskForApproval>,
|
||||
/// How to sandbox commands executed in the system
|
||||
sandbox_policy: Constrained<SandboxPolicy>,
|
||||
sandbox_policy: SandboxPolicy,
|
||||
|
||||
/// Working directory that should be treated as the *root* of the
|
||||
/// session. All relative paths supplied by the model as well as the
|
||||
@@ -426,6 +427,9 @@ pub(crate) struct SessionConfiguration {
|
||||
/// operate deterministically.
|
||||
cwd: PathBuf,
|
||||
|
||||
/// Execpolicy policy, applied only when enabled by feature flag.
|
||||
exec_policy: Arc<RwLock<ExecPolicy>>,
|
||||
|
||||
// TODO(pakrym): Remove config from here
|
||||
original_config_do_not_use: Arc<Config>,
|
||||
/// Source of the session (cli, vscode, exec, mcp, ...)
|
||||
@@ -448,7 +452,7 @@ impl SessionConfiguration {
|
||||
next_configuration.approval_policy.set(approval_policy)?;
|
||||
}
|
||||
if let Some(sandbox_policy) = updates.sandbox_policy.clone() {
|
||||
next_configuration.sandbox_policy.set(sandbox_policy)?;
|
||||
next_configuration.sandbox_policy = sandbox_policy;
|
||||
}
|
||||
if let Some(cwd) = updates.cwd.clone() {
|
||||
next_configuration.cwd = cwd;
|
||||
@@ -488,6 +492,7 @@ impl Session {
|
||||
session_configuration: &SessionConfiguration,
|
||||
per_turn_config: Config,
|
||||
model_family: ModelFamily,
|
||||
models_etag: Option<String>,
|
||||
conversation_id: ConversationId,
|
||||
sub_id: String,
|
||||
) -> TurnContext {
|
||||
@@ -501,6 +506,7 @@ impl Session {
|
||||
per_turn_config.clone(),
|
||||
auth_manager,
|
||||
model_family.clone(),
|
||||
models_etag,
|
||||
otel_manager,
|
||||
provider,
|
||||
session_configuration.model_reasoning_effort,
|
||||
@@ -523,13 +529,14 @@ impl Session {
|
||||
compact_prompt: session_configuration.compact_prompt.clone(),
|
||||
user_instructions: session_configuration.user_instructions.clone(),
|
||||
approval_policy: session_configuration.approval_policy.value(),
|
||||
sandbox_policy: session_configuration.sandbox_policy.get().clone(),
|
||||
sandbox_policy: session_configuration.sandbox_policy.clone(),
|
||||
shell_environment_policy: per_turn_config.shell_environment_policy.clone(),
|
||||
tools_config,
|
||||
ghost_snapshot: per_turn_config.ghost_snapshot.clone(),
|
||||
final_output_json_schema: None,
|
||||
codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
|
||||
tool_call_gate: Arc::new(ReadinessFlag::new()),
|
||||
exec_policy: session_configuration.exec_policy.clone(),
|
||||
truncation_policy: TruncationPolicy::new(
|
||||
per_turn_config.as_ref(),
|
||||
model_family.truncation_policy,
|
||||
@@ -543,7 +550,6 @@ impl Session {
|
||||
config: Arc<Config>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
models_manager: Arc<ModelsManager>,
|
||||
exec_policy: ExecPolicyManager,
|
||||
tx_event: Sender<Event>,
|
||||
initial_history: InitialHistory,
|
||||
session_source: SessionSource,
|
||||
@@ -640,7 +646,7 @@ impl Session {
|
||||
config.model_context_window,
|
||||
config.model_auto_compact_token_limit,
|
||||
config.approval_policy.value(),
|
||||
config.sandbox_policy.get().clone(),
|
||||
config.sandbox_policy.clone(),
|
||||
config.mcp_servers.keys().map(String::as_str).collect(),
|
||||
config.active_profile.clone(),
|
||||
);
|
||||
@@ -663,7 +669,6 @@ impl Session {
|
||||
rollout: Mutex::new(Some(rollout_recorder)),
|
||||
user_shell: Arc::new(default_shell),
|
||||
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
|
||||
exec_policy,
|
||||
auth_manager: Arc::clone(&auth_manager),
|
||||
otel_manager,
|
||||
models_manager: Arc::clone(&models_manager),
|
||||
@@ -691,7 +696,7 @@ impl Session {
|
||||
model: session_configuration.model.clone(),
|
||||
model_provider_id: config.model_provider_id.clone(),
|
||||
approval_policy: session_configuration.approval_policy.value(),
|
||||
sandbox_policy: session_configuration.sandbox_policy.get().clone(),
|
||||
sandbox_policy: session_configuration.sandbox_policy.clone(),
|
||||
cwd: session_configuration.cwd.clone(),
|
||||
reasoning_effort: session_configuration.model_reasoning_effort,
|
||||
history_log_id,
|
||||
@@ -708,7 +713,7 @@ impl Session {
|
||||
// Construct sandbox_state before initialize() so it can be sent to each
|
||||
// MCP server immediately after it becomes ready (avoiding blocking).
|
||||
let sandbox_state = SandboxState {
|
||||
sandbox_policy: session_configuration.sandbox_policy.get().clone(),
|
||||
sandbox_policy: session_configuration.sandbox_policy.clone(),
|
||||
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
|
||||
sandbox_cwd: session_configuration.cwd.clone(),
|
||||
};
|
||||
@@ -785,7 +790,7 @@ impl Session {
|
||||
}
|
||||
})
|
||||
{
|
||||
let curr = turn_context.client.get_model();
|
||||
let curr = turn_context.client.get_model().await;
|
||||
if prev != curr {
|
||||
warn!(
|
||||
"resuming session with different model: previous={prev}, current={curr}"
|
||||
@@ -811,13 +816,6 @@ impl Session {
|
||||
.await;
|
||||
}
|
||||
|
||||
// Seed usage info from the recorded rollout so UIs can show token counts
|
||||
// immediately on resume/fork.
|
||||
if let Some(info) = Self::last_token_info_from_rollout(&rollout_items) {
|
||||
let mut state = self.state.lock().await;
|
||||
state.set_token_info(Some(info));
|
||||
}
|
||||
|
||||
// If persisting, persist all rollout items as-is (recorder filters)
|
||||
if persist && !rollout_items.is_empty() {
|
||||
self.persist_rollout_items(&rollout_items).await;
|
||||
@@ -828,13 +826,6 @@ impl Session {
|
||||
}
|
||||
}
|
||||
|
||||
fn last_token_info_from_rollout(rollout_items: &[RolloutItem]) -> Option<TokenUsageInfo> {
|
||||
rollout_items.iter().rev().find_map(|item| match item {
|
||||
RolloutItem::EventMsg(EventMsg::TokenCount(ev)) => ev.info.clone(),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||

pub(crate) async fn update_settings(
&self,
updates: SessionSettingsUpdate,
@@ -847,8 +838,11 @@ impl Session {
Ok(())
}
Err(err) => {
warn!("rejected session settings update: {err}");
Err(err)
let wrapped = ConstraintError {
message: format!("Could not update config: {err}"),
};
warn!(%wrapped, "rejected session settings update");
Err(wrapped)
}
}
}
@@ -869,15 +863,18 @@ impl Session {
}
Err(err) => {
drop(state);
let wrapped = ConstraintError {
message: format!("Could not update config: {err}"),
};
self.send_event_raw(Event {
id: sub_id.clone(),
msg: EventMsg::Error(ErrorEvent {
message: err.to_string(),
message: wrapped.to_string(),
codex_error_info: Some(CodexErrorInfo::BadRequest),
}),
})
.await;
return Err(err);
return Err(wrapped);
}
}
};
@@ -903,7 +900,7 @@ impl Session {

if sandbox_policy_changed {
let sandbox_state = SandboxState {
sandbox_policy: per_turn_config.sandbox_policy.get().clone(),
sandbox_policy: per_turn_config.sandbox_policy.clone(),
codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: per_turn_config.cwd.clone(),
};
@@ -924,6 +921,7 @@ impl Session {
.models_manager
.construct_model_family(session_configuration.model.as_str(), &per_turn_config)
.await;
let models_etag = self.services.models_manager.get_models_etag().await;
let mut turn_context: TurnContext = Self::make_turn_context(
Some(Arc::clone(&self.services.auth_manager)),
&self.services.otel_manager,
@@ -931,6 +929,7 @@ impl Session {
&session_configuration,
per_turn_config,
model_family,
models_etag,
self.conversation_id,
sub_id,
);
@@ -1037,24 +1036,29 @@ impl Session {
amendment: &ExecPolicyAmendment,
) -> Result<(), ExecPolicyUpdateError> {
let features = self.features.clone();
let codex_home = self
.state
.lock()
.await
.session_configuration
.original_config_do_not_use
.codex_home
.clone();
let (codex_home, current_policy) = {
let state = self.state.lock().await;
(
state
.session_configuration
.original_config_do_not_use
.codex_home
.clone(),
state.session_configuration.exec_policy.clone(),
)
};

if !features.enabled(Feature::ExecPolicy) {
error!("attempted to append execpolicy rule while execpolicy feature is disabled");
return Err(ExecPolicyUpdateError::FeatureDisabled);
}

self.services
.exec_policy
.append_amendment_and_update(&codex_home, amendment)
.await?;
crate::exec_policy::append_execpolicy_amendment_and_update(
&codex_home,
&current_policy,
&amendment.command,
)
.await?;

Ok(())
}
@@ -1334,7 +1338,7 @@ impl Session {
if let Some(token_usage) = token_usage {
state.update_token_info_from_usage(
token_usage,
turn_context.client.get_model_context_window(),
turn_context.client.get_model_context_window().await,
);
}
}
@@ -1346,6 +1350,7 @@ impl Session {
.clone_history()
.await
.estimate_token_count(turn_context)
.await
else {
return;
};
@@ -1366,7 +1371,7 @@ impl Session {
};

if info.model_context_window.is_none() {
info.model_context_window = turn_context.client.get_model_context_window();
info.model_context_window = turn_context.client.get_model_context_window().await;
}

state.set_token_info(Some(info));
@@ -1396,7 +1401,7 @@ impl Session {
}

pub(crate) async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
let context_window = turn_context.client.get_model_context_window();
let context_window = turn_context.client.get_model_context_window().await;
if let Some(context_window) = context_window {
{
let mut state = self.state.lock().await;
@@ -1439,14 +1444,12 @@ impl Session {
message: impl Into<String>,
codex_error: CodexErr,
) {
let additional_details = codex_error.to_string();
let codex_error_info = CodexErrorInfo::ResponseStreamDisconnected {
http_status_code: codex_error.http_status_code_value(),
};
let event = EventMsg::StreamError(StreamErrorEvent {
message: message.into(),
codex_error_info: Some(codex_error_info),
additional_details: Some(additional_details),
});
self.send_event(turn_context, event).await;
}
@@ -1776,16 +1779,7 @@ mod handlers {
final_output_json_schema: Some(final_output_json_schema),
},
),
Op::UserInput {
items,
final_output_json_schema,
} => (
items,
SessionSettingsUpdate {
final_output_json_schema: Some(final_output_json_schema),
..Default::default()
},
),
Op::UserInput { items } => (items, SessionSettingsUpdate::default()),
_ => unreachable!(),
};

@@ -2077,7 +2071,7 @@ mod handlers {
review_request: ReviewRequest,
) {
let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await;
match resolve_review_request(review_request, turn_context.cwd.as_path()) {
match resolve_review_request(review_request, config.cwd.as_path()) {
Ok(resolved) => {
spawn_review_thread(
Arc::clone(sess),
@@ -2116,6 +2110,7 @@ async fn spawn_review_thread(
.models_manager
.construct_model_family(&model, &config)
.await;
let models_etag = sess.services.models_manager.get_models_etag().await;
// For reviews, disable web_search and view_image regardless of global settings.
let mut review_features = sess.features.clone();
review_features
@@ -2148,6 +2143,7 @@ async fn spawn_review_thread(
per_turn_config.clone(),
auth_manager,
model_family.clone(),
models_etag,
otel_manager,
provider,
per_turn_config.model_reasoning_effort,
@@ -2172,6 +2168,7 @@ async fn spawn_review_thread(
final_output_json_schema: None,
codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),
tool_call_gate: Arc::new(ReadinessFlag::new()),
exec_policy: parent_turn_context.exec_policy.clone(),
truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy),
};

@@ -2241,6 +2238,7 @@ pub(crate) async fn run_task(
let auto_compact_limit = turn_context
.client
.get_model_family()
.await
.auto_compact_token_limit()
.unwrap_or(i64::MAX);
let total_usage_tokens = sess.get_total_token_usage().await;
@@ -2248,7 +2246,7 @@ pub(crate) async fn run_task(
run_auto_compact(&sess, &turn_context).await;
}
let event = EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.client.get_model_context_window().await,
});
sess.send_event(&turn_context, event).await;

@@ -2313,7 +2311,7 @@ pub(crate) async fn run_task(
.collect::<Vec<String>>();
match run_turn(
Arc::clone(&sess),
Arc::clone(&turn_context),
&turn_context,
Arc::clone(&turn_diff_tracker),
turn_input,
cancellation_token.child_token(),
@@ -2372,6 +2370,36 @@ pub(crate) async fn run_task(
last_agent_message
}

pub(crate) async fn refresh_models_and_reset_turn_context(
sess: &Arc<Session>,
turn_context: &Arc<TurnContext>,
) {
let config = {
let state = sess.state.lock().await;
state
.session_configuration
.original_config_do_not_use
.clone()
};
if let Err(err) = sess
.services
.models_manager
.refresh_available_models(&config)
.await
{
error!("failed to refresh models after outdated models error: {err}");
}
let model = turn_context.client.get_model().await;
let model_family = sess
.services
.models_manager
.construct_model_family(&model, &config)
.await;
let models_etag = sess.services.models_manager.get_models_etag().await;
turn_context.client.update_model_family(model_family).await;
turn_context.client.update_models_etag(models_etag).await;
}

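Note: the new `refresh_models_and_reset_turn_context` helper deliberately clones the config inside a short-lived lock scope before doing any `.await` work, so the state mutex is never held across an await point. A minimal sketch of that pattern, with stand-in types (assumes the `tokio` crate):

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

#[derive(Clone)]
struct Config {
    model: String,
}

struct State {
    config: Config,
}

// Clone what you need while the lock is held; the guard is dropped at
// the end of the block, so subsequent awaits do not block other tasks.
async fn snapshot_config(state: &Arc<Mutex<State>>) -> Config {
    let guard = state.lock().await;
    guard.config.clone()
}

#[tokio::main]
async fn main() {
    let state = Arc::new(Mutex::new(State {
        config: Config { model: "stub-model".into() },
    }));
    let config = snapshot_config(&state).await;
    assert_eq!(config.model, "stub-model");
}
```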
async fn run_auto_compact(sess: &Arc<Session>, turn_context: &Arc<TurnContext>) {
if should_use_remote_compact_task(sess.as_ref(), &turn_context.client.get_provider()) {
run_inline_remote_auto_compact_task(Arc::clone(sess), Arc::clone(turn_context)).await;
@@ -2384,17 +2412,19 @@ async fn run_auto_compact(sess: &Arc<Session>, turn_context: &Arc<TurnContext>)
skip_all,
fields(
turn_id = %turn_context.sub_id,
model = %turn_context.client.get_model(),
model = tracing::field::Empty,
cwd = %turn_context.cwd.display()
)
)]
async fn run_turn(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
turn_context: &Arc<TurnContext>,
turn_diff_tracker: SharedTurnDiffTracker,
input: Vec<ResponseItem>,
cancellation_token: CancellationToken,
) -> CodexResult<TurnRunResult> {
let model = turn_context.client.get_model().await;
tracing::Span::current().record("model", field::display(&model));
let mcp_tools = sess
.services
.mcp_connection_manager
@@ -2403,37 +2433,32 @@ async fn run_turn(
.list_all_tools()
.or_cancel(&cancellation_token)
.await?;
let router = Arc::new(ToolRouter::from_config(
&turn_context.tools_config,
Some(
mcp_tools
.into_iter()
.map(|(name, tool)| (name, tool.tool))
.collect(),
),
));

let model_supports_parallel = turn_context
.client
.get_model_family()
.supports_parallel_tool_calls;

let prompt = Prompt {
input,
tools: router.specs(),
parallel_tool_calls: model_supports_parallel && sess.enabled(Feature::ParallelToolCalls),
base_instructions_override: turn_context.base_instructions.clone(),
output_schema: turn_context.final_output_json_schema.clone(),
};

let mut retries = 0;
loop {
let router = Arc::new(ToolRouter::from_config(
&turn_context.tools_config,
Some(
mcp_tools
.clone()
.into_iter()
.map(|(name, tool)| (name, tool.tool))
.collect(),
),
));
let prompt = Prompt::new(
sess.as_ref(),
turn_context.as_ref(),
router.as_ref(),
&input,
);

match try_run_turn(
Arc::clone(&router),
Arc::clone(&sess),
Arc::clone(&turn_context),
Arc::clone(turn_context),
Arc::clone(&turn_diff_tracker),
&prompt,
&prompt.await,
cancellation_token.child_token(),
)
.await
@@ -2447,13 +2472,13 @@ async fn run_turn(
Err(CodexErr::EnvVar(var)) => return Err(CodexErr::EnvVar(var)),
Err(e @ CodexErr::Fatal(_)) => return Err(e),
Err(e @ CodexErr::ContextWindowExceeded) => {
sess.set_total_tokens_full(&turn_context).await;
sess.set_total_tokens_full(turn_context).await;
return Err(e);
}
Err(CodexErr::UsageLimitReached(e)) => {
let rate_limits = e.rate_limits.clone();
if let Some(rate_limits) = rate_limits {
sess.update_rate_limits(&turn_context, rate_limits).await;
sess.update_rate_limits(turn_context, rate_limits).await;
}
return Err(CodexErr::UsageLimitReached(e));
}
@@ -2467,6 +2492,11 @@ async fn run_turn(
let max_retries = turn_context.client.get_provider().stream_max_retries();
if retries < max_retries {
retries += 1;
// Refresh models if we got an outdated models error
if matches!(e, CodexErr::OutdatedModels) {
refresh_models_and_reset_turn_context(&sess, turn_context).await;
continue;
}
let delay = match e {
CodexErr::Stream(_, Some(delay)) => delay,
_ => backoff(retries),
@@ -2479,7 +2509,7 @@ async fn run_turn(
// user understands what is happening instead of staring
// at a seemingly frozen screen.
sess.notify_stream_error(
&turn_context,
turn_context,
format!("Reconnecting... {retries}/{max_retries}"),
e,
)
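Note: the retry arm above either honors a server-supplied delay (`CodexErr::Stream(_, Some(delay))`) or falls back to a `backoff(retries)` helper that this compare does not show. A minimal sketch of one plausible exponential-backoff-with-jitter implementation; the base, cap, and jitter range here are illustrative assumptions, not the repo's actual values (assumes the `rand` crate):

```rust
use rand::Rng;
use std::time::Duration;

// Hypothetical exponential backoff: 200ms base, doubled per attempt,
// capped at 30s, then scaled by a random jitter factor in [0.9, 1.1).
fn backoff(attempt: u64) -> Duration {
    let base_ms: u64 = 200;
    let exp = base_ms.saturating_mul(1u64 << attempt.min(16));
    let capped = exp.min(30_000);
    let jitter = rand::thread_rng().gen_range(0.9..1.1);
    Duration::from_millis((capped as f64 * jitter) as u64)
}

fn main() {
    for attempt in 1..=5 {
        println!("attempt {attempt}: wait {:?}", backoff(attempt));
    }
}
```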
@@ -2524,7 +2554,7 @@ async fn drain_in_flight(
skip_all,
fields(
turn_id = %turn_context.sub_id,
model = %turn_context.client.get_model()
model = tracing::field::Empty,
)
)]
async fn try_run_turn(
@@ -2535,33 +2565,20 @@ async fn try_run_turn(
prompt: &Prompt,
cancellation_token: CancellationToken,
) -> CodexResult<TurnRunResult> {
let model = turn_context.client.get_model().await;
tracing::Span::current().record("model", field::display(&model));
let rollout_item = RolloutItem::TurnContext(TurnContextItem {
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
model,
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
truncation_policy: Some(turn_context.truncation_policy.into()),
});

feedback_tags!(
model = turn_context.client.get_model(),
approval_policy = turn_context.approval_policy,
sandbox_policy = turn_context.sandbox_policy,
effort = turn_context.client.get_reasoning_effort(),
auth_mode = sess.services.auth_manager.get_auth_mode(),
features = sess.features.enabled_features(),
);

sess.persist_rollout_items(&[rollout_item]).await;
let mut stream = turn_context
.client
.clone()
.stream(prompt)
.instrument(trace_span!("stream_request"))
.or_cancel(&cancellation_token)
@@ -2648,13 +2665,6 @@ async fn try_run_turn(
// token usage is available to avoid duplicate TokenCount events.
sess.update_rate_limits(&turn_context, snapshot).await;
}
ResponseEvent::ModelsEtag(etag) => {
// Update internal state with latest models etag
sess.services
.models_manager
.refresh_if_new_etag(etag, sess.features.enabled(Feature::RemoteModels))
.await;
}
ResponseEvent::Completed {
response_id: _,
token_usage,
@@ -2786,7 +2796,6 @@ mod tests {
use crate::function_tool::FunctionCallError;
use crate::shell::default_user_shell;
use crate::tools::format_exec_output_str;

use codex_protocol::models::FunctionCallOutputPayload;

use crate::protocol::CompactedItem;
@@ -2795,9 +2804,6 @@ mod tests {
use crate::protocol::RateLimitSnapshot;
use crate::protocol::RateLimitWindow;
use crate::protocol::ResumedHistory;
use crate::protocol::TokenCountEvent;
use crate::protocol::TokenUsage;
use crate::protocol::TokenUsageInfo;
use crate::state::TaskKind;
use crate::tasks::SessionTask;
use crate::tasks::SessionTaskContext;
@@ -2852,83 +2858,6 @@ mod tests {
assert_eq!(expected, actual);
}

#[tokio::test]
async fn record_initial_history_seeds_token_info_from_rollout() {
let (session, turn_context) = make_session_and_context().await;
let (mut rollout_items, _expected) = sample_rollout(&session, &turn_context);

let info1 = TokenUsageInfo {
total_token_usage: TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 20,
reasoning_output_tokens: 0,
total_tokens: 30,
},
last_token_usage: TokenUsage {
input_tokens: 3,
cached_input_tokens: 0,
output_tokens: 4,
reasoning_output_tokens: 0,
total_tokens: 7,
},
model_context_window: Some(1_000),
};
let info2 = TokenUsageInfo {
total_token_usage: TokenUsage {
input_tokens: 100,
cached_input_tokens: 50,
output_tokens: 200,
reasoning_output_tokens: 25,
total_tokens: 375,
},
last_token_usage: TokenUsage {
input_tokens: 10,
cached_input_tokens: 0,
output_tokens: 20,
reasoning_output_tokens: 5,
total_tokens: 35,
},
model_context_window: Some(2_000),
};

rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: Some(info1),
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: None,
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: Some(info2.clone()),
rate_limits: None,
},
)));
rollout_items.push(RolloutItem::EventMsg(EventMsg::TokenCount(
TokenCountEvent {
info: None,
rate_limits: None,
},
)));

session
.record_initial_history(InitialHistory::Resumed(ResumedHistory {
conversation_id: ConversationId::default(),
history: rollout_items,
rollout_path: PathBuf::from("/tmp/resume.jsonl"),
}))
.await;

let actual = session.state.lock().await.token_info();
assert_eq!(actual, Some(info2));
}

#[tokio::test]
async fn record_initial_history_reconstructs_forked_transcript() {
let (session, turn_context) = make_session_and_context().await;
@@ -2961,6 +2890,7 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};

@@ -3027,6 +2957,7 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};

@@ -3219,7 +3150,6 @@ mod tests {
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
let exec_policy = ExecPolicyManager::default();
let model = ModelsManager::get_model_offline(config.model.as_deref());
let session_configuration = SessionConfiguration {
provider: config.model_provider.clone(),
@@ -3234,6 +3164,7 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};
let per_turn_config = Session::build_per_turn_config(&session_configuration);
@@ -3259,10 +3190,9 @@ mod tests {
rollout: Mutex::new(None),
user_shell: Arc::new(default_user_shell()),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: auth_manager.clone(),
otel_manager: otel_manager.clone(),
models_manager: Arc::clone(&models_manager),
models_manager,
tool_approvals: Mutex::new(ApprovalStore::default()),
skills_manager,
};
@@ -3274,6 +3204,7 @@ mod tests {
&session_configuration,
per_turn_config,
model_family,
None,
conversation_id,
"turn_id".to_string(),
);
@@ -3306,7 +3237,6 @@ mod tests {
let auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let models_manager = Arc::new(ModelsManager::new(auth_manager.clone()));
let exec_policy = ExecPolicyManager::default();
let model = ModelsManager::get_model_offline(config.model.as_deref());
let session_configuration = SessionConfiguration {
provider: config.model_provider.clone(),
@@ -3321,6 +3251,7 @@ mod tests {
sandbox_policy: config.sandbox_policy.clone(),
cwd: config.cwd.clone(),
original_config_do_not_use: Arc::clone(&config),
exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())),
session_source: SessionSource::Exec,
};
let per_turn_config = Session::build_per_turn_config(&session_configuration);
@@ -3346,10 +3277,9 @@ mod tests {
rollout: Mutex::new(None),
user_shell: Arc::new(default_user_shell()),
show_raw_agent_reasoning: config.show_raw_agent_reasoning,
exec_policy,
auth_manager: Arc::clone(&auth_manager),
otel_manager: otel_manager.clone(),
models_manager: Arc::clone(&models_manager),
models_manager,
tool_approvals: Mutex::new(ApprovalStore::default()),
skills_manager,
};
@@ -3361,6 +3291,7 @@ mod tests {
&session_configuration,
per_turn_config,
model_family,
None,
conversation_id,
"turn_id".to_string(),
));

@@ -25,7 +25,7 @@ use crate::codex::Session;
use crate::codex::TurnContext;
use crate::config::Config;
use crate::error::CodexErr;
use crate::models_manager::manager::ModelsManager;
use crate::openai_models::models_manager::ModelsManager;
use codex_protocol::protocol::InitialHistory;

/// Start an interactive sub-Codex conversation and return IO channels.
@@ -118,11 +118,7 @@ pub(crate) async fn run_codex_conversation_one_shot(
.await?;

// Send the initial input to kick off the one-shot turn.
io.submit(Op::UserInput {
items: input,
final_output_json_schema: None,
})
.await?;
io.submit(Op::UserInput { items: input }).await?;

// Bridge events so we can observe completion and shut down automatically.
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
@@ -188,10 +184,6 @@ async fn forward_events(
id: _,
msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_),
} => {}
Event {
id: _,
msg: EventMsg::TokenCount(_),
} => {}
Event {
id: _,
msg: EventMsg::SessionConfigured(_),

@@ -6,6 +6,7 @@ use crate::client_common::ResponseEvent;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::codex::get_last_assistant_message_from_turn;
use crate::codex::refresh_models_and_reset_turn_context;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::features::Feature;
@@ -55,7 +56,7 @@ pub(crate) async fn run_compact_task(
input: Vec<UserInput>,
) {
let start_event = EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.client.get_model_context_window().await,
});
sess.send_event(&turn_context, start_event).await;
run_compact_task_inner(sess.clone(), turn_context, input).await;
@@ -83,14 +84,9 @@ async fn run_compact_task_inner(
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
model: turn_context.client.get_model().await,
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
truncation_policy: Some(turn_context.truncation_policy.into()),
});
sess.persist_rollout_items(&[rollout_item]).await;

@@ -137,6 +133,10 @@ async fn run_compact_task_inner(
Err(e) => {
if retries < max_retries {
retries += 1;
if matches!(e, CodexErr::OutdatedModels) {
refresh_models_and_reset_turn_context(&sess, &turn_context).await;
continue;
}
let delay = backoff(retries);
sess.notify_stream_error(
turn_context.as_ref(),
@@ -295,7 +295,7 @@ async fn drain_to_completed(
turn_context: &TurnContext,
prompt: &Prompt,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
let mut stream = turn_context.client.stream(prompt).await?;
loop {
let maybe_event = stream.next().await;
let Some(event) = maybe_event else {

@@ -20,7 +20,7 @@ pub(crate) async fn run_inline_remote_auto_compact_task(

pub(crate) async fn run_remote_compact_task(sess: Arc<Session>, turn_context: Arc<TurnContext>) {
let start_event = EventMsg::TaskStarted(TaskStartedEvent {
model_context_window: turn_context.client.get_model_context_window(),
model_context_window: turn_context.client.get_model_context_window().await,
});
sess.send_event(&turn_context, start_event).await;


@@ -4,25 +4,25 @@ use std::sync::Arc;
use thiserror::Error;

#[derive(Debug, Error, PartialEq, Eq)]
pub enum ConstraintError {
#[error("value `{candidate}` is not in the allowed set {allowed}")]
InvalidValue { candidate: String, allowed: String },

#[error("field `{field_name}` cannot be empty")]
EmptyField { field_name: String },
#[error("{message}")]
pub struct ConstraintError {
pub message: String,
}

impl ConstraintError {
pub fn invalid_value(candidate: impl Into<String>, allowed: impl Into<String>) -> Self {
Self::InvalidValue {
candidate: candidate.into(),
allowed: allowed.into(),
Self {
message: format!(
"value `{}` is not in the allowed set {}",
candidate.into(),
allowed.into()
),
}
}

pub fn empty_field(field_name: impl Into<String>) -> Self {
Self::EmptyField {
field_name: field_name.into(),
Self {
message: format!("field `{}` cannot be empty", field_name.into()),
}
}
}

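Note: the refactor above collapses the typed `ConstraintError` variants into a single message-carrying struct, so constructors format the message up front and `Display` simply forwards it; the trade-off is that callers can no longer match on specific variants. A minimal standalone sketch of the new shape as shown in the diff (assumes the `thiserror` crate):

```rust
use thiserror::Error;

// Single-variant error: constructors prebuild the message, and the
// thiserror derive makes Display print it verbatim.
#[derive(Debug, Error, PartialEq, Eq)]
#[error("{message}")]
pub struct ConstraintError {
    pub message: String,
}

impl ConstraintError {
    pub fn invalid_value(candidate: impl Into<String>, allowed: impl Into<String>) -> Self {
        Self {
            message: format!(
                "value `{}` is not in the allowed set {}",
                candidate.into(),
                allowed.into()
            ),
        }
    }
}

fn main() {
    let err = ConstraintError::invalid_value("yolo", "{read-only, workspace-write}");
    // Prints: value `yolo` is not in the allowed set {read-only, workspace-write}
    println!("{err}");
}
```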
@@ -8,12 +8,10 @@ use crate::config::types::OtelConfig;
use crate::config::types::OtelConfigToml;
use crate::config::types::OtelExporterKind;
use crate::config::types::SandboxWorkspaceWrite;
use crate::config::types::ScrollInputMode;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyToml;
use crate::config::types::Tui;
use crate::config::types::UriBasedFileOpener;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
@@ -38,12 +36,12 @@ use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use serde::Deserialize;
use serde::Serialize;
use similar::DiffableStr;
use std::collections::BTreeMap;
use std::collections::HashMap;
@@ -94,10 +92,6 @@ pub(crate) fn test_config() -> Config {
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// Provenance for how this [`Config`] was derived (merged layers + enforced
/// requirements).
pub config_layer_stack: ConfigLayerStack,

/// Optional override of model selection.
pub model: Option<String>,

@@ -119,7 +113,7 @@ pub struct Config {
/// Approval policy for executing commands.
pub approval_policy: Constrained<AskForApproval>,

pub sandbox_policy: Constrained<SandboxPolicy>,
pub sandbox_policy: SandboxPolicy,

/// True if the user passed in an override or set a value in config.toml
/// for either of approval_policy or sandbox_mode.
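Note: `Constrained<T>` itself is not shown in this compare, but from its call sites in the diff (`get()`, `set()`, `allow_any()`) it plausibly looks something like the sketch below. Treat every name and detail here as an inferred assumption, not the actual definition being removed:

```rust
// Hypothetical reconstruction of the Constrained<T> wrapper that the
// diff strips from sandbox_policy; inferred from usage only.
#[derive(Debug, Clone, PartialEq)]
pub struct Constrained<T> {
    value: T,
    // None means any value is allowed; Some holds a requirement check.
    validator: Option<fn(&T) -> bool>,
}

impl<T> Constrained<T> {
    pub fn allow_any(value: T) -> Self {
        Self { value, validator: None }
    }

    pub fn get(&self) -> &T {
        &self.value
    }

    pub fn set(&mut self, value: T) -> Result<(), String> {
        match self.validator {
            Some(check) if !check(&value) => Err("value violates requirement".to_string()),
            _ => {
                self.value = value;
                Ok(())
            }
        }
    }
}
```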
@@ -184,58 +178,6 @@ pub struct Config {
/// Show startup tooltips in the TUI welcome screen.
pub show_tooltips: bool,

/// Override the events-per-wheel-tick factor for TUI2 scroll normalization.
///
/// This is the same `tui.scroll_events_per_tick` value from `config.toml`, plumbed through the
/// merged [`Config`] object (see [`Tui`]) so TUI2 can normalize scroll event density per
/// terminal.
pub tui_scroll_events_per_tick: Option<u16>,

/// Override the number of lines applied per wheel tick in TUI2.
///
/// This is the same `tui.scroll_wheel_lines` value from `config.toml` (see [`Tui`]). TUI2
/// applies it to wheel-like scroll streams. Trackpad-like scrolling uses a separate
/// `tui.scroll_trackpad_lines` setting.
pub tui_scroll_wheel_lines: Option<u16>,

/// Override the number of lines per tick-equivalent used for trackpad scrolling in TUI2.
///
/// This is the same `tui.scroll_trackpad_lines` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_lines: Option<u16>,

/// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2.
///
/// This is the same `tui.scroll_trackpad_accel_events` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_accel_events: Option<u16>,

/// Trackpad acceleration: maximum multiplier applied to trackpad-like streams in TUI2.
///
/// This is the same `tui.scroll_trackpad_accel_max` value from `config.toml` (see [`Tui`]).
pub tui_scroll_trackpad_accel_max: Option<u16>,

/// Control how TUI2 interprets mouse scroll input (wheel vs trackpad).
///
/// This is the same `tui.scroll_mode` value from `config.toml` (see [`Tui`]).
pub tui_scroll_mode: ScrollInputMode,

/// Override the wheel tick detection threshold (ms) for TUI2 auto scroll mode.
///
/// This is the same `tui.scroll_wheel_tick_detect_max_ms` value from `config.toml` (see
/// [`Tui`]).
pub tui_scroll_wheel_tick_detect_max_ms: Option<u64>,

/// Override the wheel-like end-of-stream threshold (ms) for TUI2 auto scroll mode.
///
/// This is the same `tui.scroll_wheel_like_max_duration_ms` value from `config.toml` (see
/// [`Tui`]).
pub tui_scroll_wheel_like_max_duration_ms: Option<u64>,

/// Invert mouse scroll direction for TUI2.
///
/// This is the same `tui.scroll_invert` value from `config.toml` (see [`Tui`]) and is applied
/// consistently to both mouse wheels and trackpads.
pub tui_scroll_invert: bool,

/// The directory that should be treated as the current working directory
/// for the session. All relative paths inside the business-logic layer are
/// resolved against this path.
@@ -302,6 +244,9 @@ pub struct Config {
/// Optional override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Optional override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,

@@ -324,6 +269,10 @@ pub struct Config {
/// If set to `true`, used only the experimental unified exec tool.
pub use_experimental_unified_exec_tool: bool,

/// If set to `true`, use the experimental official Rust MCP client.
/// https://github.com/modelcontextprotocol/rust-sdk
pub use_experimental_use_rmcp_client: bool,

/// Settings for ghost snapshots (used for undo).
pub ghost_snapshot: GhostSnapshotConfig,

@@ -363,7 +312,6 @@ pub struct ConfigBuilder {
cli_overrides: Option<Vec<(String, TomlValue)>>,
harness_overrides: Option<ConfigOverrides>,
loader_overrides: Option<LoaderOverrides>,
thread_agnostic: bool,
}

impl ConfigBuilder {
@@ -372,13 +320,6 @@ impl ConfigBuilder {
self
}

/// Load a "thread-agnostic" config stack, which intentionally ignores any
/// in-repo `.codex/` config layers (because there is no cwd/project context).
pub fn thread_agnostic(mut self) -> Self {
self.thread_agnostic = true;
self
}

pub fn cli_overrides(mut self, cli_overrides: Vec<(String, TomlValue)>) -> Self {
self.cli_overrides = Some(cli_overrides);
self
@@ -400,22 +341,13 @@ impl ConfigBuilder {
cli_overrides,
harness_overrides,
loader_overrides,
thread_agnostic,
} = self;
let codex_home = codex_home.map_or_else(find_codex_home, std::io::Result::Ok)?;
let cli_overrides = cli_overrides.unwrap_or_default();
let harness_overrides = harness_overrides.unwrap_or_default();
let loader_overrides = loader_overrides.unwrap_or_default();
let cwd = if thread_agnostic {
None
} else {
Some(match harness_overrides.cwd.as_deref() {
Some(path) => AbsolutePathBuf::try_from(path)?,
None => AbsolutePathBuf::current_dir()?,
})
};
let config_layer_stack =
load_config_layers_state(&codex_home, cwd, &cli_overrides, loader_overrides).await?;
load_config_layers_state(&codex_home, &cli_overrides, loader_overrides).await?;
let merged_toml = config_layer_stack.effective_config();

// Note that each layer in ConfigLayerStack should have resolved
@@ -425,11 +357,11 @@ impl ConfigBuilder {
let config_toml: ConfigToml = merged_toml
.try_into()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
Config::load_config_with_layer_stack(
Config::load_config_with_requirements(
config_toml,
harness_overrides,
codex_home,
config_layer_stack,
config_layer_stack.requirements().clone(),
)
}
}
@@ -469,16 +401,10 @@ impl Config {
/// applied yet, which risks failing to enforce required constraints.
pub async fn load_config_as_toml_with_cli_overrides(
codex_home: &Path,
cwd: &AbsolutePathBuf,
cli_overrides: Vec<(String, TomlValue)>,
) -> std::io::Result<ConfigToml> {
let config_layer_stack = load_config_layers_state(
codex_home,
Some(cwd.clone()),
&cli_overrides,
LoaderOverrides::default(),
)
.await?;
let config_layer_stack =
load_config_layers_state(codex_home, &cli_overrides, LoaderOverrides::default()).await?;

let merged_toml = config_layer_stack.effective_config();
let cfg = deserialize_config_toml_with_base(merged_toml, codex_home).map_err(|e| {
@@ -512,12 +438,8 @@ pub async fn load_global_mcp_servers(
// config layers for deprecated fields rather than reporting on the merged
// result.
let cli_overrides = Vec::<(String, TomlValue)>::new();
// There is no cwd/project context for this query, so this will not include
// MCP servers defined in in-repo .codex/ folders.
let cwd: Option<AbsolutePathBuf> = None;
let config_layer_stack =
load_config_layers_state(codex_home, cwd, &cli_overrides, LoaderOverrides::default())
.await?;
load_config_layers_state(codex_home, &cli_overrides, LoaderOverrides::default()).await?;
let merged_toml = config_layer_stack.effective_config();
let Some(servers_value) = merged_toml.get("mcp_servers") else {
return Ok(BTreeMap::new());
@@ -678,7 +600,7 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R
}

/// Base config deserialized from ~/.codex/config.toml.
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
@@ -794,6 +716,9 @@ pub struct ConfigToml {
/// Override to force-enable reasoning summaries for the configured model.
pub model_supports_reasoning_summaries: Option<bool>,

/// Override to force reasoning summary format for the configured model.
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,

/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
pub chatgpt_base_url: Option<String>,

@@ -810,11 +735,6 @@ pub struct ConfigToml {
#[serde(default)]
pub ghost_snapshot: Option<GhostSnapshotToml>,

/// Markers used to detect the project root when searching parent
/// directories for `.codex` folders. Defaults to [".git"] when unset.
#[serde(default)]
pub project_root_markers: Option<Vec<String>>,

/// When `true`, checks for Codex updates on startup and surfaces update prompts.
/// Set to `false` only if your Codex updates are centrally managed.
/// Defaults to `true`.
@@ -839,6 +759,7 @@ pub struct ConfigToml {
pub experimental_instructions_file: Option<AbsolutePathBuf>,
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_rmcp_client: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
/// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama".
pub oss_provider: Option<String>,
@@ -869,7 +790,7 @@ impl From<ConfigToml> for UserSavedConfig {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
pub trust_level: Option<TrustLevel>,
}
@@ -884,7 +805,7 @@ impl ProjectConfig {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
@@ -903,7 +824,7 @@ impl From<ToolsToml> for Tools {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)]
#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)]
pub struct GhostSnapshotToml {
/// Exclude untracked files larger than this many bytes from ghost snapshots.
#[serde(alias = "ignore_untracked_files_over_bytes")]
@@ -1078,17 +999,16 @@ impl Config {
codex_home: PathBuf,
) -> std::io::Result<Self> {
// Note this ignores requirements.toml enforcement for tests.
let config_layer_stack = ConfigLayerStack::default();
Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack)
let requirements = ConfigRequirements::default();
Self::load_config_with_requirements(cfg, overrides, codex_home, requirements)
}

fn load_config_with_layer_stack(
fn load_config_with_requirements(
cfg: ConfigToml,
overrides: ConfigOverrides,
codex_home: PathBuf,
config_layer_stack: ConfigLayerStack,
requirements: ConfigRequirements,
) -> std::io::Result<Self> {
let requirements = config_layer_stack.requirements().clone();
let user_instructions = Self::load_instructions(Some(&codex_home));

// Destructure ConfigOverrides fully to ensure all overrides are applied.
@@ -1255,6 +1175,7 @@ impl Config {
let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform);
let tools_web_search_request = features.enabled(Feature::WebSearchRequest);
let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec);
let use_experimental_use_rmcp_client = features.enabled(Feature::RmcpClient);

let forced_chatgpt_workspace_id =
cfg.forced_chatgpt_workspace_id.as_ref().and_then(|value| {
@@ -1314,15 +1235,11 @@ impl Config {
// Config.
let ConfigRequirements {
approval_policy: mut constrained_approval_policy,
sandbox_policy: mut constrained_sandbox_policy,
} = requirements;

constrained_approval_policy
.set(approval_policy)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}")))?;
constrained_sandbox_policy
.set(sandbox_policy)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}")))?;

let config = Self {
model,
@@ -1333,7 +1250,7 @@ impl Config {
model_provider,
cwd: resolved_cwd,
approval_policy: constrained_approval_policy,
sandbox_policy: constrained_sandbox_policy,
sandbox_policy,
did_user_set_custom_approval_policy_or_sandbox_mode,
forced_auto_mode_downgraded_on_windows,
shell_environment_policy,
@@ -1366,7 +1283,6 @@ impl Config {
.collect(),
tool_output_token_limit: cfg.tool_output_token_limit,
codex_home,
config_layer_stack,
history,
file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
codex_linux_sandbox_exe,
@@ -1384,6 +1300,7 @@ impl Config {
.or(cfg.model_reasoning_summary)
.unwrap_or_default(),
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
chatgpt_base_url: config_profile
.chatgpt_base_url
@@ -1394,6 +1311,7 @@ impl Config {
include_apply_patch_tool: include_apply_patch_tool_flag,
tools_web_search_request,
use_experimental_unified_exec_tool,
use_experimental_use_rmcp_client,
ghost_snapshot,
features,
active_profile: active_profile_name,
@@ -1409,27 +1327,6 @@ impl Config {
.unwrap_or_default(),
animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true),
show_tooltips: cfg.tui.as_ref().map(|t| t.show_tooltips).unwrap_or(true),
tui_scroll_events_per_tick: cfg.tui.as_ref().and_then(|t| t.scroll_events_per_tick),
tui_scroll_wheel_lines: cfg.tui.as_ref().and_then(|t| t.scroll_wheel_lines),
tui_scroll_trackpad_lines: cfg.tui.as_ref().and_then(|t| t.scroll_trackpad_lines),
tui_scroll_trackpad_accel_events: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_trackpad_accel_events),
tui_scroll_trackpad_accel_max: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_trackpad_accel_max),
tui_scroll_mode: cfg.tui.as_ref().map(|t| t.scroll_mode).unwrap_or_default(),
tui_scroll_wheel_tick_detect_max_ms: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_wheel_tick_detect_max_ms),
tui_scroll_wheel_like_max_duration_ms: cfg
.tui
.as_ref()
.and_then(|t| t.scroll_wheel_like_max_duration_ms),
tui_scroll_invert: cfg.tui.as_ref().map(|t| t.scroll_invert).unwrap_or(false),
otel: {
let t: OtelConfigToml = cfg.otel.unwrap_or_default();
let log_user_prompt = t.log_user_prompt.unwrap_or(false);
@@ -1602,23 +1499,8 @@ persistence = "none"
.expect("TUI config without notifications should succeed");
let tui = parsed.tui.expect("config should include tui section");

assert_eq!(
tui,
Tui {
notifications: Notifications::Enabled(true),
animations: true,
show_tooltips: true,
scroll_events_per_tick: None,
scroll_wheel_lines: None,
scroll_trackpad_lines: None,
scroll_trackpad_accel_events: None,
scroll_trackpad_accel_max: None,
scroll_mode: ScrollInputMode::Auto,
scroll_wheel_tick_detect_max_ms: None,
scroll_wheel_like_max_duration_ms: None,
scroll_invert: false,
}
);
assert_eq!(tui.notifications, Notifications::Enabled(true));
assert!(tui.show_tooltips);
}

#[test]
@@ -1790,12 +1672,12 @@ trust_level = "trusted"
config.forced_auto_mode_downgraded_on_windows,
"expected workspace-write request to be downgraded on Windows"
);
match config.sandbox_policy.get() {
&SandboxPolicy::ReadOnly => {}
match config.sandbox_policy {
SandboxPolicy::ReadOnly => {}
other => panic!("expected read-only policy on Windows, got {other:?}"),
}
} else {
match config.sandbox_policy.get() {
match config.sandbox_policy {
SandboxPolicy::WorkspaceWrite { writable_roots, .. } => {
assert_eq!(
writable_roots
@@ -1927,8 +1809,8 @@ trust_level = "trusted"
)?;

assert!(matches!(
config.sandbox_policy.get(),
&SandboxPolicy::DangerFullAccess
config.sandbox_policy,
SandboxPolicy::DangerFullAccess
));
assert!(config.did_user_set_custom_approval_policy_or_sandbox_mode);

@@ -1964,14 +1846,11 @@ trust_level = "trusted"
)?;

if cfg!(target_os = "windows") {
assert!(matches!(
config.sandbox_policy.get(),
SandboxPolicy::ReadOnly
));
assert!(matches!(config.sandbox_policy, SandboxPolicy::ReadOnly));
assert!(config.forced_auto_mode_downgraded_on_windows);
} else {
assert!(matches!(
config.sandbox_policy.get(),
config.sandbox_policy,
SandboxPolicy::WorkspaceWrite { .. }
));
assert!(!config.forced_auto_mode_downgraded_on_windows);
@@ -2007,6 +1886,7 @@ trust_level = "trusted"
let codex_home = TempDir::new()?;
let cfg = ConfigToml {
experimental_use_unified_exec_tool: Some(true),
experimental_use_rmcp_client: Some(true),
experimental_use_freeform_apply_patch: Some(true),
..Default::default()
};
@@ -2019,10 +1899,12 @@ trust_level = "trusted"

assert!(config.features.enabled(Feature::ApplyPatchFreeform));
assert!(config.features.enabled(Feature::UnifiedExec));
assert!(config.features.enabled(Feature::RmcpClient));

assert!(config.include_apply_patch_tool);

assert!(config.use_experimental_unified_exec_tool);
assert!(config.use_experimental_use_rmcp_client);

Ok(())
}
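Note: the test above exercises the mapping from legacy `experimental_*` TOML flags onto the newer `Features` set. The real `Feature`/`Features` types are not shown in this compare; the sketch below is a hypothetical simplification of how such a set could work:

```rust
use std::collections::HashSet;

// Hypothetical simplified Feature/Features pair; the repo's enum has
// more variants plus default-on logic.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum Feature {
    UnifiedExec,
    RmcpClient,
    ApplyPatchFreeform,
}

#[derive(Default)]
struct Features(HashSet<Feature>);

impl Features {
    fn enable(&mut self, f: Feature) {
        self.0.insert(f);
    }
    fn enabled(&self, f: Feature) -> bool {
        self.0.contains(&f)
    }
}

fn main() {
    // Legacy experimental_* flags translate into feature toggles.
    let (use_unified_exec, use_rmcp) = (Some(true), Some(true));
    let mut features = Features::default();
    if use_unified_exec == Some(true) {
        features.enable(Feature::UnifiedExec);
    }
    if use_rmcp == Some(true) {
        features.enable(Feature::RmcpClient);
    }
    assert!(features.enabled(Feature::UnifiedExec));
    assert!(features.enabled(Feature::RmcpClient));
    assert!(!features.enabled(Feature::ApplyPatchFreeform));
}
```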
@@ -2062,12 +1944,10 @@ trust_level = "trusted"
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack =
load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides).await?;
load_config_layers_state(codex_home.path(), &Vec::new(), overrides).await?;
let cfg = deserialize_config_toml_with_base(
config_layer_stack.effective_config(),
codex_home.path(),
@@ -2094,43 +1974,6 @@ trust_level = "trusted"
Ok(())
}

#[tokio::test]
async fn config_builder_thread_agnostic_ignores_project_layers() -> anyhow::Result<()> {
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir_all(&codex_home)?;
std::fs::write(codex_home.join(CONFIG_TOML_FILE), "model = \"from-user\"\n")?;

let project = tmp.path().join("project");
std::fs::create_dir_all(project.join(".codex"))?;
std::fs::write(
project.join(".codex").join(CONFIG_TOML_FILE),
"model = \"from-project\"\n",
)?;

let harness_overrides = ConfigOverrides {
cwd: Some(project),
..Default::default()
};

let with_project_layers = ConfigBuilder::default()
.codex_home(codex_home.clone())
.harness_overrides(harness_overrides.clone())
.build()
.await?;
assert_eq!(with_project_layers.model.as_deref(), Some("from-project"));

let thread_agnostic = ConfigBuilder::default()
.codex_home(codex_home)
.harness_overrides(harness_overrides)
.thread_agnostic()
.build()
.await?;
assert_eq!(thread_agnostic.model.as_deref(), Some("from-user"));

Ok(())
}

#[tokio::test]
async fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> {
let codex_home = TempDir::new()?;
@@ -2220,13 +2063,10 @@ trust_level = "trusted"
managed_config_path: Some(managed_path),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
};

let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack = load_config_layers_state(
codex_home.path(),
Some(cwd),
&[("model".to_string(), TomlValue::String("cli".to_string()))],
overrides,
)
@@ -3208,7 +3048,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::Never),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3223,7 +3063,6 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3232,6 +3071,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3242,6 +3082,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("o3".to_string()),
@@ -3253,15 +3094,6 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
},
o3_profile_config
@@ -3291,7 +3123,7 @@ model_verbosity = "high"
model_provider_id: "openai-chat-completions".to_string(),
model_provider: fixture.openai_chat_completions_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::UnlessTrusted),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3306,7 +3138,6 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3315,6 +3146,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3325,6 +3157,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("gpt3".to_string()),
@@ -3336,15 +3169,6 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};

@@ -3389,7 +3213,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3404,7 +3228,6 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3413,6 +3236,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: None,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3423,6 +3247,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("zdr".to_string()),
@@ -3434,15 +3259,6 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};

@@ -3473,7 +3289,7 @@ model_verbosity = "high"
model_provider_id: "openai".to_string(),
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
sandbox_policy: SandboxPolicy::new_read_only_policy(),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3488,7 +3304,6 @@ model_verbosity = "high"
project_doc_fallback_filenames: Vec::new(),
tool_output_token_limit: None,
codex_home: fixture.codex_home(),
config_layer_stack: Default::default(),
history: History::default(),
file_opener: UriBasedFileOpener::VsCode,
codex_linux_sandbox_exe: None,
@@ -3497,6 +3312,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_reasoning_summary_format: None,
model_verbosity: Some(Verbosity::High),
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
@@ -3507,6 +3323,7 @@ model_verbosity = "high"
include_apply_patch_tool: false,
tools_web_search_request: false,
use_experimental_unified_exec_tool: false,
use_experimental_use_rmcp_client: false,
ghost_snapshot: GhostSnapshotConfig::default(),
features: Features::with_defaults(),
active_profile: Some("gpt5".to_string()),
@@ -3518,15 +3335,6 @@ model_verbosity = "high"
tui_notifications: Default::default(),
animations: true,
show_tooltips: true,
tui_scroll_events_per_tick: None,
tui_scroll_wheel_lines: None,
tui_scroll_trackpad_lines: None,
tui_scroll_trackpad_accel_events: None,
tui_scroll_trackpad_accel_max: None,
tui_scroll_mode: ScrollInputMode::Auto,
tui_scroll_wheel_tick_detect_max_ms: None,
tui_scroll_wheel_like_max_duration_ms: None,
tui_scroll_invert: false,
otel: OtelConfig::default(),
};

@@ -3826,15 +3634,12 @@ trust_level = "untrusted"
// Verify that untrusted projects still get WorkspaceWrite sandbox (or ReadOnly on Windows)
if cfg!(target_os = "windows") {
assert!(
matches!(config.sandbox_policy.get(), SandboxPolicy::ReadOnly),
matches!(config.sandbox_policy, SandboxPolicy::ReadOnly),
"Expected ReadOnly on Windows"
);
} else {
assert!(
matches!(
config.sandbox_policy.get(),
SandboxPolicy::WorkspaceWrite { .. }
),
matches!(config.sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }),
"Expected WorkspaceWrite sandbox for untrusted project"
);
}

@@ -1,6 +1,5 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;

use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
@@ -10,7 +9,7 @@ use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
pub struct ConfigProfile {
pub model: Option<String>,
/// The key in the `model_providers` map identifying the
|
||||
@@ -26,6 +25,7 @@ pub struct ConfigProfile {
|
||||
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
|
||||
pub include_apply_patch_tool: Option<bool>,
|
||||
pub experimental_use_unified_exec_tool: Option<bool>,
|
||||
pub experimental_use_rmcp_client: Option<bool>,
|
||||
pub experimental_use_freeform_apply_patch: Option<bool>,
|
||||
pub tools_web_search: Option<bool>,
|
||||
pub tools_view_image: Option<bool>,
|
||||
|
||||
@@ -132,7 +132,7 @@ impl ConfigService {
|
||||
params: ConfigReadParams,
|
||||
) -> Result<ConfigReadResponse, ConfigServiceError> {
|
||||
let layers = self
|
||||
.load_thread_agnostic_config()
|
||||
.load_layers_state()
|
||||
.await
|
||||
.map_err(|err| ConfigServiceError::io("failed to read configuration layers", err))?;
|
||||
|
||||
@@ -185,7 +185,7 @@ impl ConfigService {
|
||||
&self,
|
||||
) -> Result<codex_app_server_protocol::UserSavedConfig, ConfigServiceError> {
|
||||
let layers = self
|
||||
.load_thread_agnostic_config()
|
||||
.load_layers_state()
|
||||
.await
|
||||
.map_err(|err| ConfigServiceError::io("failed to load configuration", err))?;
|
||||
|
||||
@@ -219,7 +219,7 @@ impl ConfigService {
|
||||
}
|
||||
|
||||
let layers = self
|
||||
.load_thread_agnostic_config()
|
||||
.load_layers_state()
|
||||
.await
|
||||
.map_err(|err| ConfigServiceError::io("failed to load configuration", err))?;
|
||||
let user_layer = match layers.get_user_layer() {
|
||||
@@ -328,14 +328,9 @@ impl ConfigService {
|
||||
})
|
||||
}
|
||||
|
||||
/// Loads a "thread-agnostic" config, which means the config layers do not
|
||||
/// include any in-repo .codex/ folders because there is no cwd/project root
|
||||
/// associated with this query.
|
||||
async fn load_thread_agnostic_config(&self) -> std::io::Result<ConfigLayerStack> {
|
||||
let cwd: Option<AbsolutePathBuf> = None;
|
||||
async fn load_layers_state(&self) -> std::io::Result<ConfigLayerStack> {
|
||||
load_config_layers_state(
|
||||
&self.codex_home,
|
||||
cwd,
|
||||
&self.cli_overrides,
|
||||
self.loader_overrides.clone(),
|
||||
)
|
||||
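// Editor's illustration (not part of the diff): the "thread-agnostic" idea in
// miniature. One side of this change threads a `cwd` through the loader; when
// no cwd is supplied there is nothing to walk, so no in-repo .codex/ project
// layers are produced. `project_layer_dirs` is a hypothetical stand-in, not
// the real loader.
fn project_layer_dirs(cwd: Option<&std::path::Path>) -> Vec<std::path::PathBuf> {
    // cwd = None (thread-agnostic): skip in-repo .codex/ discovery entirely.
    let Some(cwd) = cwd else { return Vec::new() };
    // With a cwd, ancestors may each contribute a .codex/ layer (the real
    // loader stops the walk at the detected project root).
    cwd.ancestors().map(|a| a.join(".codex")).collect()
}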
@@ -556,10 +551,6 @@ fn override_message(layer: &ConfigLayerSource) -> String {
ConfigLayerSource::System { file } => {
format!("Overridden by managed config (system): {}", file.display())
}
ConfigLayerSource::Project { dot_codex_folder } => format!(
"Overridden by project config: {}/{CONFIG_TOML_FILE}",
dot_codex_folder.display(),
),
ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(),
ConfigLayerSource::User { file } => {
format!("Overridden by user config: {}", file.display())
}
@@ -755,7 +746,6 @@ remote_compaction = true
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -779,41 +769,15 @@ remote_compaction = true
},
);
let layers = response.layers.expect("layers present");
if cfg!(unix) {
let system_file = AbsolutePathBuf::from_absolute_path(
crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX,
)
.expect("system file");
assert_eq!(layers.len(), 3, "expected three layers on unix");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone()
}
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User {
file: user_file.clone()
}
);
assert_eq!(
layers.get(2).unwrap().name,
ConfigLayerSource::System { file: system_file }
);
} else {
assert_eq!(layers.len(), 2, "expected two layers");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: managed_file.clone()
}
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User { file: user_file }
);
}
assert_eq!(layers.len(), 2, "expected two layers");
assert_eq!(
layers.first().unwrap().name,
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file }
);
assert_eq!(
layers.get(1).unwrap().name,
ConfigLayerSource::User { file: user_file }
);
}

#[tokio::test]
@@ -836,7 +800,6 @@ remote_compaction = true
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -939,7 +902,6 @@ remote_compaction = true
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -987,7 +949,6 @@ remote_compaction = true
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);

@@ -1033,7 +994,6 @@ remote_compaction = true
managed_config_path: Some(managed_path.clone()),
#[cfg(target_os = "macos")]
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
);


@@ -221,7 +221,7 @@ mod option_duration_secs {
}
}

#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Deserialize, Debug, Copy, Clone, PartialEq)]
pub enum UriBasedFileOpener {
#[serde(rename = "vscode")]
VsCode,
@@ -253,7 +253,7 @@ impl UriBasedFileOpener {
}

/// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct History {
/// If true, history entries will not be written to disk.
pub persistence: HistoryPersistence,
@@ -263,7 +263,7 @@ pub struct History {
pub max_bytes: Option<usize>,
}

#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum HistoryPersistence {
/// Save all history entries to disk.
@@ -275,7 +275,7 @@ pub enum HistoryPersistence {

// ===== OTEL configuration =====

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
/// Binary payload
@@ -284,7 +284,7 @@ pub enum OtelHttpProtocol {
Json,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub struct OtelTlsConfig {
pub ca_certificate: Option<AbsolutePathBuf>,
@@ -293,7 +293,7 @@ pub struct OtelTlsConfig {
}

/// Which OTEL exporter to use.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Deserialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
None,
@@ -315,7 +315,7 @@ pub enum OtelExporterKind {
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OtelConfigToml {
/// Log user prompt in traces
pub log_user_prompt: Option<bool>,
@@ -350,7 +350,7 @@ impl Default for OtelConfig {
}
}

#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
pub enum Notifications {
Enabled(bool),
@@ -363,30 +363,8 @@ impl Default for Notifications {
}
}

/// How TUI2 should interpret mouse scroll events.
///
/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse
/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to
/// infer wheel vs trackpad per stream, or forces a specific behavior.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ScrollInputMode {
/// Infer wheel vs trackpad behavior per scroll stream.
Auto,
/// Always treat scroll events as mouse-wheel input (fixed lines per tick).
Wheel,
/// Always treat scroll events as trackpad input (fractional accumulation).
Trackpad,
}

impl Default for ScrollInputMode {
fn default() -> Self {
Self::Auto
}
}

/// Collection of settings that are specific to the TUI.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {
/// Enable desktop notifications from the TUI when the terminal is unfocused.
/// Defaults to `true`.
@@ -402,109 +380,6 @@ pub struct Tui {
/// Defaults to `true`.
#[serde(default = "default_true")]
pub show_tooltips: bool,

/// Override the *wheel* event density used to normalize TUI2 scrolling.
///
/// Terminals generally deliver both mouse wheels and trackpads as discrete `scroll up/down`
/// mouse events with direction but no magnitude. Unfortunately, the *number* of raw events
/// per physical wheel notch varies by terminal (commonly 1, 3, or 9+). TUI2 uses this value
/// to normalize that raw event density into consistent "wheel tick" behavior.
///
/// Wheel math (conceptually):
///
/// - A single event contributes `1 / scroll_events_per_tick` tick-equivalents.
/// - Wheel-like streams then scale that by `scroll_wheel_lines` so one physical notch scrolls
/// a fixed number of lines.
///
/// Trackpad math is intentionally *not* fully tied to this value: in trackpad-like mode, TUI2
/// uses `min(scroll_events_per_tick, 3)` as the divisor so terminals with dense wheel ticks
/// (e.g. 9 events per notch) do not make trackpads feel artificially slow.
///
/// Defaults are derived per terminal from [`crate::terminal::TerminalInfo`] when TUI2 starts.
/// See `codex-rs/tui2/docs/scroll_input_model.md` for the probe data and rationale.
pub scroll_events_per_tick: Option<u16>,

/// Override how many transcript lines one physical *wheel notch* should scroll in TUI2.
///
/// This is the "classic feel" knob. Defaults to 3.
///
/// Wheel-like per-event contribution is `scroll_wheel_lines / scroll_events_per_tick`. For
/// example, in a terminal that emits 9 events per notch, the default `3 / 9` yields 1/3 of a
/// line per event and totals 3 lines once the full notch burst arrives.
///
/// See `codex-rs/tui2/docs/scroll_input_model.md` for details on the stream model and the
/// wheel/trackpad heuristic.
pub scroll_wheel_lines: Option<u16>,

/// Override baseline trackpad scroll sensitivity in TUI2.
///
/// Trackpads do not have discrete notches, but terminals still emit discrete `scroll up/down`
/// events. In trackpad-like mode, TUI2 accumulates fractional scroll and only applies whole
/// lines to the viewport.
///
/// Trackpad per-event contribution is:
///
/// - `scroll_trackpad_lines / min(scroll_events_per_tick, 3)`
///
/// (plus optional bounded acceleration; see `scroll_trackpad_accel_*`). The `min(..., 3)`
/// divisor is deliberate: `scroll_events_per_tick` is calibrated from *wheel* behavior and
/// can be much larger than trackpad event density, which would otherwise make trackpads feel
/// too slow in dense-wheel terminals.
///
/// Defaults to 1, meaning one tick-equivalent maps to one transcript line.
pub scroll_trackpad_lines: Option<u16>,

/// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2.
///
/// This keeps small swipes precise while allowing large/faster swipes to cover more content.
/// Defaults are chosen to address terminals where trackpad event density is comparatively low.
///
/// Concretely, TUI2 computes an acceleration multiplier for trackpad-like streams:
///
/// - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)`
///
/// The multiplier is applied to the stream’s computed line delta (including any carried
/// fractional remainder).
pub scroll_trackpad_accel_events: Option<u16>,

/// Trackpad acceleration: maximum multiplier applied to trackpad-like streams.
///
/// Set to 1 to effectively disable trackpad acceleration.
///
/// See [`Tui::scroll_trackpad_accel_events`] for the exact multiplier formula.
pub scroll_trackpad_accel_max: Option<u16>,

/// Select how TUI2 interprets mouse scroll input.
///
/// - `auto` (default): infer wheel vs trackpad per scroll stream.
/// - `wheel`: always use wheel behavior (fixed lines per wheel notch).
/// - `trackpad`: always use trackpad behavior (fractional accumulation; wheel may feel slow).
#[serde(default)]
pub scroll_mode: ScrollInputMode,

/// Auto-mode threshold: maximum time (ms) for the first tick-worth of events to arrive.
///
/// In `scroll_mode = "auto"`, TUI2 starts a stream as trackpad-like (to avoid overshoot) and
/// promotes it to wheel-like if `scroll_events_per_tick` events arrive "quickly enough". This
/// threshold controls what "quickly enough" means.
///
/// Most users should leave this unset; it is primarily for terminals that emit wheel ticks
/// batched over longer time spans.
pub scroll_wheel_tick_detect_max_ms: Option<u64>,

/// Auto-mode fallback: maximum duration (ms) that a very small stream is still treated as wheel-like.
///
/// This is only used when `scroll_events_per_tick` is effectively 1 (one event per wheel
/// notch). In that case, we cannot observe a "tick completion time", so TUI2 treats a
/// short-lived, small stream (<= 2 events) as wheel-like to preserve classic wheel behavior.
pub scroll_wheel_like_max_duration_ms: Option<u64>,

/// Invert mouse scroll direction in TUI2.
///
/// This flips the scroll sign after terminal detection. It is applied consistently to both
/// wheel and trackpad input.
#[serde(default)]
pub scroll_invert: bool,
}
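// Editor's illustration (not part of the diff): the wheel/trackpad arithmetic
// described in the doc comments above, as a standalone sketch. Parameter names
// mirror the `Tui` settings; this is not the actual TUI2 implementation.
fn wheel_lines_per_event(scroll_wheel_lines: u16, scroll_events_per_tick: u16) -> f32 {
    // One physical notch = `scroll_events_per_tick` events totalling
    // `scroll_wheel_lines` lines, so each event contributes lines/events.
    f32::from(scroll_wheel_lines) / f32::from(scroll_events_per_tick.max(1))
}

fn trackpad_lines_per_event(scroll_trackpad_lines: u16, scroll_events_per_tick: u16) -> f32 {
    // Trackpad mode deliberately divides by at most 3 so dense-wheel terminals
    // (e.g. 9 events per notch) do not make trackpads feel artificially slow.
    f32::from(scroll_trackpad_lines) / f32::from(scroll_events_per_tick.clamp(1, 3))
}

fn trackpad_accel_multiplier(events: i32, accel_events: u16, accel_max: u16) -> f32 {
    // multiplier = clamp(1 + abs(events) / accel_events, 1..accel_max)
    let raw = 1.0 + events.unsigned_abs() as f32 / f32::from(accel_events.max(1));
    raw.clamp(1.0, f32::from(accel_max.max(1)))
}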
const fn default_true() -> bool {
@@ -514,7 +389,7 @@ const fn default_true() -> bool {
/// Settings for notices we display to users via the tui and app-server clients
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Notice {
/// Tracks whether the user has acknowledged the full access warning prompt.
pub hide_full_access_warning: Option<bool>,
@@ -537,7 +412,7 @@ impl Notice {
pub(crate) const TABLE_KEY: &'static str = "notice";
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct SandboxWorkspaceWrite {
#[serde(default)]
pub writable_roots: Vec<AbsolutePathBuf>,
@@ -560,7 +435,7 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
}
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {
/// "Core" environment variables for the platform. On UNIX, this would
@@ -577,7 +452,7 @@ pub enum ShellEnvironmentPolicyInherit {

/// Policy for building the `env` when spawning a process via either the
/// `shell` or `local_shell` tool.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct ShellEnvironmentPolicyToml {
pub inherit: Option<ShellEnvironmentPolicyInherit>,


@@ -10,7 +10,7 @@ This module is the canonical place to **load and describe Codex configuration layers**.

Exported from `codex_core::config_loader`:

- `load_config_layers_state(codex_home, cwd_opt, cli_overrides, overrides) -> ConfigLayerStack`
- `load_config_layers_state(codex_home, cli_overrides, overrides) -> ConfigLayerStack`
- `ConfigLayerStack`
- `effective_config() -> toml::Value`
- `origins() -> HashMap<String, ConfigLayerMetadata>`
@@ -37,14 +37,11 @@ Most callers want the effective config plus metadata:

```rust
use codex_core::config_loader::{load_config_layers_state, LoaderOverrides};
use codex_utils_absolute_path::AbsolutePathBuf;
use toml::Value as TomlValue;

let cli_overrides: Vec<(String, TomlValue)> = Vec::new();
let cwd = AbsolutePathBuf::current_dir()?;
let layers = load_config_layers_state(
&codex_home,
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
).await?;
```
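A hedged sketch of what a caller might do next with the returned stack, using only the names listed in the README's export list above (`effective_config()`, `origins()`); treat it as illustrative rather than a verbatim API transcript:

```rust
// Merge all layers into one effective config, then ask where each key came from.
let effective: toml::Value = layers.effective_config();
if let Some(model) = effective.get("model").and_then(|v| v.as_str()) {
    println!("model = {model}");
}
for (key, origin) in layers.origins() {
    println!("{key} <- {origin:?}");
}
```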
@@ -1,6 +1,4 @@
use codex_protocol::config_types::SandboxMode;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use serde::Deserialize;

use crate::config::Constrained;
@@ -11,14 +9,12 @@ use crate::config::ConstraintError;
#[derive(Debug, Clone, PartialEq)]
pub struct ConfigRequirements {
pub approval_policy: Constrained<AskForApproval>,
pub sandbox_policy: Constrained<SandboxPolicy>,
}

impl Default for ConfigRequirements {
fn default() -> Self {
Self {
approval_policy: Constrained::allow_any_from_default(),
sandbox_policy: Constrained::allow_any(SandboxPolicy::ReadOnly),
}
}
}
@@ -27,34 +23,6 @@ impl Default for ConfigRequirements {
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigRequirementsToml {
pub allowed_approval_policies: Option<Vec<AskForApproval>>,
pub allowed_sandbox_modes: Option<Vec<SandboxModeRequirement>>,
}

/// Currently, `external-sandbox` is not supported in config.toml, but it is
/// supported through programmatic use.
#[derive(Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum SandboxModeRequirement {
#[serde(rename = "read-only")]
ReadOnly,

#[serde(rename = "workspace-write")]
WorkspaceWrite,

#[serde(rename = "danger-full-access")]
DangerFullAccess,

#[serde(rename = "external-sandbox")]
ExternalSandbox,
}

impl From<SandboxMode> for SandboxModeRequirement {
fn from(mode: SandboxMode) -> Self {
match mode {
SandboxMode::ReadOnly => SandboxModeRequirement::ReadOnly,
SandboxMode::WorkspaceWrite => SandboxModeRequirement::WorkspaceWrite,
SandboxMode::DangerFullAccess => SandboxModeRequirement::DangerFullAccess,
}
}
}
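// Editor's illustration (not part of the diff): how the kebab-case
// `#[serde(rename)]` attributes above map TOML strings onto the enum.
// `Req` and `Doc` are hypothetical mirrors, not crate types.
#[derive(serde::Deserialize, Debug, PartialEq)]
enum Req {
    #[serde(rename = "read-only")]
    ReadOnly,
    #[serde(rename = "workspace-write")]
    WorkspaceWrite,
}

#[derive(serde::Deserialize)]
struct Doc {
    allowed_sandbox_modes: Vec<Req>,
}

fn demo_kebab_case() -> Result<(), toml::de::Error> {
    let doc: Doc = toml::from_str(r#"allowed_sandbox_modes = ["read-only", "workspace-write"]"#)?;
    assert_eq!(doc.allowed_sandbox_modes, vec![Req::ReadOnly, Req::WorkspaceWrite]);
    Ok(())
}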
impl ConfigRequirementsToml {
@@ -73,7 +41,7 @@ impl ConfigRequirementsToml {
};
}

fill_missing_take!(self, other, { allowed_approval_policies, allowed_sandbox_modes });
fill_missing_take!(self, other, { allowed_approval_policies });
}
}

@@ -81,13 +49,12 @@ impl TryFrom<ConfigRequirementsToml> for ConfigRequirements {
type Error = ConstraintError;

fn try_from(toml: ConfigRequirementsToml) -> Result<Self, Self::Error> {
let ConfigRequirementsToml {
allowed_approval_policies,
allowed_sandbox_modes,
} = toml;
let approval_policy: Constrained<AskForApproval> = match allowed_approval_policies {
let approval_policy: Constrained<AskForApproval> = match toml.allowed_approval_policies {
Some(policies) => {
if let Some(first) = policies.first() {
let default_value = AskForApproval::default();
if policies.contains(&default_value) {
Constrained::allow_values(default_value, policies)?
} else if let Some(first) = policies.first() {
Constrained::allow_values(*first, policies)?
} else {
return Err(ConstraintError::empty_field("allowed_approval_policies"));
@@ -95,51 +62,7 @@ impl TryFrom<ConfigRequirementsToml> for ConfigRequirements {
}
None => Constrained::allow_any_from_default(),
};

// TODO(gt): `ConfigRequirementsToml` should let the author specify the
// default `SandboxPolicy`? Should do this for `AskForApproval` too?
//
// Currently, we force ReadOnly as the default policy because two of
// the other variants (WorkspaceWrite, ExternalSandbox) require
// additional parameters. Ultimately, we should expand the config
// format to allow specifying those parameters.
let default_sandbox_policy = SandboxPolicy::ReadOnly;
let sandbox_policy: Constrained<SandboxPolicy> = match allowed_sandbox_modes {
Some(modes) => {
if !modes.contains(&SandboxModeRequirement::ReadOnly) {
return Err(ConstraintError::invalid_value(
"allowed_sandbox_modes",
"must include 'read-only' to allow any SandboxPolicy",
));
};

Constrained::new(default_sandbox_policy, move |candidate| {
let mode = match candidate {
SandboxPolicy::ReadOnly => SandboxModeRequirement::ReadOnly,
SandboxPolicy::WorkspaceWrite { .. } => {
SandboxModeRequirement::WorkspaceWrite
}
SandboxPolicy::DangerFullAccess => SandboxModeRequirement::DangerFullAccess,
SandboxPolicy::ExternalSandbox { .. } => {
SandboxModeRequirement::ExternalSandbox
}
};
if modes.contains(&mode) {
Ok(())
} else {
Err(ConstraintError::invalid_value(
format!("{candidate:?}"),
format!("{modes:?}"),
))
}
})?
}
None => Constrained::allow_any(default_sandbox_policy),
};
Ok(ConfigRequirements {
approval_policy,
sandbox_policy,
})
Ok(ConfigRequirements { approval_policy })
}
}
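// Editor's illustration (not part of the diff): the selection rule in the new
// `TryFrom` body above, isolated into a standalone sketch. Prefer the type's
// default when the allow-list contains it; otherwise fall back to the first
// allowed value; an empty list yields None (the `empty_field` error case).
fn pick_initial<T: Default + PartialEq + Copy>(allowed: &[T]) -> Option<T> {
    let default_value = T::default();
    if allowed.contains(&default_value) {
        Some(default_value)
    } else {
        allowed.first().copied()
    }
}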
@@ -147,8 +70,6 @@ impl TryFrom<ConfigRequirementsToml> for ConfigRequirements {
mod tests {
use super::*;
use anyhow::Result;
use codex_protocol::protocol::NetworkAccess;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use toml::from_str;

@@ -183,105 +104,4 @@ mod tests {
);
Ok(())
}

#[test]
fn deserialize_allowed_approval_policies() -> Result<()> {
let toml_str = r#"
allowed_approval_policies = ["untrusted", "on-request"]
"#;
let config: ConfigRequirementsToml = from_str(toml_str)?;
let requirements = ConfigRequirements::try_from(config)?;

assert_eq!(
requirements.approval_policy.value(),
AskForApproval::UnlessTrusted,
"currently, there is no way to specify the default value for approval policy in the toml, so it picks the first allowed value"
);
assert!(
requirements
.approval_policy
.can_set(&AskForApproval::UnlessTrusted)
.is_ok()
);
assert_eq!(
requirements
.approval_policy
.can_set(&AskForApproval::OnFailure),
Err(ConstraintError::InvalidValue {
candidate: "OnFailure".into(),
allowed: "[UnlessTrusted, OnRequest]".into(),
})
);
assert!(
requirements
.approval_policy
.can_set(&AskForApproval::OnRequest)
.is_ok()
);
assert_eq!(
requirements.approval_policy.can_set(&AskForApproval::Never),
Err(ConstraintError::InvalidValue {
candidate: "Never".into(),
allowed: "[UnlessTrusted, OnRequest]".into(),
})
);
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ReadOnly)
.is_ok()
);

Ok(())
}

#[test]
fn deserialize_allowed_sandbox_modes() -> Result<()> {
let toml_str = r#"
allowed_sandbox_modes = ["read-only", "workspace-write"]
"#;
let config: ConfigRequirementsToml = from_str(toml_str)?;
let requirements = ConfigRequirements::try_from(config)?;

let root = if cfg!(windows) { "C:\\repo" } else { "/repo" };
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ReadOnly)
.is_ok()
);
assert!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::WorkspaceWrite {
writable_roots: vec![AbsolutePathBuf::from_absolute_path(root)?],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
})
.is_ok()
);
assert_eq!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::DangerFullAccess),
Err(ConstraintError::InvalidValue {
candidate: "DangerFullAccess".into(),
allowed: "[ReadOnly, WorkspaceWrite]".into(),
})
);
assert_eq!(
requirements
.sandbox_policy
.can_set(&SandboxPolicy::ExternalSandbox {
network_access: NetworkAccess::Restricted,
}),
Err(ConstraintError::InvalidValue {
candidate: "ExternalSandbox { network_access: Restricted }".into(),
allowed: "[ReadOnly, WorkspaceWrite]".into(),
})
);

Ok(())
}
}

@@ -33,13 +33,11 @@ pub(super) async fn load_config_layers_internal(
let LoaderOverrides {
managed_config_path,
managed_preferences_base64,
..
} = overrides;

#[cfg(not(target_os = "macos"))]
let LoaderOverrides {
managed_config_path,
..
} = overrides;

let managed_config_path = AbsolutePathBuf::from_absolute_path(

@@ -1,4 +1,3 @@
use super::config_requirements::ConfigRequirementsToml;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
@@ -11,7 +10,6 @@ use toml::Value as TomlValue;

const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";
const MANAGED_PREFERENCES_REQUIREMENTS_KEY: &str = "requirements_toml_base64";

pub(crate) async fn load_managed_admin_config_layer(
override_base64: Option<&str>,
@@ -21,126 +19,82 @@ pub(crate) async fn load_managed_admin_config_layer(
return if trimmed.is_empty() {
Ok(None)
} else {
parse_managed_config_base64(trimmed).map(Some)
parse_managed_preferences_base64(trimmed).map(Some)
};
}

const LOAD_ERROR: &str = "Failed to load managed preferences configuration";

match task::spawn_blocking(load_managed_admin_config).await {
Ok(result) => result,
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed config load task was cancelled");
tracing::error!("Managed preferences load task was cancelled");
} else {
tracing::error!("Managed config load task failed: {join_err}");
tracing::error!("Managed preferences load task failed: {join_err}");
}
Err(io::Error::other("Failed to load managed config"))
Err(io::Error::other(LOAD_ERROR))
}
}
}

fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
load_managed_preference(MANAGED_PREFERENCES_CONFIG_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_config_base64)
.transpose()
}

pub(crate) async fn load_managed_admin_requirements_toml(
target: &mut ConfigRequirementsToml,
override_base64: Option<&str>,
) -> io::Result<()> {
if let Some(encoded) = override_base64 {
let trimmed = encoded.trim();
if !trimmed.is_empty() {
target.merge_unset_fields(parse_managed_requirements_base64(trimmed)?);
}
return Ok(());
}

match task::spawn_blocking(load_managed_admin_requirements).await {
Ok(result) => {
if let Some(requirements) = result? {
target.merge_unset_fields(requirements);
}
Ok(())
}
Err(join_err) => {
if join_err.is_cancelled() {
tracing::error!("Managed requirements load task was cancelled");
} else {
tracing::error!("Managed requirements load task failed: {join_err}");
}
Err(io::Error::other("Failed to load managed requirements"))
}
}
}

fn load_managed_admin_requirements() -> io::Result<Option<ConfigRequirementsToml>> {
load_managed_preference(MANAGED_PREFERENCES_REQUIREMENTS_KEY)?
.as_deref()
.map(str::trim)
.map(parse_managed_requirements_base64)
.transpose()
}

fn load_managed_preference(key_name: &str) -> io::Result<Option<String>> {
#[link(name = "CoreFoundation", kind = "framework")]
unsafe extern "C" {
fn CFPreferencesCopyAppValue(key: CFStringRef, application_id: CFStringRef) -> *mut c_void;
}

let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

let value_ref = unsafe {
CFPreferencesCopyAppValue(
CFString::new(key_name).as_concrete_TypeRef(),
CFString::new(MANAGED_PREFERENCES_APPLICATION_ID).as_concrete_TypeRef(),
key.as_concrete_TypeRef(),
application_id.as_concrete_TypeRef(),
)
};

if value_ref.is_null() {
tracing::debug!(
"Managed preferences for {MANAGED_PREFERENCES_APPLICATION_ID} key {key_name} not found",
"Managed preferences for {} key {} not found",
MANAGED_PREFERENCES_APPLICATION_ID,
MANAGED_PREFERENCES_CONFIG_KEY
);
return Ok(None);
}

let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) }.to_string();
Ok(Some(value))
let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
let contents = value.to_string();
let trimmed = contents.trim();

parse_managed_preferences_base64(trimmed).map(Some)
}

fn parse_managed_config_base64(encoded: &str) -> io::Result<TomlValue> {
match toml::from_str::<TomlValue>(&decode_managed_preferences_base64(encoded)?) {
fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed preferences as base64: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

let decoded_str = String::from_utf8(decoded).map_err(|err| {
tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
})?;

match toml::from_str::<TomlValue>(&decoded_str) {
Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
Ok(other) => {
tracing::error!("Managed config TOML must have a table at the root, found {other:?}",);
tracing::error!(
"Managed preferences TOML must have a table at the root, found {other:?}",
);
Err(io::Error::new(
io::ErrorKind::InvalidData,
"managed config root must be a table",
"managed preferences root must be a table",
))
}
Err(err) => {
tracing::error!("Failed to parse managed config TOML: {err}");
tracing::error!("Failed to parse managed preferences TOML: {err}");
Err(io::Error::new(io::ErrorKind::InvalidData, err))
}
}
}

fn parse_managed_requirements_base64(encoded: &str) -> io::Result<ConfigRequirementsToml> {
toml::from_str::<ConfigRequirementsToml>(&decode_managed_preferences_base64(encoded)?).map_err(
|err| {
tracing::error!("Failed to parse managed requirements TOML: {err}");
io::Error::new(io::ErrorKind::InvalidData, err)
},
)
}

fn decode_managed_preferences_base64(encoded: &str) -> io::Result<String> {
String::from_utf8(BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
tracing::error!("Failed to decode managed value as base64: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})?)
.map_err(|err| {
tracing::error!("Managed value base64 contents were not valid UTF-8: {err}",);
io::Error::new(io::ErrorKind::InvalidData, err)
})
}
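// Editor's illustration (not part of the diff): the managed-preferences decode
// pipeline above (base64 -> UTF-8 -> TOML table) condensed into one function
// with the same error shape. Uses only the base64 and toml APIs already
// imported by this file.
use base64::Engine as _;

fn decode_to_table(encoded: &str) -> std::io::Result<toml::Value> {
    let bytes = base64::prelude::BASE64_STANDARD
        .decode(encoded.trim().as_bytes())
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
    let text = String::from_utf8(bytes)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
    let value: toml::Value = toml::from_str(&text)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
    if value.is_table() {
        Ok(value)
    } else {
        // A bare scalar or array at the root is rejected, mirroring the code above.
        Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            "managed config root must be a table",
        ))
    }
}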
@@ -11,14 +11,11 @@ mod state;
mod tests;

use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigToml;
use crate::config_loader::config_requirements::ConfigRequirementsToml;
use crate::config_loader::layer_io::LoadedConfigLayers;
use codex_app_server_protocol::ConfigLayerSource;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::protocol::AskForApproval;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use serde::Deserialize;
use std::io;
use std::path::Path;
@@ -28,19 +25,11 @@ pub use config_requirements::ConfigRequirements;
pub use merge::merge_toml_values;
pub use state::ConfigLayerEntry;
pub use state::ConfigLayerStack;
pub use state::ConfigLayerStackOrdering;
pub use state::LoaderOverrides;

/// On Unix systems, load requirements from this file path, if present.
const DEFAULT_REQUIREMENTS_TOML_FILE_UNIX: &str = "/etc/codex/requirements.toml";

/// On Unix systems, load default settings from this file path, if present.
/// Note that /etc/codex/ is treated as a "config folder," so subfolders such
/// as skills/ and rules/ will also be honored.
pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml";

const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];

/// To build up the set of admin-enforced constraints, we build up from multiple
/// configuration layers in the following order, but a constraint defined in an
/// earlier layer cannot be overridden by a later layer:
@@ -65,27 +54,15 @@ const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
/// (*) Only available on macOS via managed device profiles.
///
/// See https://developers.openai.com/codex/security for details.
///
/// When loading the config stack for a thread, there should be a `cwd`
/// associated with it such that `cwd` should be `Some(...)`. Only for
/// thread-agnostic config loading (e.g., for the app server's `/config`
/// endpoint) should `cwd` be `None`.
pub async fn load_config_layers_state(
codex_home: &Path,
cwd: Option<AbsolutePathBuf>,
cli_overrides: &[(String, TomlValue)],
overrides: LoaderOverrides,
) -> io::Result<ConfigLayerStack> {
let mut config_requirements_toml = ConfigRequirementsToml::default();

#[cfg(target_os = "macos")]
macos::load_managed_admin_requirements_toml(
&mut config_requirements_toml,
overrides
.macos_managed_config_requirements_base64
.as_deref(),
)
.await?;
// TODO(mbolin): Support an entry in MDM for config requirements and use it
// with `config_requirements_toml.merge_unset_fields(...)`, if present.

// Honor /etc/codex/requirements.toml.
if cfg!(unix) {
@@ -107,58 +84,44 @@ pub async fn load_config_layers_state(

let mut layers = Vec::<ConfigLayerEntry>::new();

// Include an entry for the "system" config folder, loading its config.toml,
// if it exists.
let system_config_toml_file = if cfg!(unix) {
Some(AbsolutePathBuf::from_absolute_path(
SYSTEM_CONFIG_TOML_FILE_UNIX,
)?)
} else {
// TODO(gt): Determine the path to load on Windows.
None
};
if let Some(system_config_toml_file) = system_config_toml_file {
let system_layer =
load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| {
ConfigLayerEntry::new(
ConfigLayerSource::System {
file: system_config_toml_file.clone(),
},
config_toml,
)
})
.await?;
layers.push(system_layer);
}
// TODO(mbolin): Honor managed preferences (macOS only).
// TODO(mbolin): Honor /etc/codex/config.toml.

// Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file
// exists, but is malformed, then this error should be propagated to the
// user.
let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?;
let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| {
ConfigLayerEntry::new(
ConfigLayerSource::User {
file: user_file.clone(),
},
config_toml,
)
})
.await?;
layers.push(user_layer);

if let Some(cwd) = cwd {
let mut merged_so_far = TomlValue::Table(toml::map::Map::new());
for layer in &layers {
merge_toml_values(&mut merged_so_far, &layer.config);
match tokio::fs::read_to_string(&user_file).await {
Ok(contents) => {
let user_config: TomlValue = toml::from_str(&contents).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Error parsing user config file {}: {e}",
user_file.as_path().display(),
),
)
})?;
layers.push(ConfigLayerEntry::new(
ConfigLayerSource::User { file: user_file },
user_config,
));
}
Err(e) => {
if e.kind() != io::ErrorKind::NotFound {
return Err(io::Error::new(
e.kind(),
format!(
"Failed to read user config file {}: {e}",
user_file.as_path().display(),
),
));
}
}
let project_root_markers = project_root_markers_from_config(&merged_so_far)?
.unwrap_or_else(default_project_root_markers);

let project_root = find_project_root(&cwd, &project_root_markers).await?;
let project_layers = load_project_layers(&cwd, &project_root).await?;
layers.extend(project_layers);
}

// TODO(mbolin): Add layers for cwd, tree, and repo config files.

// Add a layer for runtime overrides from the CLI or UI, if any exist.
if !cli_overrides.is_empty() {
let cli_overrides_layer = overrides::build_cli_overrides_layer(cli_overrides);
@@ -178,20 +141,11 @@ pub async fn load_config_layers_state(
managed_config_from_mdm,
} = loaded_config_layers;
if let Some(config) = managed_config {
let managed_parent = config.file.as_path().parent().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Managed config file {} has no parent directory",
config.file.as_path().display()
),
)
})?;
let managed_config =
resolve_relative_paths_in_config_toml(config.managed_config, managed_parent)?;
layers.push(ConfigLayerEntry::new(
ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: config.file },
managed_config,
ConfigLayerSource::LegacyManagedConfigTomlFromFile {
file: config.file.clone(),
},
config.managed_config,
));
}
if let Some(config) = managed_config_from_mdm {
@@ -204,52 +158,6 @@ pub async fn load_config_layers_state(
ConfigLayerStack::new(layers, config_requirements_toml.try_into()?)
}

/// Attempts to load a config.toml file from `config_toml`.
/// - If the file exists and is valid TOML, passes the parsed `toml::Value` to
/// `create_entry` and returns the resulting layer entry.
/// - If the file does not exist, uses an empty `Table` with `create_entry` and
/// returns the resulting layer entry.
/// - If there is an error reading the file or parsing the TOML, returns an
/// error.
async fn load_config_toml_for_required_layer(
config_toml: impl AsRef<Path>,
create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry,
) -> io::Result<ConfigLayerEntry> {
let toml_file = config_toml.as_ref();
let toml_value = match tokio::fs::read_to_string(toml_file).await {
Ok(contents) => {
let config: TomlValue = toml::from_str(&contents).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Error parsing config file {}: {e}", toml_file.display()),
)
})?;
let config_parent = toml_file.parent().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Config file {} has no parent directory",
toml_file.display()
),
)
})?;
resolve_relative_paths_in_config_toml(config, config_parent)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Ok(TomlValue::Table(toml::map::Map::new()))
} else {
Err(io::Error::new(
e.kind(),
format!("Failed to read config file {}: {e}", toml_file.display()),
))
}
}
}?;

Ok(create_entry(toml_value))
}
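// Editor's illustration (not part of the diff): the "required layer" fallback
// documented above, in a synchronous std-only form. A missing file yields an
// empty table; an unreadable or malformed file is an error.
fn read_layer_toml(path: &std::path::Path) -> std::io::Result<toml::Value> {
    match std::fs::read_to_string(path) {
        Ok(contents) => toml::from_str(&contents).map_err(|e| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("Error parsing config file {}: {e}", path.display()),
            )
        }),
        // NotFound is not an error: the layer still exists, just empty.
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            Ok(toml::Value::Table(toml::map::Map::new()))
        }
        Err(e) => Err(e),
    }
}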
/// If available, apply requirements from `/etc/codex/requirements.toml` to
/// `config_requirements_toml` by filling in any unset fields.
async fn load_requirements_toml(
@@ -319,217 +227,6 @@ async fn load_requirements_from_legacy_scheme(
Ok(())
}

/// Reads `project_root_markers` from the [toml::Value] produced by merging
/// `config.toml` from the config layers in the stack preceding
/// [ConfigLayerSource::Project].
///
/// Invariants:
/// - If `project_root_markers` is not specified, returns `Ok(None)`.
/// - If `project_root_markers` is specified, returns `Ok(Some(markers))` where
/// `markers` is a `Vec<String>` (including `Ok(Some(Vec::new()))` for an
/// empty array, which indicates that root detection should be disabled).
/// - Returns an error if `project_root_markers` is specified but is not an
/// array of strings.
fn project_root_markers_from_config(config: &TomlValue) -> io::Result<Option<Vec<String>>> {
let Some(table) = config.as_table() else {
return Ok(None);
};
let Some(markers_value) = table.get("project_root_markers") else {
return Ok(None);
};
let TomlValue::Array(entries) = markers_value else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"project_root_markers must be an array of strings",
));
};
if entries.is_empty() {
return Ok(Some(Vec::new()));
}
let mut markers = Vec::new();
for entry in entries {
let Some(marker) = entry.as_str() else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"project_root_markers must be an array of strings",
));
};
markers.push(marker.to_string());
}
Ok(Some(markers))
}

fn default_project_root_markers() -> Vec<String> {
DEFAULT_PROJECT_ROOT_MARKERS
.iter()
.map(ToString::to_string)
.collect()
}

/// Takes a `toml::Value` parsed from a config.toml file and walks through it,
/// resolving any `AbsolutePathBuf` fields against `base_dir`, returning a new
/// `toml::Value` with the same shape but with paths resolved.
///
/// This ensures that multiple config layers can be merged together correctly
/// even if they were loaded from different directories.
fn resolve_relative_paths_in_config_toml(
value_from_config_toml: TomlValue,
base_dir: &Path,
) -> io::Result<TomlValue> {
// Use the serialize/deserialize round-trip to convert the
// `toml::Value` into a `ConfigToml` with `AbsolutePath
let _guard = AbsolutePathBufGuard::new(base_dir);
let Ok(resolved) = value_from_config_toml.clone().try_into::<ConfigToml>() else {
return Ok(value_from_config_toml);
};
drop(_guard);

let resolved_value = TomlValue::try_from(resolved).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Failed to serialize resolved config: {e}"),
)
})?;

Ok(copy_shape_from_original(
&value_from_config_toml,
&resolved_value,
))
}

/// Ensure that every field in `original` is present in the returned
/// `toml::Value`, taking the value from `resolved` where possible. This ensures
/// the fields that we "removed" during the serialize/deserialize round-trip in
/// `resolve_config_paths` are preserved, out of an abundance of caution.
fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlValue {
match (original, resolved) {
(TomlValue::Table(original_table), TomlValue::Table(resolved_table)) => {
let mut table = toml::map::Map::new();
for (key, original_value) in original_table {
let resolved_value = resolved_table.get(key).unwrap_or(original_value);
table.insert(
key.clone(),
copy_shape_from_original(original_value, resolved_value),
);
}
TomlValue::Table(table)
}
(TomlValue::Array(original_array), TomlValue::Array(resolved_array)) => {
let mut items = Vec::new();
for (index, original_value) in original_array.iter().enumerate() {
let resolved_value = resolved_array.get(index).unwrap_or(original_value);
items.push(copy_shape_from_original(original_value, resolved_value));
}
TomlValue::Array(items)
}
(_, resolved_value) => resolved_value.clone(),
}
}
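// Editor's illustration (not part of the diff), assuming `copy_shape_from_original`
// above is in scope: keys the round-trip dropped survive from `original`, while
// keys present in `resolved` win.
#[test]
fn copy_shape_keeps_original_keys() -> Result<(), toml::de::Error> {
    let original: toml::Value = toml::from_str("keep = \"me\"\npath = \"./rel\"")?;
    let resolved: toml::Value = toml::from_str("path = \"/abs/rel\"")?;
    let merged = copy_shape_from_original(&original, &resolved);
    assert_eq!(merged.get("keep").and_then(|v| v.as_str()), Some("me"));
    assert_eq!(merged.get("path").and_then(|v| v.as_str()), Some("/abs/rel"));
    Ok(())
}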
async fn find_project_root(
cwd: &AbsolutePathBuf,
project_root_markers: &[String],
) -> io::Result<AbsolutePathBuf> {
if project_root_markers.is_empty() {
return Ok(cwd.clone());
}

for ancestor in cwd.as_path().ancestors() {
for marker in project_root_markers {
let marker_path = ancestor.join(marker);
if tokio::fs::metadata(&marker_path).await.is_ok() {
return AbsolutePathBuf::from_absolute_path(ancestor);
}
}
}
Ok(cwd.clone())
}
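// Editor's illustration (not part of the diff): the marker walk above in a
// synchronous std-only form. The nearest ancestor containing any marker wins;
// otherwise (including an empty marker list) fall back to `cwd` itself.
fn find_root_sync(cwd: &std::path::Path, markers: &[&str]) -> std::path::PathBuf {
    for ancestor in cwd.ancestors() {
        // `.git` is the default marker; any listed file or directory counts.
        if markers.iter().any(|m| ancestor.join(m).exists()) {
            return ancestor.to_path_buf();
        }
    }
    cwd.to_path_buf()
}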
/// Return the appropriate list of layers (each with
/// [ConfigLayerSource::Project] as the source) between `cwd` and
/// `project_root`, inclusive. The list is ordered in _increasing_ precedence,
/// starting from folders closest to `project_root` (which is the lowest
/// precedence) to those closest to `cwd` (which is the highest precedence).
async fn load_project_layers(
cwd: &AbsolutePathBuf,
project_root: &AbsolutePathBuf,
) -> io::Result<Vec<ConfigLayerEntry>> {
let mut dirs = cwd
.as_path()
.ancestors()
.scan(false, |done, a| {
if *done {
None
} else {
if a == project_root.as_path() {
*done = true;
}
Some(a)
}
})
.collect::<Vec<_>>();
dirs.reverse();

let mut layers = Vec::new();
for dir in dirs {
let dot_codex = dir.join(".codex");
if !tokio::fs::metadata(&dot_codex)
.await
.map(|meta| meta.is_dir())
.unwrap_or(false)
{
continue;
}

let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?;
let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?;
match tokio::fs::read_to_string(&config_file).await {
Ok(contents) => {
let config: TomlValue = toml::from_str(&contents).map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Error parsing project config file {}: {e}",
config_file.as_path().display(),
),
)
})?;
let config =
resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?;
layers.push(ConfigLayerEntry::new(
ConfigLayerSource::Project {
dot_codex_folder: dot_codex_abs,
},
config,
));
}
Err(err) => {
if err.kind() == io::ErrorKind::NotFound {
// If there is no config.toml file, record an empty entry
// for this project layer, as this may still have subfolders
// that are significant in the overall ConfigLayerStack.
layers.push(ConfigLayerEntry::new(
ConfigLayerSource::Project {
dot_codex_folder: dot_codex_abs,
},
TomlValue::Table(toml::map::Map::new()),
));
} else {
return Err(io::Error::new(
err.kind(),
format!(
"Failed to read project config file {}: {err}",
config_file.as_path().display(),
),
));
}
}
}
}

Ok(layers)
}
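// Editor's illustration (not part of the diff): the ordering trick used by
// `load_project_layers` above, isolated. `ancestors()` yields cwd -> root,
// `scan` stops once the project root is emitted, and the final `reverse`
// makes the root the lowest precedence and cwd the highest.
fn layer_dirs<'a>(cwd: &'a std::path::Path, root: &std::path::Path) -> Vec<&'a std::path::Path> {
    let mut dirs: Vec<&std::path::Path> = cwd
        .ancestors()
        .scan(false, |done, a| {
            if *done {
                None
            } else {
                if a == root {
                    *done = true; // include the root itself, then stop
                }
                Some(a)
            }
        })
        .collect();
    dirs.reverse(); // lowest precedence (root) first, cwd last
    dirs
}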
/// The legacy mechanism for specifying admin-enforced configuration is to read
/// from a file like `/etc/codex/managed_config.toml` that has the same
/// structure as `config.toml` where fields like `approval_policy` can specify
@@ -541,67 +238,17 @@ async fn load_project_layers(
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
struct LegacyManagedConfigToml {
approval_policy: Option<AskForApproval>,
sandbox_mode: Option<SandboxMode>,
}

impl From<LegacyManagedConfigToml> for ConfigRequirementsToml {
fn from(legacy: LegacyManagedConfigToml) -> Self {
let mut config_requirements_toml = ConfigRequirementsToml::default();

let LegacyManagedConfigToml {
approval_policy,
sandbox_mode,
} = legacy;
let LegacyManagedConfigToml { approval_policy } = legacy;
if let Some(approval_policy) = approval_policy {
config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]);
}
if let Some(sandbox_mode) = sandbox_mode {
config_requirements_toml.allowed_sandbox_modes = Some(vec![sandbox_mode.into()]);
}

config_requirements_toml
}
}
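// Editor's illustration (not part of the diff): the shape of the legacy
// upgrade above, with hypothetical mirror types. Each single enforced value
// becomes a one-element allow-list so the old file format keeps working under
// the requirements model.
struct Legacy {
    approval_policy: Option<&'static str>,
    sandbox_mode: Option<&'static str>,
}

struct Requirements {
    allowed_approval_policies: Option<Vec<&'static str>>,
    allowed_sandbox_modes: Option<Vec<&'static str>>,
}

impl From<Legacy> for Requirements {
    fn from(legacy: Legacy) -> Self {
        Requirements {
            allowed_approval_policies: legacy.approval_policy.map(|p| vec![p]),
            allowed_sandbox_modes: legacy.sandbox_mode.map(|m| vec![m]),
        }
    }
}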
// Cannot name this `mod tests` because of tests.rs in this folder.
#[cfg(test)]
mod unit_tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn ensure_resolve_relative_paths_in_config_toml_preserves_all_fields() -> anyhow::Result<()> {
        let tmp = tempdir()?;
        let base_dir = tmp.path();
        let contents = r#"
# This is a field recognized by config.toml that is an AbsolutePathBuf in
# the ConfigToml struct.
experimental_instructions_file = "./some_file.md"

# This is a field recognized by config.toml.
model = "gpt-1000"

# This is a field not recognized by config.toml.
foo = "xyzzy"
"#;
        let user_config: TomlValue = toml::from_str(contents)?;

        let normalized_toml_value = resolve_relative_paths_in_config_toml(user_config, base_dir)?;
        let mut expected_toml_value = toml::map::Map::new();
        expected_toml_value.insert(
            "experimental_instructions_file".to_string(),
            TomlValue::String(
                AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)?
                    .as_path()
                    .to_string_lossy()
                    .to_string(),
            ),
        );
        expected_toml_value.insert(
            "model".to_string(),
            TomlValue::String("gpt-1000".to_string()),
        );
        expected_toml_value.insert("foo".to_string(), TomlValue::String("xyzzy".to_string()));
        assert_eq!(normalized_toml_value, TomlValue::Table(expected_toml_value));
        Ok(())
    }
}

@@ -12,17 +12,14 @@ use std::collections::HashMap;
use std::path::PathBuf;
use toml::Value as TomlValue;

/// LoaderOverrides overrides managed configuration inputs (primarily for tests).
#[derive(Debug, Default, Clone)]
pub struct LoaderOverrides {
    pub managed_config_path: Option<PathBuf>,
    //TODO(gt): Add a macos_ prefix to this field and remove the target_os check.
    #[cfg(target_os = "macos")]
    pub managed_preferences_base64: Option<String>,
    pub macos_managed_config_requirements_base64: Option<String>,
}

#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone)]
pub struct ConfigLayerEntry {
    pub name: ConfigLayerSource,
    pub config: TomlValue,
@@ -53,28 +50,9 @@ impl ConfigLayerEntry {
            config: serde_json::to_value(&self.config).unwrap_or(JsonValue::Null),
        }
    }

    // Get the `.codex/` folder associated with this config layer, if any.
    pub fn config_folder(&self) -> Option<AbsolutePathBuf> {
        match &self.name {
            ConfigLayerSource::Mdm { .. } => None,
            ConfigLayerSource::System { file } => file.parent(),
            ConfigLayerSource::User { file } => file.parent(),
            ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder.clone()),
            ConfigLayerSource::SessionFlags => None,
            ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => None,
            ConfigLayerSource::LegacyManagedConfigTomlFromMdm => None,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConfigLayerStackOrdering {
    LowestPrecedenceFirst,
    HighestPrecedenceFirst,
}

#[derive(Debug, Clone, Default, PartialEq)]
#[derive(Debug, Clone)]
pub struct ConfigLayerStack {
    /// Layers are listed from lowest precedence (base) to highest (top), so
    /// later entries in the Vec override earlier ones.
@@ -178,16 +156,7 @@ impl ConfigLayerStack {
    /// Returns the highest-precedence to lowest-precedence layers, so
    /// `ConfigLayerSource::SessionFlags` would be first, if present.
    pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> {
        self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst)
    }

    /// Returns the layers in the requested `ordering`; with
    /// `HighestPrecedenceFirst`, `ConfigLayerSource::SessionFlags` would be first, if present.
    pub fn get_layers(&self, ordering: ConfigLayerStackOrdering) -> Vec<&ConfigLayerEntry> {
        match ordering {
            ConfigLayerStackOrdering::HighestPrecedenceFirst => self.layers.iter().rev().collect(),
            ConfigLayerStackOrdering::LowestPrecedenceFirst => self.layers.iter().collect(),
        }
        self.layers.iter().rev().collect()
    }
}

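// Editor's note: an illustrative sketch (not part of the diff) of the ordering
// contract above. The two orderings expose the same entries, just reversed, so
// the following holds for any stack; constructing one is elided here.
//
//     let low_to_high = stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst);
//     let mut high_to_low = stack.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst);
//     high_to_low.reverse();
//     assert_eq!(low_to_high, high_to_low);
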
@@ -201,12 +170,7 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<
        ));
    }

    // The previous check ensured `layers` is sorted by precedence, so now we
    // further verify that:
    // 1. There is at most one user config layer.
    // 2. Project layers are ordered from root to cwd.
    let mut user_layer_index: Option<usize> = None;
    let mut previous_project_dot_codex_folder: Option<&AbsolutePathBuf> = None;
    for (index, layer) in layers.iter().enumerate() {
        if matches!(layer.name, ConfigLayerSource::User { .. }) {
            if user_layer_index.is_some() {
@@ -217,32 +181,6 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<
            }
            user_layer_index = Some(index);
        }

        if let ConfigLayerSource::Project {
            dot_codex_folder: current_project_dot_codex_folder,
        } = &layer.name
        {
            if let Some(previous) = previous_project_dot_codex_folder {
                let Some(parent) = previous.as_path().parent() else {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "project layer has no parent directory",
                    ));
                };
                if previous == current_project_dot_codex_folder
                    || !current_project_dot_codex_folder
                        .as_path()
                        .ancestors()
                        .any(|ancestor| ancestor == parent)
                {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "project layers are not ordered from root to cwd",
                    ));
                }
            }
            previous_project_dot_codex_folder = Some(current_project_dot_codex_folder);
        }
    }

    Ok(user_layer_index)

@@ -1,17 +1,10 @@
use super::LoaderOverrides;
use super::load_config_layers_state;
use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigBuilder;
use crate::config::ConfigOverrides;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::config_requirements::ConfigRequirementsToml;
use crate::config_loader::fingerprint::version_for_toml;
use crate::config_loader::load_requirements_toml;
use codex_protocol::protocol::AskForApproval;
#[cfg(target_os = "macos")]
use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use toml::Value as TomlValue;
@@ -45,18 +38,11 @@ extra = true
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
        macos_managed_config_requirements_base64: None,
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let state = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load config");
    let loaded = state.effective_config();
    let table = loaded.as_table().expect("top-level table expected");

@@ -76,41 +62,18 @@ extra = true
async fn returns_empty_when_all_layers_missing() {
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
        macos_managed_config_requirements_base64: None,
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let layers = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load layers");
    let user_layer = layers
        .get_user_layer()
        .expect("expected a user layer even when CODEX_HOME/config.toml does not exist");
    assert_eq!(
        &ConfigLayerEntry {
            name: super::ConfigLayerSource::User {
                file: AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, tmp.path())
                    .expect("resolve user config.toml path")
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        },
        user_layer,
    );
    assert_eq!(
        user_layer.config,
        TomlValue::Table(toml::map::Map::new()),
        "expected empty config for user layer when config.toml does not exist"
    let layers = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load layers");
    assert!(
        layers.get_user_layer().is_none(),
        "no user layer when CODEX_HOME/config.toml does not exist"
    );

    let binding = layers.effective_config();
@@ -124,10 +87,9 @@ async fn returns_empty_when_all_layers_missing() {
        .iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::System { .. }))
        .count();
    let expected_system_layers = if cfg!(unix) { 1 } else { 0 };
    assert_eq!(
        num_system_layers, expected_system_layers,
        "system layer should be present only on unix"
        num_system_layers, 0,
        "managed config layer should be absent when file missing"
    );

    #[cfg(not(target_os = "macos"))]
@@ -146,6 +108,12 @@ async fn returns_empty_when_all_layers_missing() {
async fn managed_preferences_take_highest_precedence() {
    use base64::Engine;

    let managed_payload = r#"
[nested]
value = "managed"
flag = false
"#;
    let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

@@ -167,28 +135,12 @@ flag = true

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        managed_preferences_base64: Some(
            base64::prelude::BASE64_STANDARD.encode(
                r#"
[nested]
value = "managed"
flag = false
"#
                .as_bytes(),
            ),
        ),
        macos_managed_config_requirements_base64: None,
        managed_preferences_base64: Some(encoded),
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let state = load_config_layers_state(tmp.path(), &[] as &[(String, TomlValue)], overrides)
        .await
        .expect("load config");
    let loaded = state.effective_config();
    let nested = loaded
        .get("nested")
@@ -201,108 +153,6 @@ flag = false
    assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
}

#[cfg(target_os = "macos")]
#[tokio::test]
async fn managed_preferences_requirements_are_applied() -> anyhow::Result<()> {
    use base64::Engine;

    let tmp = tempdir()?;

    let state = load_config_layers_state(
        tmp.path(),
        Some(AbsolutePathBuf::try_from(tmp.path())?),
        &[] as &[(String, TomlValue)],
        LoaderOverrides {
            managed_config_path: Some(tmp.path().join("managed_config.toml")),
            managed_preferences_base64: Some(String::new()),
            macos_managed_config_requirements_base64: Some(
                base64::prelude::BASE64_STANDARD.encode(
                    r#"
allowed_approval_policies = ["never"]
allowed_sandbox_modes = ["read-only"]
"#
                    .as_bytes(),
                ),
            ),
        },
    )
    .await?;

    assert_eq!(
        state.requirements().approval_policy.value(),
        AskForApproval::Never
    );
    assert_eq!(
        *state.requirements().sandbox_policy.get(),
        SandboxPolicy::ReadOnly
    );
    assert!(
        state
            .requirements()
            .approval_policy
            .can_set(&AskForApproval::OnRequest)
            .is_err()
    );
    assert!(
        state
            .requirements()
            .sandbox_policy
            .can_set(&SandboxPolicy::WorkspaceWrite {
                writable_roots: Vec::new(),
                network_access: false,
                exclude_tmpdir_env_var: false,
                exclude_slash_tmp: false,
            })
            .is_err()
    );

    Ok(())
}

#[cfg(target_os = "macos")]
#[tokio::test]
async fn managed_preferences_requirements_take_precedence() -> anyhow::Result<()> {
    use base64::Engine;

    let tmp = tempdir()?;
    let managed_path = tmp.path().join("managed_config.toml");

    tokio::fs::write(&managed_path, "approval_policy = \"on-request\"\n").await?;

    let state = load_config_layers_state(
        tmp.path(),
        Some(AbsolutePathBuf::try_from(tmp.path())?),
        &[] as &[(String, TomlValue)],
        LoaderOverrides {
            managed_config_path: Some(managed_path),
            managed_preferences_base64: Some(String::new()),
            macos_managed_config_requirements_base64: Some(
                base64::prelude::BASE64_STANDARD.encode(
                    r#"
allowed_approval_policies = ["never"]
"#
                    .as_bytes(),
                ),
            ),
        },
    )
    .await?;

    assert_eq!(
        state.requirements().approval_policy.value(),
        AskForApproval::Never
    );
    assert!(
        state
            .requirements()
            .approval_policy
            .can_set(&AskForApproval::OnRequest)
            .is_err()
    );

    Ok(())
}

#[tokio::test(flavor = "current_thread")]
async fn load_requirements_toml_produces_expected_constraints() -> anyhow::Result<()> {
    let tmp = tempdir()?;
@@ -326,7 +176,7 @@ allowed_approval_policies = ["never", "on-request"]
    let config_requirements: ConfigRequirements = config_requirements_toml.try_into()?;
    assert_eq!(
        config_requirements.approval_policy.value(),
        AskForApproval::Never
        AskForApproval::OnRequest
    );
    config_requirements
        .approval_policy
@@ -339,209 +189,3 @@ allowed_approval_policies = ["never", "on-request"]
    );
    Ok(())
}

#[tokio::test]
async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();
    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let config = layers.effective_config();
    let foo = config
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");
    Ok(())
}

#[tokio::test]
async fn project_paths_resolve_relative_to_dot_codex_and_override_in_order() -> std::io::Result<()>
{
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let root_cfg = r#"
experimental_instructions_file = "root.txt"
"#;
    let nested_cfg = r#"
experimental_instructions_file = "child.txt"
"#;
    tokio::fs::write(project_root.join(".codex").join(CONFIG_TOML_FILE), root_cfg).await?;
    tokio::fs::write(nested.join(".codex").join(CONFIG_TOML_FILE), nested_cfg).await?;
    tokio::fs::write(
        project_root.join(".codex").join("root.txt"),
        "root instructions",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join("child.txt"),
        "child instructions",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;

    let config = ConfigBuilder::default()
        .codex_home(codex_home)
        .harness_overrides(ConfigOverrides {
            cwd: Some(nested.clone()),
            ..ConfigOverrides::default()
        })
        .build()
        .await?;

    assert_eq!(
        config.base_instructions.as_deref(),
        Some("child instructions")
    );

    Ok(())
}

#[tokio::test]
async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(&nested).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::Project { .. }))
        .collect();
    assert_eq!(
        vec![&ConfigLayerEntry {
            name: super::ConfigLayerSource::Project {
                dot_codex_folder: AbsolutePathBuf::from_absolute_path(project_root.join(".codex"))?,
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        }],
        project_layers
    );

    Ok(())
}

#[tokio::test]
async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".hg"), "hg").await?;
    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    tokio::fs::write(
        codex_home.join(CONFIG_TOML_FILE),
        r#"
project_root_markers = [".hg"]
"#,
    )
    .await?;

    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();
    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let merged = layers.effective_config();
    let foo = merged
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");

    Ok(())
}

@@ -79,8 +79,8 @@ impl ContextManager {

    // Estimate token usage using byte-based heuristics from the truncation helpers.
    // This is a coarse lower bound, not a tokenizer-accurate count.
    pub(crate) fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> {
        let model_family = turn_context.client.get_model_family();
    pub(crate) async fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> {
        let model_family = turn_context.client.get_model_family().await;
        let base_tokens =
            i64::try_from(approx_token_count(model_family.base_instructions.as_str()))
                .unwrap_or(i64::MAX);

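// Editor's note: an illustrative sketch (not part of the diff) of the kind of
// byte-based heuristic `approx_token_count` implies. The 4-bytes-per-token
// divisor is an assumption for illustration, not the crate's actual constant.
fn approx_token_count_sketch(text: &str) -> usize {
    // Coarse lower bound: roughly four bytes of UTF-8 per token for
    // English-like text; rounds up, so any non-empty string counts as at
    // least one token.
    text.len().div_ceil(4)
}
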
@@ -10,7 +10,7 @@ use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::models_manager::manager::ModelsManager;
use crate::openai_models::models_manager::ModelsManager;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;

@@ -90,6 +90,10 @@ pub enum CodexErr {
    #[error("spawn failed: child stdout/stderr not captured")]
    Spawn,

    /// Returned when the models list is outdated and needs to be refreshed.
    #[error("remote models list is outdated")]
    OutdatedModels,

    /// Returned by run_command_stream when the user pressed Ctrl‑C (SIGINT). Session uses this to
    /// surface a polite FunctionCallOutput back to the model instead of crashing the CLI.
    #[error("interrupted (Ctrl-C). Something went wrong? Hit `/feedback` to report the issue.")]

@@ -3,11 +3,7 @@ use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

use arc_swap::ArcSwap;

use crate::command_safety::is_dangerous_command::requires_initial_appoval;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigLayerStackOrdering;
use codex_execpolicy::AmendError;
use codex_execpolicy::Decision;
use codex_execpolicy::Error as ExecPolicyRuleError;
@@ -21,6 +17,7 @@ use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use thiserror::Error;
use tokio::fs;
use tokio::sync::RwLock;
use tokio::task::spawn_blocking;

use crate::bash::parse_shell_lc_plain_commands;
@@ -83,141 +80,20 @@ pub enum ExecPolicyUpdateError {
    FeatureDisabled,
}

pub(crate) struct ExecPolicyManager {
    policy: ArcSwap<Policy>,
}

impl ExecPolicyManager {
    pub(crate) fn new(policy: Arc<Policy>) -> Self {
        Self {
            policy: ArcSwap::from(policy),
        }
    }

    pub(crate) async fn load(
        features: &Features,
        config_stack: &ConfigLayerStack,
    ) -> Result<Self, ExecPolicyError> {
        let policy = load_exec_policy_for_features(features, config_stack).await?;
        Ok(Self::new(Arc::new(policy)))
    }

    pub(crate) fn current(&self) -> Arc<Policy> {
        self.policy.load_full()
    }

    pub(crate) async fn create_exec_approval_requirement_for_command(
        &self,
        features: &Features,
        command: &[String],
        approval_policy: AskForApproval,
        sandbox_policy: &SandboxPolicy,
        sandbox_permissions: SandboxPermissions,
    ) -> ExecApprovalRequirement {
        let exec_policy = self.current();
        let commands =
            parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
        let heuristics_fallback = |cmd: &[String]| {
            if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) {
                Decision::Prompt
            } else {
                Decision::Allow
            }
        };
        let evaluation = exec_policy.check_multiple(commands.iter(), &heuristics_fallback);

        match evaluation.decision {
            Decision::Forbidden => ExecApprovalRequirement::Forbidden {
                reason: FORBIDDEN_REASON.to_string(),
            },
            Decision::Prompt => {
                if matches!(approval_policy, AskForApproval::Never) {
                    ExecApprovalRequirement::Forbidden {
                        reason: PROMPT_CONFLICT_REASON.to_string(),
                    }
                } else {
                    ExecApprovalRequirement::NeedsApproval {
                        reason: derive_prompt_reason(&evaluation),
                        proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
                            try_derive_execpolicy_amendment_for_prompt_rules(
                                &evaluation.matched_rules,
                            )
                        } else {
                            None
                        },
                    }
                }
            }
            Decision::Allow => ExecApprovalRequirement::Skip {
                // Bypass sandbox if execpolicy allows the command
                bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| {
                    is_policy_match(rule_match) && rule_match.decision() == Decision::Allow
                }),
                proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
                    try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules)
                } else {
                    None
                },
            },
        }
    }

    pub(crate) async fn append_amendment_and_update(
        &self,
        codex_home: &Path,
        amendment: &ExecPolicyAmendment,
    ) -> Result<(), ExecPolicyUpdateError> {
        let policy_path = default_policy_path(codex_home);
        let prefix = amendment.command.clone();
        spawn_blocking({
            let policy_path = policy_path.clone();
            let prefix = prefix.clone();
            move || blocking_append_allow_prefix_rule(&policy_path, &prefix)
        })
        .await
        .map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })?
        .map_err(|source| ExecPolicyUpdateError::AppendRule {
            path: policy_path,
            source,
        })?;

        let mut updated_policy = self.current().as_ref().clone();
        updated_policy.add_prefix_rule(&prefix, Decision::Allow)?;
        self.policy.store(Arc::new(updated_policy));
        Ok(())
    }
}

impl Default for ExecPolicyManager {
    fn default() -> Self {
        Self::new(Arc::new(Policy::empty()))
    }
}

async fn load_exec_policy_for_features(
pub(crate) async fn load_exec_policy_for_features(
    features: &Features,
    config_stack: &ConfigLayerStack,
    codex_home: &Path,
) -> Result<Policy, ExecPolicyError> {
    if !features.enabled(Feature::ExecPolicy) {
        Ok(Policy::empty())
    } else {
        load_exec_policy(config_stack).await
        load_exec_policy(codex_home).await
    }
}

pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy, ExecPolicyError> {
    // Iterate the layers in increasing order of precedence, adding the *.rules
    // from each layer, so that higher-precedence layers can override
    // rules defined in lower-precedence ones.
    let mut policy_paths = Vec::new();
    for layer in config_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst) {
        if let Some(config_folder) = layer.config_folder() {
            #[expect(clippy::expect_used)]
            let policy_dir = config_folder.join(RULES_DIR_NAME).expect("safe join");
            let layer_policy_paths = collect_policy_files(&policy_dir).await?;
            policy_paths.extend(layer_policy_paths);
        }
    }
pub async fn load_exec_policy(codex_home: &Path) -> Result<Policy, ExecPolicyError> {
    let policy_dir = codex_home.join(RULES_DIR_NAME);
    let policy_paths = collect_policy_files(&policy_dir).await?;

    let mut parser = PolicyParser::new();
    for policy_path in &policy_paths {
@@ -238,15 +114,46 @@ pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy,
    }

    let policy = parser.build();
    tracing::debug!("loaded execpolicy from {} files", policy_paths.len());
    tracing::debug!(
        "loaded execpolicy from {} files in {}",
        policy_paths.len(),
        policy_dir.display()
    );

    Ok(policy)
}

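// Editor's note: an illustrative sketch (not part of the diff) of the layered
// loading described in the comment above. File names are hypothetical; the
// prefix_rule syntax matches the test fixtures later in this file.
//
//     ~/.codex/rules/default.rules       prefix_rule(pattern=["rm"], decision="forbidden")
//     <repo>/.codex/rules/project.rules  prefix_rule(pattern=["rm"], decision="prompt")
//
// Because layers are visited from lowest precedence to highest, the project
// rule is parsed after the user rule and can override the decision for `rm`.
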
fn default_policy_path(codex_home: &Path) -> PathBuf {
pub(crate) fn default_policy_path(codex_home: &Path) -> PathBuf {
    codex_home.join(RULES_DIR_NAME).join(DEFAULT_POLICY_FILE)
}

pub(crate) async fn append_execpolicy_amendment_and_update(
    codex_home: &Path,
    current_policy: &Arc<RwLock<Policy>>,
    prefix: &[String],
) -> Result<(), ExecPolicyUpdateError> {
    let policy_path = default_policy_path(codex_home);
    let prefix = prefix.to_vec();
    spawn_blocking({
        let policy_path = policy_path.clone();
        let prefix = prefix.clone();
        move || blocking_append_allow_prefix_rule(&policy_path, &prefix)
    })
    .await
    .map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })?
    .map_err(|source| ExecPolicyUpdateError::AppendRule {
        path: policy_path,
        source,
    })?;

    current_policy
        .write()
        .await
        .add_prefix_rule(&prefix, Decision::Allow)?;

    Ok(())
}

/// Derive a proposed execpolicy amendment when a command requires user approval
/// - If any execpolicy rule prompts, return None, because an amendment would not skip that policy requirement.
/// - Otherwise return the first heuristics Prompt.
@@ -310,8 +217,60 @@ fn derive_prompt_reason(evaluation: &Evaluation) -> Option<String> {
    })
}

async fn collect_policy_files(dir: impl AsRef<Path>) -> Result<Vec<PathBuf>, ExecPolicyError> {
    let dir = dir.as_ref();
pub(crate) async fn create_exec_approval_requirement_for_command(
    exec_policy: &Arc<RwLock<Policy>>,
    features: &Features,
    command: &[String],
    approval_policy: AskForApproval,
    sandbox_policy: &SandboxPolicy,
    sandbox_permissions: SandboxPermissions,
) -> ExecApprovalRequirement {
    let commands = parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
    let heuristics_fallback = |cmd: &[String]| {
        if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) {
            Decision::Prompt
        } else {
            Decision::Allow
        }
    };
    let policy = exec_policy.read().await;
    let evaluation = policy.check_multiple(commands.iter(), &heuristics_fallback);

    match evaluation.decision {
        Decision::Forbidden => ExecApprovalRequirement::Forbidden {
            reason: FORBIDDEN_REASON.to_string(),
        },
        Decision::Prompt => {
            if matches!(approval_policy, AskForApproval::Never) {
                ExecApprovalRequirement::Forbidden {
                    reason: PROMPT_CONFLICT_REASON.to_string(),
                }
            } else {
                ExecApprovalRequirement::NeedsApproval {
                    reason: derive_prompt_reason(&evaluation),
                    proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
                        try_derive_execpolicy_amendment_for_prompt_rules(&evaluation.matched_rules)
                    } else {
                        None
                    },
                }
            }
        }
        Decision::Allow => ExecApprovalRequirement::Skip {
            // Bypass sandbox if execpolicy allows the command
            bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| {
                is_policy_match(rule_match) && rule_match.decision() == Decision::Allow
            }),
            proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) {
                try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules)
            } else {
                None
            },
        },
    }
}

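// Editor's note: a summary (not part of the diff) of the decision table the
// function above implements, plus a hypothetical call site.
//
//     policy Forbidden                       -> Forbidden (hard stop)
//     policy Prompt + AskForApproval::Never  -> Forbidden (cannot prompt, so refuse)
//     policy Prompt otherwise                -> NeedsApproval (possibly with an amendment)
//     policy Allow                           -> Skip (optionally bypassing the sandbox)
//
//     let requirement = create_exec_approval_requirement_for_command(
//         &policy, &features, &command, AskForApproval::OnRequest,
//         &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault,
//     )
//     .await;
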
async fn collect_policy_files(dir: &Path) -> Result<Vec<PathBuf>, ExecPolicyError> {
    let mut read_dir = match fs::read_dir(dir).await {
        Ok(read_dir) => read_dir,
        Err(err) if err.kind() == ErrorKind::NotFound => return Ok(Vec::new()),
@@ -354,54 +313,30 @@ async fn collect_policy_files(dir: impl AsRef<Path>) -> Result<Vec<PathBuf>, Exe

    policy_paths.sort();

    tracing::debug!(
        "loaded {} .rules files in {}",
        policy_paths.len(),
        dir.display()
    );
    Ok(policy_paths)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config_loader::ConfigLayerEntry;
    use crate::config_loader::ConfigLayerStack;
    use crate::config_loader::ConfigRequirements;
    use crate::features::Feature;
    use crate::features::Features;
    use codex_app_server_protocol::ConfigLayerSource;
    use codex_protocol::protocol::AskForApproval;
    use codex_protocol::protocol::SandboxPolicy;
    use codex_utils_absolute_path::AbsolutePathBuf;
    use pretty_assertions::assert_eq;
    use std::fs;
    use std::path::Path;
    use std::sync::Arc;
    use tempfile::tempdir;
    use toml::Value as TomlValue;

    fn config_stack_for_dot_codex_folder(dot_codex_folder: &Path) -> ConfigLayerStack {
        let dot_codex_folder = AbsolutePathBuf::from_absolute_path(dot_codex_folder)
            .expect("absolute dot_codex_folder");
        let layer = ConfigLayerEntry::new(
            ConfigLayerSource::Project { dot_codex_folder },
            TomlValue::Table(Default::default()),
        );
        ConfigLayerStack::new(vec![layer], ConfigRequirements::default()).expect("ConfigLayerStack")
    }

    #[tokio::test]
    async fn returns_empty_policy_when_feature_disabled() {
        let mut features = Features::with_defaults();
        features.disable(Feature::ExecPolicy);
        let temp_dir = tempdir().expect("create temp dir");
        let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());

        let manager = ExecPolicyManager::load(&features, &config_stack)
        let policy = load_exec_policy_for_features(&features, temp_dir.path())
            .await
            .expect("manager result");
        let policy = manager.current();
            .expect("policy result");

        let commands = [vec!["rm".to_string()]];
        assert_eq!(
@@ -432,7 +367,6 @@ mod tests {
    #[tokio::test]
    async fn loads_policies_from_policy_subdirectory() {
        let temp_dir = tempdir().expect("create temp dir");
        let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());
        let policy_dir = temp_dir.path().join(RULES_DIR_NAME);
        fs::create_dir_all(&policy_dir).expect("create policy dir");
        fs::write(
@@ -441,7 +375,7 @@ mod tests {
        )
        .expect("write policy file");

        let policy = load_exec_policy(&config_stack)
        let policy = load_exec_policy(temp_dir.path())
            .await
            .expect("policy result");
        let command = [vec!["rm".to_string()]];
@@ -460,14 +394,13 @@ mod tests {
    #[tokio::test]
    async fn ignores_policies_outside_policy_dir() {
        let temp_dir = tempdir().expect("create temp dir");
        let config_stack = config_stack_for_dot_codex_folder(temp_dir.path());
        fs::write(
            temp_dir.path().join("root.rules"),
            r#"prefix_rule(pattern=["ls"], decision="prompt")"#,
        )
        .expect("write policy file");

        let policy = load_exec_policy(&config_stack)
        let policy = load_exec_policy(temp_dir.path())
            .await
            .expect("policy result");
        let command = [vec!["ls".to_string()]];
@@ -483,69 +416,6 @@ mod tests {
        );
    }

    #[tokio::test]
    async fn loads_policies_from_multiple_config_layers() -> anyhow::Result<()> {
        let user_dir = tempdir()?;
        let project_dir = tempdir()?;

        let user_policy_dir = user_dir.path().join(RULES_DIR_NAME);
        fs::create_dir_all(&user_policy_dir)?;
        fs::write(
            user_policy_dir.join("user.rules"),
            r#"prefix_rule(pattern=["rm"], decision="forbidden")"#,
        )?;

        let project_policy_dir = project_dir.path().join(RULES_DIR_NAME);
        fs::create_dir_all(&project_policy_dir)?;
        fs::write(
            project_policy_dir.join("project.rules"),
            r#"prefix_rule(pattern=["ls"], decision="prompt")"#,
        )?;

        let user_config_toml =
            AbsolutePathBuf::from_absolute_path(user_dir.path().join("config.toml"))?;
        let project_dot_codex_folder = AbsolutePathBuf::from_absolute_path(project_dir.path())?;
        let layers = vec![
            ConfigLayerEntry::new(
                ConfigLayerSource::User {
                    file: user_config_toml,
                },
                TomlValue::Table(Default::default()),
            ),
            ConfigLayerEntry::new(
                ConfigLayerSource::Project {
                    dot_codex_folder: project_dot_codex_folder,
                },
                TomlValue::Table(Default::default()),
            ),
        ];
        let config_stack = ConfigLayerStack::new(layers, ConfigRequirements::default())?;

        let policy = load_exec_policy(&config_stack).await?;

        assert_eq!(
            Evaluation {
                decision: Decision::Forbidden,
                matched_rules: vec![RuleMatch::PrefixRuleMatch {
                    matched_prefix: vec!["rm".to_string()],
                    decision: Decision::Forbidden
                }],
            },
            policy.check_multiple([vec!["rm".to_string()]].iter(), &|_| Decision::Allow)
        );
        assert_eq!(
            Evaluation {
                decision: Decision::Prompt,
                matched_rules: vec![RuleMatch::PrefixRuleMatch {
                    matched_prefix: vec!["ls".to_string()],
                    decision: Decision::Prompt
                }],
            },
            policy.check_multiple([vec!["ls".to_string()]].iter(), &|_| Decision::Allow)
        );
        Ok(())
    }

    #[tokio::test]
    async fn evaluates_bash_lc_inner_commands() {
        let policy_src = r#"
@@ -555,7 +425,7 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));

        let forbidden_script = vec![
            "bash".to_string(),
@@ -563,16 +433,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
            "rm -rf /tmp".to_string(),
        ];

        let manager = ExecPolicyManager::new(policy);
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &forbidden_script,
                AskForApproval::OnRequest,
                &SandboxPolicy::DangerFullAccess,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &policy,
            &Features::with_defaults(),
            &forbidden_script,
            AskForApproval::OnRequest,
            &SandboxPolicy::DangerFullAccess,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -589,19 +458,18 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));
        let command = vec!["rm".to_string()];

        let manager = ExecPolicyManager::new(policy);
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::OnRequest,
                &SandboxPolicy::DangerFullAccess,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::OnRequest,
            &SandboxPolicy::DangerFullAccess,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -619,19 +487,18 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));
        let command = vec!["rm".to_string()];

        let manager = ExecPolicyManager::new(policy);
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::Never,
                &SandboxPolicy::DangerFullAccess,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::Never,
            &SandboxPolicy::DangerFullAccess,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -645,16 +512,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
    async fn exec_approval_requirement_falls_back_to_heuristics() {
        let command = vec!["cargo".to_string(), "build".to_string()];

        let manager = ExecPolicyManager::default();
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let empty_policy = Arc::new(RwLock::new(Policy::empty()));
        let requirement = create_exec_approval_requirement_for_command(
            &empty_policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::UnlessTrusted,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -672,7 +539,7 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));
        let command = vec![
            "bash".to_string(),
            "-lc".to_string(),
@@ -680,15 +547,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        ];

        assert_eq!(
            ExecPolicyManager::new(policy)
                .create_exec_approval_requirement_for_command(
                    &Features::with_defaults(),
                    &command,
                    AskForApproval::UnlessTrusted,
                    &SandboxPolicy::DangerFullAccess,
                    SandboxPermissions::UseDefault,
                )
                .await,
            create_exec_approval_requirement_for_command(
                &policy,
                &Features::with_defaults(),
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::DangerFullAccess,
                SandboxPermissions::UseDefault,
            )
            .await,
            ExecApprovalRequirement::NeedsApproval {
                reason: None,
                proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![
@@ -701,16 +568,14 @@ prefix_rule(pattern=["rm"], decision="forbidden")
    #[tokio::test]
    async fn append_execpolicy_amendment_updates_policy_and_file() {
        let codex_home = tempdir().expect("create temp dir");
        let current_policy = Arc::new(RwLock::new(Policy::empty()));
        let prefix = vec!["echo".to_string(), "hello".to_string()];
        let manager = ExecPolicyManager::default();

        manager
            .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(prefix))
        append_execpolicy_amendment_and_update(codex_home.path(), &current_policy, &prefix)
            .await
            .expect("update policy");
        let updated_policy = manager.current();

        let evaluation = updated_policy.check(
        let evaluation = current_policy.read().await.check(
            &["echo".to_string(), "hello".to_string(), "world".to_string()],
            &|_| Decision::Allow,
        );
@@ -734,11 +599,10 @@ prefix_rule(pattern=["rm"], decision="forbidden")
    #[tokio::test]
    async fn append_execpolicy_amendment_rejects_empty_prefix() {
        let codex_home = tempdir().expect("create temp dir");
        let manager = ExecPolicyManager::default();
        let current_policy = Arc::new(RwLock::new(Policy::empty()));

        let result = manager
            .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(vec![]))
            .await;
        let result =
            append_execpolicy_amendment_and_update(codex_home.path(), &current_policy, &[]).await;

        assert!(matches!(
            result,
@@ -753,16 +617,16 @@ prefix_rule(pattern=["rm"], decision="forbidden")
    async fn proposed_execpolicy_amendment_is_present_for_single_command_without_policy_match() {
        let command = vec!["cargo".to_string(), "build".to_string()];

        let manager = ExecPolicyManager::default();
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let empty_policy = Arc::new(RwLock::new(Policy::empty()));
        let requirement = create_exec_approval_requirement_for_command(
            &empty_policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::UnlessTrusted,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -780,16 +644,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        let mut features = Features::with_defaults();
        features.disable(Feature::ExecPolicy);

        let manager = ExecPolicyManager::default();
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &features,
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &Arc::new(RwLock::new(Policy::empty())),
            &features,
            &command,
            AskForApproval::UnlessTrusted,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -807,19 +670,18 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));
        let command = vec!["rm".to_string()];

        let manager = ExecPolicyManager::new(policy);
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::OnRequest,
                &SandboxPolicy::DangerFullAccess,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::OnRequest,
            &SandboxPolicy::DangerFullAccess,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -837,16 +699,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
            "-lc".to_string(),
            "cargo build && echo ok".to_string(),
        ];
        let manager = ExecPolicyManager::default();
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &Arc::new(RwLock::new(Policy::empty())),
            &Features::with_defaults(),
            &command,
            AskForApproval::UnlessTrusted,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -867,7 +728,7 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));

        let command = vec![
            "bash".to_string(),
@@ -876,15 +737,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        ];

        assert_eq!(
            ExecPolicyManager::new(policy)
                .create_exec_approval_requirement_for_command(
                    &Features::with_defaults(),
                    &command,
                    AskForApproval::UnlessTrusted,
                    &SandboxPolicy::ReadOnly,
                    SandboxPermissions::UseDefault,
                )
                .await,
            create_exec_approval_requirement_for_command(
                &policy,
                &Features::with_defaults(),
                &command,
                AskForApproval::UnlessTrusted,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await,
            ExecApprovalRequirement::NeedsApproval {
                reason: None,
                proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![
@@ -898,16 +759,15 @@ prefix_rule(pattern=["rm"], decision="forbidden")
    async fn proposed_execpolicy_amendment_is_present_when_heuristics_allow() {
        let command = vec!["echo".to_string(), "safe".to_string()];

        let manager = ExecPolicyManager::default();
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::OnRequest,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &Arc::new(RwLock::new(Policy::empty())),
            &Features::with_defaults(),
            &command,
            AskForApproval::OnRequest,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,
@@ -925,19 +785,18 @@ prefix_rule(pattern=["rm"], decision="forbidden")
        parser
            .parse("test.rules", policy_src)
            .expect("parse policy");
        let policy = Arc::new(parser.build());
        let policy = Arc::new(RwLock::new(parser.build()));
        let command = vec!["echo".to_string(), "safe".to_string()];

        let manager = ExecPolicyManager::new(policy);
        let requirement = manager
            .create_exec_approval_requirement_for_command(
                &Features::with_defaults(),
                &command,
                AskForApproval::OnRequest,
                &SandboxPolicy::ReadOnly,
                SandboxPermissions::UseDefault,
            )
            .await;
        let requirement = create_exec_approval_requirement_for_command(
            &policy,
            &Features::with_defaults(),
            &command,
            AskForApproval::OnRequest,
            &SandboxPolicy::ReadOnly,
            SandboxPermissions::UseDefault,
        )
        .await;

        assert_eq!(
            requirement,

@@ -8,7 +8,6 @@
|
||||
use crate::config::ConfigToml;
|
||||
use crate::config::profile::ConfigProfile;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
@@ -70,6 +69,8 @@ pub enum Feature {
|
||||
// Experimental
|
||||
/// Use the single unified PTY-backed exec tool.
|
||||
UnifiedExec,
|
||||
/// Enable experimental RMCP features such as OAuth login.
|
||||
RmcpClient,
|
||||
/// Include the freeform apply_patch tool.
|
||||
ApplyPatchFreeform,
|
||||
/// Allow the model to request web searches.
|
||||
@@ -92,8 +93,6 @@ pub enum Feature {
|
||||
Tui2,
|
||||
/// Enable discovery and injection of skills.
|
||||
    Skills,
    /// Enforce UTF8 output in Powershell.
    PowershellUtf8,
}

impl Feature {

@@ -227,6 +226,7 @@ impl Features {
        let base_legacy = LegacyFeatureToggles {
            experimental_use_freeform_apply_patch: cfg.experimental_use_freeform_apply_patch,
            experimental_use_unified_exec_tool: cfg.experimental_use_unified_exec_tool,
+            experimental_use_rmcp_client: cfg.experimental_use_rmcp_client,
            tools_web_search: cfg.tools.as_ref().and_then(|t| t.web_search),
            tools_view_image: cfg.tools.as_ref().and_then(|t| t.view_image),
            ..Default::default()
@@ -243,6 +243,7 @@ impl Features {
                .experimental_use_freeform_apply_patch,

            experimental_use_unified_exec_tool: config_profile.experimental_use_unified_exec_tool,
+            experimental_use_rmcp_client: config_profile.experimental_use_rmcp_client,
            tools_web_search: config_profile.tools_web_search,
            tools_view_image: config_profile.tools_view_image,
        };
@@ -255,10 +256,6 @@ impl Features {

        features
    }
-
-    pub fn enabled_features(&self) -> Vec<Feature> {
-        self.enabled.iter().copied().collect()
-    }
}

/// Keys accepted in `[features]` tables.
@@ -277,7 +274,7 @@ pub fn is_known_feature_key(key: &str) -> bool {
}

/// Deserializable features table for TOML.
-#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
+#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct FeaturesToml {
    #[serde(flatten)]
    pub entries: BTreeMap<String, bool>,
@@ -298,7 +295,7 @@ pub const FEATURES: &[FeatureSpec] = &[
        id: Feature::GhostCommit,
        key: "undo",
        stage: Stage::Stable,
-        default_enabled: false,
+        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::ParallelToolCalls,
@@ -351,6 +348,13 @@ pub const FEATURES: &[FeatureSpec] = &[
        },
        default_enabled: false,
    },
+    // Unstable features.
+    FeatureSpec {
+        id: Feature::RmcpClient,
+        key: "rmcp_client",
+        stage: Stage::Experimental,
+        default_enabled: false,
+    },
    FeatureSpec {
        id: Feature::ApplyPatchFreeform,
        key: "apply_patch_freeform",
@@ -393,12 +397,6 @@ pub const FEATURES: &[FeatureSpec] = &[
        stage: Stage::Experimental,
        default_enabled: true,
    },
-    FeatureSpec {
-        id: Feature::PowershellUtf8,
-        key: "powershell_utf8",
-        stage: Stage::Experimental,
-        default_enabled: false,
-    },
    FeatureSpec {
        id: Feature::Tui2,
        key: "tui2",

@@ -17,6 +17,10 @@ const ALIASES: &[Alias] = &[
        legacy_key: "experimental_use_unified_exec_tool",
        feature: Feature::UnifiedExec,
    },
+    Alias {
+        legacy_key: "experimental_use_rmcp_client",
+        feature: Feature::RmcpClient,
+    },
    Alias {
        legacy_key: "experimental_use_freeform_apply_patch",
        feature: Feature::ApplyPatchFreeform,
@@ -46,6 +50,7 @@ pub struct LegacyFeatureToggles {
    pub include_apply_patch_tool: Option<bool>,
    pub experimental_use_freeform_apply_patch: Option<bool>,
    pub experimental_use_unified_exec_tool: Option<bool>,
+    pub experimental_use_rmcp_client: Option<bool>,
    pub tools_web_search: Option<bool>,
    pub tools_view_image: Option<bool>,
}
@@ -70,6 +75,12 @@ impl LegacyFeatureToggles {
            self.experimental_use_unified_exec_tool,
            "experimental_use_unified_exec_tool",
        );
+        set_if_some(
+            features,
+            Feature::RmcpClient,
+            self.experimental_use_rmcp_client,
+            "experimental_use_rmcp_client",
+        );
        set_if_some(
            features,
            Feature::WebSearchRequest,
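The added `set_if_some` call above follows the file's pattern of carrying legacy boolean config keys over to the new feature flags, one alias per toggle. A minimal sketch of that pattern (the enum, the set representation, and the `set_if_some` signature here are simplified stand-ins, not the crate's actual types):

```rust
use std::collections::HashSet;

// Simplified stand-ins for the crate's types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Feature {
    UnifiedExec,
    RmcpClient,
}

// Apply a legacy Option<bool> toggle to the feature set, noting the legacy
// key it came from (what the real implementation does with the key string
// is not shown in this hunk).
fn set_if_some(
    features: &mut HashSet<Feature>,
    feature: Feature,
    value: Option<bool>,
    legacy_key: &str,
) {
    if let Some(enabled) = value {
        eprintln!("note: `{legacy_key}` is a legacy alias for {feature:?}");
        if enabled {
            features.insert(feature);
        } else {
            features.remove(&feature);
        }
    }
}

fn main() {
    let mut features = HashSet::new();
    // e.g. `experimental_use_rmcp_client = true` in config.toml
    set_if_some(
        &mut features,
        Feature::RmcpClient,
        Some(true),
        "experimental_use_rmcp_client",
    );
    // An unset legacy key leaves the feature set untouched.
    set_if_some(
        &mut features,
        Feature::UnifiedExec,
        None,
        "experimental_use_unified_exec_tool",
    );
    assert!(features.contains(&Feature::RmcpClient));
    assert!(!features.contains(&Feature::UnifiedExec));
}
```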
@@ -33,7 +33,7 @@ pub mod git_info;
pub mod landlock;
pub mod mcp;
mod mcp_connection_manager;
-pub mod models_manager;
+pub mod openai_models;
pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY;
pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD;
pub use mcp_connection_manager::SandboxState;
@@ -79,60 +79,26 @@ pub const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(10);
/// Default timeout for individual tool calls.
const DEFAULT_TOOL_TIMEOUT: Duration = Duration::from_secs(60);

-/// The Responses API requires tool names to match `^[a-zA-Z0-9_-]+$`.
-/// MCP server/tool names are user-controlled, so sanitize the fully-qualified
-/// name we expose to the model by replacing any disallowed character with `_`.
-fn sanitize_responses_api_tool_name(name: &str) -> String {
-    let mut sanitized = String::with_capacity(name.len());
-    for c in name.chars() {
-        if c.is_ascii_alphanumeric() || c == '_' || c == '-' {
-            sanitized.push(c);
-        } else {
-            sanitized.push('_');
-        }
-    }
-
-    if sanitized.is_empty() {
-        "_".to_string()
-    } else {
-        sanitized
-    }
-}
-
-fn sha1_hex(s: &str) -> String {
-    let mut hasher = Sha1::new();
-    hasher.update(s.as_bytes());
-    let sha1 = hasher.finalize();
-    format!("{sha1:x}")
-}

fn qualify_tools<I>(tools: I) -> HashMap<String, ToolInfo>
where
    I: IntoIterator<Item = ToolInfo>,
{
    let mut used_names = HashSet::new();
-    let mut seen_raw_names = HashSet::new();
    let mut qualified_tools = HashMap::new();
    for tool in tools {
-        let qualified_name_raw = format!(
+        let mut qualified_name = format!(
            "mcp{}{}{}{}",
            MCP_TOOL_NAME_DELIMITER, tool.server_name, MCP_TOOL_NAME_DELIMITER, tool.tool_name
        );
-        if !seen_raw_names.insert(qualified_name_raw.clone()) {
-            warn!("skipping duplicated tool {}", qualified_name_raw);
-            continue;
-        }
-
-        // Start from a "pretty" name (sanitized), then deterministically disambiguate on
-        // collisions by appending a hash of the *raw* (unsanitized) qualified name. This
-        // ensures tools like `foo.bar` and `foo_bar` don't collapse to the same key.
-        let mut qualified_name = sanitize_responses_api_tool_name(&qualified_name_raw);
-
-        // Enforce length constraints early; use the raw name for the hash input so the
-        // output remains stable even when sanitization changes.
        if qualified_name.len() > MAX_TOOL_NAME_LENGTH {
-            let sha1_str = sha1_hex(&qualified_name_raw);
+            let mut hasher = Sha1::new();
+            hasher.update(qualified_name.as_bytes());
+            let sha1 = hasher.finalize();
+            let sha1_str = format!("{sha1:x}");

            // Truncate to make room for the hash suffix
            let prefix_len = MAX_TOOL_NAME_LENGTH - sha1_str.len();

            qualified_name = format!("{}{}", &qualified_name[..prefix_len], sha1_str);
        }

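The hunk above drops the sanitize-then-hash naming scheme. The deleted comments explain the design it implemented: names were first sanitized to the Responses API charset (`^[a-zA-Z0-9_-]+$`), then disambiguated on collision or overflow with a SHA-1 of the *raw* qualified name, so that, for example, `foo.bar` and `foo_bar` could not collapse to the same key. A minimal standalone sketch of that removed idea (the 64-character limit, the helper names, and the collision check are illustrative assumptions, not the crate's API):

```rust
use sha1::{Digest, Sha1};
use std::collections::HashSet;

const MAX_TOOL_NAME_LENGTH: usize = 64; // assumed limit, for illustration only

// Replace anything outside `^[a-zA-Z0-9_-]+$` with `_`.
fn sanitize(name: &str) -> String {
    name.chars()
        .map(|c| if c.is_ascii_alphanumeric() || c == '_' || c == '-' { c } else { '_' })
        .collect()
}

// Sanitize first; on a collision or an over-long name, append a SHA-1 of the
// *raw* name so the result stays stable even if the sanitizer changes later.
fn disambiguate(raw: &str, taken: &HashSet<String>) -> String {
    let mut name = sanitize(raw);
    if taken.contains(&name) || name.len() > MAX_TOOL_NAME_LENGTH {
        let digest = format!("{:x}", Sha1::digest(raw.as_bytes()));
        let keep = MAX_TOOL_NAME_LENGTH.saturating_sub(digest.len());
        name.truncate(keep.min(name.len()));
        name.push_str(&digest);
    }
    name
}

fn main() {
    let mut taken = HashSet::new();
    let a = disambiguate("mcp__foo.bar__list", &taken);
    taken.insert(a.clone());
    let b = disambiguate("mcp__foo_bar__list", &taken);
    assert_ne!(a, b); // same sanitized form, distinct hash suffixes
    println!("{a}\n{b}");
}
```

After this change the raw qualified name is used directly and only over-length names are hashed; with no sanitization step, the `foo.bar`/`foo_bar` collision the old comment worried about no longer arises from it.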
@@ -1069,28 +1035,6 @@ mod tests {
        );
    }

-    #[test]
-    fn test_qualify_tools_sanitizes_invalid_characters() {
-        let tools = vec![create_test_tool("server.one", "tool.two")];
-
-        let qualified_tools = qualify_tools(tools);
-
-        assert_eq!(qualified_tools.len(), 1);
-        let (qualified_name, tool) = qualified_tools.into_iter().next().expect("one tool");
-        assert_eq!(qualified_name, "mcp__server_one__tool_two");
-
-        // The key is sanitized for OpenAI, but we keep original parts for the actual MCP call.
-        assert_eq!(tool.server_name, "server.one");
-        assert_eq!(tool.tool_name, "tool.two");
-
-        assert!(
-            qualified_name
-                .chars()
-                .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-'),
-            "qualified name must be Responses API compatible: {qualified_name:?}"
-        );
-    }
-
    #[test]
    fn tool_filter_allows_by_default() {
        let filter = ToolFilter::default();

@@ -1,4 +1,4 @@
-pub mod cache;
-pub mod manager;
+mod cache;
pub mod model_family;
pub mod model_presets;
+pub mod models_manager;
@@ -3,6 +3,7 @@ use codex_protocol::openai_models::ApplyPatchToolType;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ReasoningEffort;
+use codex_protocol::openai_models::ReasoningSummaryFormat;

use crate::config::Config;
use crate::truncate::TruncationPolicy;
@@ -47,6 +48,9 @@ pub struct ModelFamily {
    // The reasoning effort to use for this model family when none is explicitly chosen.
    pub default_reasoning_effort: Option<ReasoningEffort>,

+    // Define if we need a special handling of reasoning summary
+    pub reasoning_summary_format: ReasoningSummaryFormat,
+
    /// Whether this model supports parallel tool calls when using the
    /// Responses API.
    pub supports_parallel_tool_calls: bool,
@@ -84,6 +88,9 @@ impl ModelFamily {
        if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
            self.supports_reasoning_summaries = supports_reasoning_summaries;
        }
+        if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
+            self.reasoning_summary_format = reasoning_summary_format.clone();
+        }
        if let Some(context_window) = config.model_context_window {
            self.context_window = Some(context_window);
        }
@@ -110,6 +117,7 @@ impl ModelFamily {
            supported_reasoning_levels: _,
            shell_type,
            visibility: _,
+            minimal_client_version: _,
            supported_in_api: _,
            priority: _,
            upgrade: _,
@@ -121,6 +129,7 @@ impl ModelFamily {
            truncation_policy,
            supports_parallel_tool_calls,
            context_window,
+            reasoning_summary_format,
            experimental_supported_tools,
        } = model;

@@ -136,6 +145,7 @@ impl ModelFamily {
        self.truncation_policy = truncation_policy.into();
        self.supports_parallel_tool_calls = supports_parallel_tool_calls;
        self.context_window = context_window;
+        self.reasoning_summary_format = reasoning_summary_format;
        self.experimental_supported_tools = experimental_supported_tools;
    }

@@ -166,6 +176,7 @@ macro_rules! model_family {
            context_window: Some(CONTEXT_WINDOW_272K),
            auto_compact_token_limit: None,
            supports_reasoning_summaries: false,
+            reasoning_summary_format: ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -240,6 +251,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            experimental_supported_tools: vec![
                "grep_files".to_string(),
@@ -259,6 +271,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            shell_type: ConfigShellToolType::ShellCommand,
@@ -287,6 +300,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            shell_type: ConfigShellToolType::ShellCommand,
@@ -299,6 +313,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            shell_type: ConfigShellToolType::ShellCommand,
@@ -311,6 +326,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            shell_type: ConfigShellToolType::ShellCommand,
@@ -326,6 +342,7 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
        model_family!(
            slug, slug,
            supports_reasoning_summaries: true,
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
            apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
            shell_type: ConfigShellToolType::ShellCommand,
@@ -400,6 +417,7 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
        context_window: None,
        auto_compact_token_limit: None,
        supports_reasoning_summaries: false,
+        reasoning_summary_format: ReasoningSummaryFormat::None,
        supports_parallel_tool_calls: false,
        apply_patch_tool_type: None,
        base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -416,6 +434,7 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
#[cfg(test)]
mod tests {
    use super::*;
+    use codex_protocol::openai_models::ClientVersion;
    use codex_protocol::openai_models::ModelVisibility;
    use codex_protocol::openai_models::ReasoningEffortPreset;
    use codex_protocol::openai_models::TruncationPolicyConfig;
@@ -432,6 +451,7 @@ mod tests {
            }],
            shell_type: shell,
            visibility: ModelVisibility::List,
+            minimal_client_version: ClientVersion(0, 1, 0),
            supported_in_api: true,
            priority: 1,
            upgrade: None,
@@ -443,6 +463,7 @@ mod tests {
            truncation_policy: TruncationPolicyConfig::bytes(10_000),
            supports_parallel_tool_calls: false,
            context_window: None,
+            reasoning_summary_format: ReasoningSummaryFormat::None,
            experimental_supported_tools: Vec::new(),
        }
    }
@@ -506,6 +527,7 @@ mod tests {
            experimental_supported_tools: vec!["local".to_string()],
            truncation_policy: TruncationPolicy::Bytes(10_000),
            context_window: Some(100),
+            reasoning_summary_format: ReasoningSummaryFormat::None,
        );

        let updated = family.with_remote_overrides(vec![ModelInfo {
@@ -519,6 +541,7 @@ mod tests {
            }],
            shell_type: ConfigShellToolType::ShellCommand,
            visibility: ModelVisibility::List,
+            minimal_client_version: ClientVersion(0, 1, 0),
            supported_in_api: true,
            priority: 10,
            upgrade: None,
@@ -530,6 +553,7 @@ mod tests {
            truncation_policy: TruncationPolicyConfig::tokens(2_000),
            supports_parallel_tool_calls: true,
            context_window: Some(400_000),
+            reasoning_summary_format: ReasoningSummaryFormat::Experimental,
            experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()],
        }]);

@@ -548,6 +572,10 @@ mod tests {
        assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000));
        assert!(updated.supports_parallel_tool_calls);
        assert_eq!(updated.context_window, Some(400_000));
+        assert_eq!(
+            updated.reasoning_summary_format,
+            ReasoningSummaryFormat::Experimental
+        );
        assert_eq!(
            updated.experimental_supported_tools,
            vec!["alpha".to_string(), "beta".to_string()]
@@ -24,8 +24,8 @@ use crate::default_client::build_reqwest_client;
use crate::error::Result as CoreResult;
use crate::features::Feature;
use crate::model_provider_info::ModelProviderInfo;
-use crate::models_manager::model_family::ModelFamily;
-use crate::models_manager::model_presets::builtin_model_presets;
+use crate::openai_models::model_family::ModelFamily;
+use crate::openai_models::model_presets::builtin_model_presets;

const MODEL_CACHE_FILE: &str = "models_cache.json";
const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300);
@@ -77,7 +77,7 @@ impl ModelsManager {
    }

    /// Fetch the latest remote models, using the on-disk cache when still fresh.
-    pub async fn refresh_available_models_with_cache(&self, config: &Config) -> CoreResult<()> {
+    pub async fn try_refresh_available_models(&self, config: &Config) -> CoreResult<()> {
        if !config.features.enabled(Feature::RemoteModels)
            || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey)
        {
@@ -86,15 +86,13 @@ impl ModelsManager {
        if self.try_load_cache().await {
            return Ok(());
        }
-        self.refresh_available_models_no_cache(config.features.enabled(Feature::RemoteModels))
-            .await
+        self.refresh_available_models(config).await
    }

-    pub(crate) async fn refresh_available_models_no_cache(
-        &self,
-        remote_models_feature: bool,
-    ) -> CoreResult<()> {
-        if !remote_models_feature || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey) {
+    pub async fn refresh_available_models(&self, config: &Config) -> CoreResult<()> {
+        if !config.features.enabled(Feature::RemoteModels)
+            || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey)
+        {
            return Ok(());
        }
        let auth = self.auth_manager.auth();
@@ -109,6 +107,8 @@ impl ModelsManager {
            .await
            .map_err(map_api_error)?;

+        let etag = etag.filter(|value| !value.is_empty());
+
        self.apply_remote_models(models.clone()).await;
        *self.etag.write().await = etag.clone();
        self.persist_cache(&models, etag).await;
@@ -116,7 +116,7 @@ impl ModelsManager {
    }

    pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> {
-        if let Err(err) = self.refresh_available_models_with_cache(config).await {
+        if let Err(err) = self.try_refresh_available_models(config).await {
            error!("failed to refresh available models: {err}");
        }
        let remote_models = self.remote_models(config).await;
@@ -139,11 +139,15 @@ impl ModelsManager {
            .with_config_overrides(config)
    }

+    pub async fn get_models_etag(&self) -> Option<String> {
+        self.etag.read().await.clone()
+    }
+
    pub async fn get_model(&self, model: &Option<String>, config: &Config) -> String {
        if let Some(model) = model.as_ref() {
            return model.to_string();
        }
-        if let Err(err) = self.refresh_available_models_with_cache(config).await {
+        if let Err(err) = self.try_refresh_available_models(config).await {
            error!("failed to refresh available models: {err}");
        }
        // if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model
@@ -161,18 +165,6 @@ impl ModelsManager {
        }
        OPENAI_DEFAULT_API_MODEL.to_string()
    }
-    pub async fn refresh_if_new_etag(&self, etag: String, remote_models_feature: bool) {
-        let current_etag = self.get_etag().await;
-        if current_etag.clone().is_some() && current_etag.as_deref() == Some(etag.as_str()) {
-            return;
-        }
-        if let Err(err) = self
-            .refresh_available_models_no_cache(remote_models_feature)
-            .await
-        {
-            error!("failed to refresh available models: {err}");
-        }
-    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn get_model_offline(model: Option<&str>) -> String {
@@ -185,10 +177,6 @@ impl ModelsManager {
        Self::find_family_for_model(model).with_config_overrides(config)
    }

-    async fn get_etag(&self) -> Option<String> {
-        self.etag.read().await.clone()
-    }
-
    /// Replace the cached remote models and rebuild the derived presets list.
    async fn apply_remote_models(&self, models: Vec<ModelInfo>) {
        *self.remote_models.write().await = models;
@@ -312,14 +300,26 @@ impl ModelsManager {

/// Convert a client version string to a whole version string (e.g. "1.2.3-alpha.4" -> "1.2.3")
fn format_client_version_to_whole() -> String {
-    format!(
-        "{}.{}.{}",
+    format_client_version_from_parts(
        env!("CARGO_PKG_VERSION_MAJOR"),
        env!("CARGO_PKG_VERSION_MINOR"),
-        env!("CARGO_PKG_VERSION_PATCH")
+        env!("CARGO_PKG_VERSION_PATCH"),
    )
}

+fn format_client_version_from_parts(major: &str, minor: &str, patch: &str) -> String {
+    const DEV_VERSION: &str = "0.0.0";
+    const FALLBACK_VERSION: &str = "99.99.99";
+
+    let normalized = format!("{major}.{minor}.{patch}");
+
+    if normalized == DEV_VERSION {
+        FALLBACK_VERSION.to_string()
+    } else {
+        normalized
+    }
+}
+
#[cfg(test)]
mod tests {
    use super::cache::ModelsCache;
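The new `format_client_version_from_parts` helper normalizes the Cargo version parts and maps a dev build's `0.0.0` to `99.99.99` — presumably so local dev builds always satisfy the `minimal_client_version` gates introduced earlier in this diff. A quick standalone check of that rule (a local copy of the helper, for illustration only):

```rust
// Local copy of the helper above, for a quick check of the fallback rule:
// dev builds report "0.0.0", which would fail any version floor, so they
// are mapped to a permissive "99.99.99".
fn format_client_version_from_parts(major: &str, minor: &str, patch: &str) -> String {
    const DEV_VERSION: &str = "0.0.0";
    const FALLBACK_VERSION: &str = "99.99.99";

    let normalized = format!("{major}.{minor}.{patch}");
    if normalized == DEV_VERSION {
        FALLBACK_VERSION.to_string()
    } else {
        normalized
    }
}

fn main() {
    assert_eq!(format_client_version_from_parts("0", "0", "0"), "99.99.99");
    assert_eq!(format_client_version_from_parts("1", "2", "3"), "1.2.3");
}
```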
@@ -366,6 +366,7 @@ mod tests {
            "truncation_policy": {"mode": "bytes", "limit": 10_000},
            "supports_parallel_tool_calls": false,
            "context_window": null,
+            "reasoning_summary_format": "none",
            "experimental_supported_tools": [],
        }))
        .expect("valid model")
@@ -417,7 +418,7 @@ mod tests {
        let manager = ModelsManager::with_provider(auth_manager, provider);

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("refresh succeeds");
        let cached_remote = manager.remote_models(&config).await;
@@ -476,7 +477,7 @@ mod tests {
        let manager = ModelsManager::with_provider(auth_manager, provider);

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("first refresh succeeds");
        assert_eq!(
@@ -487,7 +488,7 @@ mod tests {

        // Second call should read from cache and avoid the network.
        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("cached refresh succeeds");
        assert_eq!(
@@ -530,7 +531,7 @@ mod tests {
        let manager = ModelsManager::with_provider(auth_manager, provider);

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("initial refresh succeeds");

@@ -555,7 +556,7 @@ mod tests {
        .await;

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("second refresh succeeds");
        assert_eq!(
@@ -601,7 +602,7 @@ mod tests {
        manager.cache_ttl = Duration::ZERO;

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("initial refresh succeeds");

@@ -616,7 +617,7 @@ mod tests {
        .await;

        manager
-            .refresh_available_models_with_cache(&config)
+            .try_refresh_available_models(&config)
            .await
            .expect("second refresh succeeds");

@@ -8,30 +8,6 @@ use crate::shell::detect_shell_type;

const POWERSHELL_FLAGS: &[&str] = &["-nologo", "-noprofile", "-command", "-c"];

-/// Prefixed command for powershell shell calls to force UTF-8 console output.
-pub(crate) const UTF8_OUTPUT_PREFIX: &str =
-    "[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\n";
-
-pub(crate) fn prefix_powershell_script_with_utf8(command: &[String]) -> Vec<String> {
-    let Some((_, script)) = extract_powershell_command(command) else {
-        return command.to_vec();
-    };
-
-    let trimmed = script.trim_start();
-    let script = if trimmed.starts_with(UTF8_OUTPUT_PREFIX) {
-        script.to_string()
-    } else {
-        format!("{UTF8_OUTPUT_PREFIX}{script}")
-    };
-
-    let mut command: Vec<String> = command[..(command.len() - 1)]
-        .iter()
-        .map(std::string::ToString::to_string)
-        .collect();
-    command.push(script);
-    command
-}
-
/// Extract the PowerShell script body from an invocation such as:
///
/// - ["pwsh", "-NoProfile", "-Command", "Get-ChildItem -Recurse | Select-String foo"]
@@ -46,10 +22,7 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> {
    }

    let shell = &command[0];
-    if !matches!(
-        detect_shell_type(&PathBuf::from(shell)),
-        Some(ShellType::PowerShell)
-    ) {
+    if detect_shell_type(&PathBuf::from(shell)) != Some(ShellType::PowerShell) {
        return None;
    }

@@ -63,7 +36,7 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> {
        }
        if flag.eq_ignore_ascii_case("-Command") || flag.eq_ignore_ascii_case("-c") {
            let script = &command[i + 1];
-            return Some((shell, script));
+            return Some((shell, script.as_str()));
        }
        i += 1;
    }

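The removed `prefix_powershell_script_with_utf8` helper was idempotent by design: the `trim_start().starts_with(...)` check keeps the UTF-8 preamble from stacking when a command is prefixed twice. A minimal standalone sketch of that behavior (this skips the shell and `-Command`/`-c` flag detection the real helper performed via `extract_powershell_command`, and simply assumes the script is the last argument):

```rust
const UTF8_OUTPUT_PREFIX: &str =
    "[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\n";

// Simplified stand-in: prefix the final argument (assumed to be the script
// body) unless it already starts with the UTF-8 preamble.
fn prefix_script(command: &[String]) -> Vec<String> {
    let mut out = command.to_vec();
    if let Some(script) = out.last_mut() {
        if !script.trim_start().starts_with(UTF8_OUTPUT_PREFIX) {
            *script = format!("{UTF8_OUTPUT_PREFIX}{script}");
        }
    }
    out
}

fn main() {
    let cmd: Vec<String> = ["pwsh", "-NoProfile", "-Command", "Get-ChildItem"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let once = prefix_script(&cmd);
    let twice = prefix_script(&once);
    assert_eq!(once, twice); // idempotent thanks to the prefix check
    println!("{}", once.last().unwrap());
}
```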
180	codex-rs/core/src/skills/assets/samples/plan/SKILL.md	Normal file
@@ -0,0 +1,180 @@
---
name: plan
description: Generate a plan for how an agent should accomplish a complex coding task. Use when a user asks for a plan, and optionally when they want to save, find, read, update, or delete plan files in $CODEX_HOME/plans (default ~/.codex/plans).
metadata:
  short-description: Generate a plan for a complex task
---

# Plan

## Overview

Draft structured plans that clarify intent, scope, requirements, action items, testing/validation, and risks.

Optionally, save plans to disk as markdown files with YAML frontmatter and free-form content. When drafting in chat, output only the plan body without frontmatter; add frontmatter only when saving to disk. Only write to the plans folder; do not modify the repository codebase.

This skill can also be used to draft codebase or system overviews.

## Core rules

- Resolve the plans directory as `$CODEX_HOME/plans` or `~/.codex/plans` when `CODEX_HOME` is not set.
- Create the plans directory if it does not exist.
- Never write to the repo; only read files to understand context.
- Require frontmatter with **only** `name` and `description` (single-line values) for on-disk plans.
- When presenting a draft plan in chat, omit frontmatter and start at `# Plan`.
- Enforce naming rules: short, lower-case, hyphen-delimited; filename must equal `<name>.md`.
- If a plan is not found, state it clearly and offer to create one.
- Allow overview-style plans that document flows, architecture, or context without a work checklist.

## Decide the task

1. **Find/list**: discover plans by frontmatter summary; confirm if multiple matches exist.
2. **Read/use**: validate frontmatter; present summary and full contents.
3. **Create**: inspect repo read-only; choose plan style (implementation vs overview); draft plan; write to plans directory only.
4. **Update**: load plan; revise content and/or description; preserve frontmatter keys; overwrite the plan file.
5. **Delete**: confirm intent, then remove the plan file if asked.

## Plan discovery

- Prefer `scripts/list_plans.py` for quick summaries.
- Use `scripts/read_plan_frontmatter.py` to validate a specific plan.
- If name mismatches filename or frontmatter is missing fields, call it out and ask whether to fix.

## Plan creation workflow

1. Scan context quickly: read README.md and obvious docs (docs/, CONTRIBUTING.md, ARCHITECTURE.md); skim likely touched files; identify constraints (language, frameworks, CI/test commands, deployment).
2. Ask follow-ups only if blocked: at most 1-2 questions, prefer multiple-choice. If unsure but not blocked, state assumptions and proceed.
3. Identify scope, constraints, and data model/API implications (or capture existing behavior for an overview).
4. Draft either an ordered implementation plan or a structured overview plan with diagrams/notes as needed.
5. Immediately output the plan body only (no frontmatter), then ask the user whether they want to (1) make changes, (2) implement it, or (3) save it as a plan file.
6. If the user wants to save it, prepend frontmatter and save the plan under the computed plans directory using `scripts/create_plan.py`.

## Plan update workflow

- Re-read the plan and related code/docs before updating.
- Keep the plan name stable unless the user explicitly wants a rename.
- If renaming, update both frontmatter `name` and filename together.

## Scripts (low-freedom helpers)

Create a plan file (body only; frontmatter is written for you). Run from the plan skill directory:

```bash
python ./scripts/create_plan.py \
  --name codex-rate-limit-overview \
  --description "Scope and update plan for Codex rate limiting" \
  --body-file /tmp/plan-body.md
```

Read frontmatter summary for a plan (run from the plan skill directory):

```bash
python ./scripts/read_plan_frontmatter.py ~/.codex/plans/codex-rate-limit-overview.md
```

List plan summaries (optional filter; run from the plan skill directory):

```bash
python ./scripts/list_plans.py --query "rate limit"
```

## Plan file format

Use one of the structures below for the plan body. When drafting, output only the body (no frontmatter). When saving, prepend this frontmatter:

```markdown
---
name: <plan-name>
description: <1-line summary>
---
```

### Implementation plan body template

```markdown
# Plan

<1-3 sentences: intent, scope, and approach.>

## Requirements
- <Requirement 1>
- <Requirement 2>

## Scope
- In:
- Out:

## Files and entry points
- <File/module/entry point 1>
- <File/module/entry point 2>

## Data model / API changes
- <If applicable, describe schema or contract changes>

## Action items
[ ] <Step 1>
[ ] <Step 2>
[ ] <Step 3>
[ ] <Step 4>
[ ] <Step 5>
[ ] <Step 6>

## Testing and validation
- <Tests, commands, or validation steps>

## Risks and edge cases
- <Risk 1>
- <Risk 2>

## Open questions
- <Question 1>
- <Question 2>
```

### Overview plan body template

```markdown
# Plan

<1-3 sentences: intent and scope of the overview.>

## Overview
<Describe the system, flow, or architecture at a high level.>

## Diagrams
<Include text or Mermaid diagrams if helpful.>

## Key file references
- <File/module/entry point 1>
- <File/module/entry point 2>

## Auth / routing / behavior notes
- <Capture relevant differences (e.g., auth modes, routing paths).>

## Current status
- <What is live today vs pending work, if known.>

## Action items
- None (overview only).

## Testing and validation
- None (overview only).

## Risks and edge cases
- None (overview only).

## Open questions
- None.
```

## Writing guidance

- Start with 1 short paragraph describing intent and approach.
- Keep action items ordered and atomic (discovery -> changes -> tests -> rollout); use verb-first phrasing.
- Scale action item count to complexity (simple: 1-2; complex: up to about 10).
- Include file/entry-point hints and concrete validation steps where useful.
- Always include testing/validation and risks/edge cases in implementation plans; include safe rollout/rollback when relevant.
- Use open questions only when necessary (max 3).
- Avoid vague steps, micro-steps, and code snippets; keep the plan implementation-agnostic.
- For overview plans, keep action items minimal and set non-applicable sections to "None."
114	codex-rs/core/src/skills/assets/samples/plan/scripts/create_plan.py	Normal file
@@ -0,0 +1,114 @@
#!/usr/bin/env python3
"""Create or overwrite a plan markdown file in $CODEX_HOME/plans."""

from __future__ import annotations

import argparse
import sys
from pathlib import Path

from plan_utils import get_plans_dir, validate_plan_name

DEFAULT_TEMPLATE = """# Plan

<1-3 sentences: intent, scope, and approach.>

## Requirements
- <Requirement 1>
- <Requirement 2>

## Scope
- In:
- Out:

## Files and entry points
- <File/module/entry point 1>
- <File/module/entry point 2>

## Data model / API changes
- <If applicable, describe schema or contract changes>

## Action items
[ ] <Step 1>
[ ] <Step 2>
[ ] <Step 3>
[ ] <Step 4>
[ ] <Step 5>
[ ] <Step 6>

## Testing and validation
- <Tests, commands, or validation steps>

## Risks and edge cases
- <Risk 1>
- <Risk 2>

## Open questions
- <Question 1>
- <Question 2>
"""


def read_body(args: argparse.Namespace) -> str | None:
    if args.template:
        return DEFAULT_TEMPLATE
    if args.body_file:
        return Path(args.body_file).read_text(encoding="utf-8")
    if not sys.stdin.isatty():
        return sys.stdin.read()
    return None


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Create a plan file under $CODEX_HOME/plans or ~/.codex/plans."
    )
    parser.add_argument("--name", required=True, help="Plan name (lower-case, hyphen-delimited).")
    parser.add_argument("--description", required=True, help="Short plan description.")
    parser.add_argument(
        "--body-file",
        help="Path to markdown body (without frontmatter). If omitted, read from stdin.",
    )
    parser.add_argument(
        "--template",
        action="store_true",
        help="Write a template body instead of reading from stdin or --body-file.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite the plan file if it already exists.",
    )
    args = parser.parse_args()

    name = args.name.strip()
    description = args.description.strip()
    validate_plan_name(name)
    if not description or "\n" in description:
        raise SystemExit("Description must be a single line.")

    body = read_body(args)
    if body is None:
        raise SystemExit("Provide --body-file, stdin, or --template to supply plan content.")

    body = body.strip()
    if not body:
        raise SystemExit("Plan body cannot be empty.")
    if body.lstrip().startswith("---"):
        raise SystemExit("Plan body should not include frontmatter.")

    plans_dir = get_plans_dir()
    plans_dir.mkdir(parents=True, exist_ok=True)
    plan_path = plans_dir / f"{name}.md"

    if plan_path.exists() and not args.overwrite:
        raise SystemExit(f"Plan already exists: {plan_path}. Use --overwrite to replace.")

    content = f"---\nname: {name}\ndescription: {description}\n---\n\n{body}\n"
    plan_path.write_text(content, encoding="utf-8")
    print(str(plan_path))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
49	codex-rs/core/src/skills/assets/samples/plan/scripts/list_plans.py	Normal file
@@ -0,0 +1,49 @@
#!/usr/bin/env python3
"""List plan summaries by reading frontmatter only."""

from __future__ import annotations

import argparse
import json

from plan_utils import get_plans_dir, parse_frontmatter


def main() -> int:
    parser = argparse.ArgumentParser(description="List plan summaries from $CODEX_HOME/plans.")
    parser.add_argument("--query", help="Case-insensitive substring to filter name/description.")
    parser.add_argument("--json", action="store_true", help="Emit JSON output.")
    args = parser.parse_args()

    plans_dir = get_plans_dir()
    if not plans_dir.exists():
        raise SystemExit(f"Plans directory not found: {plans_dir}")

    query = args.query.lower() if args.query else None
    items = []
    for path in sorted(plans_dir.glob("*.md")):
        try:
            data = parse_frontmatter(path)
        except ValueError:
            continue
        name = data.get("name")
        description = data.get("description")
        if not name or not description:
            continue
        if query:
            haystack = f"{name} {description}".lower()
            if query not in haystack:
                continue
        items.append({"name": name, "description": description, "path": str(path)})

    if args.json:
        print(json.dumps(items))
    else:
        for item in items:
            print(f"{item['name']}\t{item['description']}\t{item['path']}")

    return 0


if __name__ == "__main__":
    raise SystemExit(main())
53	codex-rs/core/src/skills/assets/samples/plan/scripts/plan_utils.py	Normal file
@@ -0,0 +1,53 @@
#!/usr/bin/env python3
"""Shared helpers for plan scripts."""

from __future__ import annotations

import os
import re
from pathlib import Path

_NAME_RE = re.compile(r"^[a-z0-9]+(-[a-z0-9]+)*$")


def get_codex_home() -> Path:
    """Return CODEX_HOME if set, else ~/.codex."""
    return Path(os.environ.get("CODEX_HOME", "~/.codex")).expanduser()


def get_plans_dir() -> Path:
    return get_codex_home() / "plans"


def validate_plan_name(name: str) -> None:
    if not name or not _NAME_RE.match(name):
        raise ValueError(
            "Invalid plan name. Use short, lower-case, hyphen-delimited names "
            "(e.g., codex-rate-limit-overview)."
        )


def parse_frontmatter(path: Path) -> dict:
    """Parse YAML frontmatter from a markdown file without reading the body."""
    with path.open("r", encoding="utf-8") as handle:
        first = handle.readline()
        if first.strip() != "---":
            raise ValueError("Frontmatter must start with '---'.")

        data: dict[str, str] = {}
        for line in handle:
            stripped = line.strip()
            if stripped == "---":
                return data
            if not stripped or stripped.startswith("#"):
                continue
            if ":" not in line:
                raise ValueError(f"Invalid frontmatter line: {line.rstrip()}")
            key, value = line.split(":", 1)
            key = key.strip()
            value = value.strip()
            if value and len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                value = value[1:-1]
            data[key] = value

    raise ValueError("Frontmatter must end with '---'.")
41	codex-rs/core/src/skills/assets/samples/plan/scripts/read_plan_frontmatter.py	Normal file
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
"""Read plan frontmatter without loading the full markdown body."""

from __future__ import annotations

import argparse
import json
from pathlib import Path

from plan_utils import parse_frontmatter


def main() -> int:
    parser = argparse.ArgumentParser(description="Read name/description from plan frontmatter.")
    parser.add_argument("plan_path", help="Path to the plan markdown file.")
    parser.add_argument("--json", action="store_true", help="Emit JSON output.")
    args = parser.parse_args()

    path = Path(args.plan_path).expanduser()
    if not path.exists():
        raise SystemExit(f"Plan not found: {path}")

    data = parse_frontmatter(path)
    name = data.get("name")
    description = data.get("description")
    if not name or not description:
        raise SystemExit("Frontmatter must include name and description.")

    payload = {"name": name, "description": description, "path": str(path)}
    if args.json:
        print(json.dumps(payload))
    else:
        print(f"name: {name}")
        print(f"description: {description}")
        print(f"path: {path}")

    return 0


if __name__ == "__main__":
    raise SystemExit(main())
@@ -216,7 +216,7 @@ Follow these steps in order, skipping only if there is a clear reason why they a
### Skill Naming

- Use lowercase letters, digits, and hyphens only; normalize user-provided titles to hyphen-case (e.g., "Plan Mode" -> `plan-mode`).
-- When generating names, generate a name under 64 characters (letters, digits, hyphens).
+- When generating names, generate a name under 30 characters (letters, digits, hyphens).
- Prefer short, verb-led phrases that describe the action.
- Namespace by tool when it improves clarity or triggering (e.g., `gh-address-comments`, `linear-address-issue`).
- Name the skill folder exactly after the skill name.

@@ -17,7 +17,7 @@ import re
import sys
from pathlib import Path

-MAX_SKILL_NAME_LENGTH = 64
+MAX_SKILL_NAME_LENGTH = 30
ALLOWED_RESOURCES = {"scripts", "references", "assets"}

SKILL_TEMPLATE = """---
@@ -37,23 +37,23 @@ description: [TODO: Complete and informative explanation of what the skill does

**1. Workflow-Based** (best for sequential processes)
- Works well when there are clear step-by-step procedures
-- Example: DOCX skill with "Workflow Decision Tree" -> "Reading" -> "Creating" -> "Editing"
-- Structure: ## Overview -> ## Workflow Decision Tree -> ## Step 1 -> ## Step 2...
+- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing"
+- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...

**2. Task-Based** (best for tool collections)
- Works well when the skill offers different operations/capabilities
-- Example: PDF skill with "Quick Start" -> "Merge PDFs" -> "Split PDFs" -> "Extract Text"
-- Structure: ## Overview -> ## Quick Start -> ## Task Category 1 -> ## Task Category 2...
+- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text"
+- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...

**3. Reference/Guidelines** (best for standards or specifications)
- Works well for brand guidelines, coding standards, or requirements
-- Example: Brand styling with "Brand Guidelines" -> "Colors" -> "Typography" -> "Features"
-- Structure: ## Overview -> ## Guidelines -> ## Specifications -> ## Usage...
+- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features"
+- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage...

**4. Capabilities-Based** (best for integrated systems)
- Works well when the skill provides multiple interrelated features
-- Example: Product Management with "Core Capabilities" -> numbered capability list
-- Structure: ## Overview -> ## Core Capabilities -> ### 1. Feature -> ### 2. Feature...
+- Example: Product Management with "Core Capabilities" → numbered capability list
+- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature...

Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations).

@@ -212,7 +212,7 @@ def parse_resources(raw_resources):
    invalid = sorted({item for item in resources if item not in ALLOWED_RESOURCES})
    if invalid:
        allowed = ", ".join(sorted(ALLOWED_RESOURCES))
-        print(f"[ERROR] Unknown resource type(s): {', '.join(invalid)}")
+        print(f"❌ Error: Unknown resource type(s): {', '.join(invalid)}")
        print(f" Allowed: {allowed}")
        sys.exit(1)
    deduped = []
@@ -233,23 +233,23 @@ def create_resource_dirs(skill_dir, skill_name, skill_title, resources, include_
                example_script = resource_dir / "example.py"
                example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name))
                example_script.chmod(0o755)
-                print("[OK] Created scripts/example.py")
+                print("✅ Created scripts/example.py")
            else:
-                print("[OK] Created scripts/")
+                print("✅ Created scripts/")
        elif resource == "references":
            if include_examples:
                example_reference = resource_dir / "api_reference.md"
                example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title))
-                print("[OK] Created references/api_reference.md")
+                print("✅ Created references/api_reference.md")
            else:
-                print("[OK] Created references/")
+                print("✅ Created references/")
        elif resource == "assets":
            if include_examples:
                example_asset = resource_dir / "example_asset.txt"
                example_asset.write_text(EXAMPLE_ASSET)
-                print("[OK] Created assets/example_asset.txt")
+                print("✅ Created assets/example_asset.txt")
            else:
-                print("[OK] Created assets/")
+                print("✅ Created assets/")


def init_skill(skill_name, path, resources, include_examples):
@@ -270,15 +270,15 @@ def init_skill(skill_name, path, resources, include_examples):

    # Check if directory already exists
    if skill_dir.exists():
-        print(f"[ERROR] Skill directory already exists: {skill_dir}")
+        print(f"❌ Error: Skill directory already exists: {skill_dir}")
        return None

    # Create skill directory
    try:
        skill_dir.mkdir(parents=True, exist_ok=False)
-        print(f"[OK] Created skill directory: {skill_dir}")
+        print(f"✅ Created skill directory: {skill_dir}")
    except Exception as e:
-        print(f"[ERROR] Error creating directory: {e}")
+        print(f"❌ Error creating directory: {e}")
        return None

    # Create SKILL.md from template
@@ -288,9 +288,9 @@ def init_skill(skill_name, path, resources, include_examples):
    skill_md_path = skill_dir / "SKILL.md"
    try:
        skill_md_path.write_text(skill_content)
-        print("[OK] Created SKILL.md")
+        print("✅ Created SKILL.md")
    except Exception as e:
-        print(f"[ERROR] Error creating SKILL.md: {e}")
+        print(f"❌ Error creating SKILL.md: {e}")
        return None

    # Create resource directories if requested
@@ -298,11 +298,11 @@ def init_skill(skill_name, path, resources, include_examples):
        try:
            create_resource_dirs(skill_dir, skill_name, skill_title, resources, include_examples)
        except Exception as e:
-            print(f"[ERROR] Error creating resource directories: {e}")
+            print(f"❌ Error creating resource directories: {e}")
            return None

    # Print next steps
-    print(f"\n[OK] Skill '{skill_name}' initialized successfully at {skill_dir}")
+    print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}")
    print("\nNext steps:")
    print("1. Edit SKILL.md to complete the TODO items and update the description")
    if resources:
@@ -338,11 +338,11 @@ def main():
    raw_skill_name = args.skill_name
    skill_name = normalize_skill_name(raw_skill_name)
    if not skill_name:
-        print("[ERROR] Skill name must include at least one letter or digit.")
+        print("❌ Error: Skill name must include at least one letter or digit.")
        sys.exit(1)
    if len(skill_name) > MAX_SKILL_NAME_LENGTH:
        print(
-            f"[ERROR] Skill name '{skill_name}' is too long ({len(skill_name)} characters). "
+            f"❌ Error: Skill name '{skill_name}' is too long ({len(skill_name)} characters). "
            f"Maximum is {MAX_SKILL_NAME_LENGTH} characters."
        )
        sys.exit(1)
@@ -351,12 +351,12 @@ def main():

    resources = parse_resources(args.resources)
    if args.examples and not resources:
-        print("[ERROR] --examples requires --resources to be set.")
+        print("❌ Error: --examples requires --resources to be set.")
        sys.exit(1)

    path = args.path

-    print(f"Initializing skill: {skill_name}")
+    print(f"🚀 Initializing skill: {skill_name}")
    print(f" Location: {path}")
    if resources:
        print(f" Resources: {', '.join(resources)}")

@@ -32,27 +32,27 @@ def package_skill(skill_path, output_dir=None):

    # Validate skill folder exists
    if not skill_path.exists():
-        print(f"[ERROR] Skill folder not found: {skill_path}")
+        print(f"❌ Error: Skill folder not found: {skill_path}")
        return None

    if not skill_path.is_dir():
-        print(f"[ERROR] Path is not a directory: {skill_path}")
+        print(f"❌ Error: Path is not a directory: {skill_path}")
        return None

    # Validate SKILL.md exists
    skill_md = skill_path / "SKILL.md"
    if not skill_md.exists():
-        print(f"[ERROR] SKILL.md not found in {skill_path}")
+        print(f"❌ Error: SKILL.md not found in {skill_path}")
        return None

    # Run validation before packaging
-    print("Validating skill...")
+    print("🔍 Validating skill...")
    valid, message = validate_skill(skill_path)
    if not valid:
-        print(f"[ERROR] Validation failed: {message}")
+        print(f"❌ Validation failed: {message}")
        print(" Please fix the validation errors before packaging.")
        return None
-    print(f"[OK] {message}\n")
+    print(f"✅ {message}\n")

    # Determine output location
    skill_name = skill_path.name
@@ -75,11 +75,11 @@ def package_skill(skill_path, output_dir=None):
            zipf.write(file_path, arcname)
            print(f" Added: {arcname}")

-        print(f"\n[OK] Successfully packaged skill to: {skill_filename}")
+        print(f"\n✅ Successfully packaged skill to: {skill_filename}")
        return skill_filename

    except Exception as e:
-        print(f"[ERROR] Error creating .skill file: {e}")
+        print(f"❌ Error creating .skill file: {e}")
        return None


@@ -94,7 +94,7 @@ def main():
    skill_path = sys.argv[1]
    output_dir = sys.argv[2] if len(sys.argv) > 2 else None

-    print(f"Packaging skill: {skill_path}")
+    print(f"📦 Packaging skill: {skill_path}")
    if output_dir:
        print(f" Output directory: {output_dir}")
    print()

@@ -9,7 +9,7 @@ from pathlib import Path

import yaml

-MAX_SKILL_NAME_LENGTH = 64
+MAX_SKILL_NAME_LENGTH = 30


def validate_skill(skill_path):

Some files were not shown because too many files have changed in this diff.