diff --git a/.codex/skills/codex-pr-body/SKILL.md b/.codex/skills/codex-pr-body/SKILL.md new file mode 100644 index 0000000000..76b37b8750 --- /dev/null +++ b/.codex/skills/codex-pr-body/SKILL.md @@ -0,0 +1,59 @@ +--- +name: codex-pr-body +description: Update the title and body of one or more pull requests. +--- + +## Determining the PR(s) + +When this skill is invoked, the PR(s) to update may be specified explicitly, but in the common case, the PR(s) to update will be inferred from the branch / commit that the user is currently working on. For ordinary Git usage (i.e., not Sapling as discussed below), you may have to use a combination of `git branch` and `gh pr view --repo openai/codex --json number --jq '.number'` to determine the PR associated with the current branch / commit. + +## PR Body Contents + +When invoked, use `gh` to edit the pull request body and title to reflect the contents of the specified PR. Make sure to check the existing pull request body to see if there is key information that should be preserved. For example, NEVER remove an image in the existing pull request body, as the author may have no way to recover it if you remove it. + +It is critically important to explain _why_ the change is being made. If the current conversation in which this skill is invoked has discussed the motivation, be sure to capture this in the pull request body. + +The body should also explain _what_ changed, but this should appear after the _why_. + +Limit discussion to the _net change_ of the commit. It is generally frowned upon to discuss changes that were attempted but later undone in the course of the development of the pull request. When rewriting the pull request body, you may need to eliminate details such as these when they are no longer appropriate / of interest to future readers. + +Avoid references to absolute paths on my local disk. When talking about a path that is within the repository, simply use the repo-relative path. 
+ +It is generally helpful to discuss how the change was verified. That said, it is unnecessary to mention things that CI checks automatically, e.g., do not include "ran `just fmt`" as part of the test plan. Though identifying the new tests that were purposely introduced to verify the new behavior introduced by the pull request is often appropriate. + +Make use of Markdown to format the pull request professionally. Ensure "code things" appear in single backticks when referenced inline. Fenced code blocks are useful when referencing code or showing a shell transcript. Also, make use of GitHub permalinks when citing existing pieces of code that are relevant to the change. + +Make sure to reference any relevant pull requests or issues, though there should be no need to reference the pull request in its own PR body. + +If there is documentation that should be updated on https://developers.openai.com/codex as a result of this change, please note that in a separate section near the end of the pull request. Omit this section if there is no documentation that needs to be updated. + +## Working with Stacks + +Sometimes a pull request is composed of a stack of commits that build on one another. In these cases, the PR body should reflect the _net_ change introduced by the stack as a whole, rather than the individual commits that make up the stack. + +Similarly, sometimes a user may be using a tool like Sapling to leverage _stacked pull requests_, in which case the `base` of the PR may be a branch that is the `head` of another PR in the stack rather than `main`. In this case, be sure to discuss only the net change between the `base` and `head` of the PR that is being opened against that stacked base, rather than the changes relative to `main`. + +## Sapling + +If `.git/sl/store` is present, then this Git repository is governed by Sapling SCM (https://sapling-scm.com). 
+ +In Sapling, run the following to see if there is a GitHub pull request associated with the current revision: + +```shell +sl log --template '{github_pull_request_url}' -r . +``` + +Alternatively, you can run `sl sl` to see the current development branch and whether there is a GitHub pull request associated with the current commit. For example, if the output were: + +``` + @ cb032b31cf 72 minutes ago mbolin #11412 +╭─╯ tui: show non-file layer content in /debug-config +│ +o fdd0cd1de9 Today at 20:09 origin/main +│ +~ +``` + +- `@` indicates the current commit is `cb032b31cf` +- it is a development branch containing a single commit branched off of `origin/main` +- it is associated with GitHub pull request #11412 diff --git a/.github/actions/linux-code-sign/action.yml b/.github/actions/linux-code-sign/action.yml index 9eea95dfe1..12e521187f 100644 --- a/.github/actions/linux-code-sign/action.yml +++ b/.github/actions/linux-code-sign/action.yml @@ -12,7 +12,7 @@ runs: using: composite steps: - name: Install cosign - uses: sigstore/cosign-installer@v3.7.0 + uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0 - name: Cosign Linux artifacts shell: bash diff --git a/.github/actions/prepare-bazel-ci/action.yml b/.github/actions/prepare-bazel-ci/action.yml new file mode 100644 index 0000000000..598c6e4c16 --- /dev/null +++ b/.github/actions/prepare-bazel-ci/action.yml @@ -0,0 +1,44 @@ +name: prepare-bazel-ci +description: Prepare a Bazel CI job with shared setup, repository cache restore, and execution logs. +inputs: + target: + description: Target triple used for setup and cache namespacing. + required: true + install-test-prereqs: + description: Install Node.js and DotSlash for Bazel-backed test jobs. + required: false + default: "false" +outputs: + repository-cache-path: + description: Filesystem path used for the Bazel repository cache. 
+ value: ${{ steps.setup_bazel.outputs.repository-cache-path }} + +runs: + using: composite + steps: + - name: Set up Bazel CI + id: setup_bazel + uses: ./.github/actions/setup-bazel-ci + with: + target: ${{ inputs.target }} + install-test-prereqs: ${{ inputs.install-test-prereqs }} + + # Restore the Bazel repository cache explicitly so external dependencies + # do not need to be re-downloaded on every CI run. Keep restore failures + # non-fatal so transient cache-service errors degrade to a cold build + # instead of failing the job. + - name: Restore bazel repository cache + id: cache_bazel_repository_restore + continue-on-error: true + uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5 + with: + path: ${{ steps.setup_bazel.outputs.repository-cache-path }} + key: bazel-cache-${{ inputs.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} + restore-keys: | + bazel-cache-${{ inputs.target }} + + - name: Set up Bazel execution logs + shell: bash + run: | + mkdir -p "${RUNNER_TEMP}/bazel-execution-logs" + echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}" diff --git a/.github/actions/setup-bazel-ci/action.yml b/.github/actions/setup-bazel-ci/action.yml index a7e3b322c2..7c605c60b7 100644 --- a/.github/actions/setup-bazel-ci/action.yml +++ b/.github/actions/setup-bazel-ci/action.yml @@ -18,7 +18,7 @@ runs: steps: - name: Set up Node.js for js_repl tests if: inputs.install-test-prereqs == 'true' - uses: actions/setup-node@v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version-file: codex-rs/node-version.txt @@ -26,7 +26,7 @@ runs: # See https://github.com/openai/codex/pull/7617. 
- name: Install DotSlash if: inputs.install-test-prereqs == 'true' - uses: facebook/install-dotslash@v2 + uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2 - name: Make DotSlash available in PATH (Unix) if: inputs.install-test-prereqs == 'true' && runner.os != 'Windows' @@ -39,7 +39,7 @@ runs: run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe" - name: Set up Bazel - uses: bazelbuild/setup-bazelisk@v3 + uses: bazelbuild/setup-bazelisk@b39c379c82683a5f25d34f0d062761f62693e0b2 # v3 - name: Configure Bazel repository cache id: configure_bazel_repository_cache diff --git a/.github/actions/setup-rusty-v8-musl/action.yml b/.github/actions/setup-rusty-v8-musl/action.yml new file mode 100644 index 0000000000..871c73a268 --- /dev/null +++ b/.github/actions/setup-rusty-v8-musl/action.yml @@ -0,0 +1,49 @@ +name: setup-rusty-v8-musl +description: Download and verify musl rusty_v8 artifacts for Cargo builds. +inputs: + target: + description: Rust musl target triple. 
+ required: true + +runs: + using: composite + steps: + - name: Configure musl rusty_v8 artifact overrides and verify checksums + shell: bash + env: + TARGET: ${{ inputs.target }} + run: | + set -euo pipefail + + case "${TARGET}" in + x86_64-unknown-linux-musl|aarch64-unknown-linux-musl) + ;; + *) + echo "Unsupported musl rusty_v8 target: ${TARGET}" >&2 + exit 1 + ;; + esac + + version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)" + release_tag="rusty-v8-v${version}" + base_url="https://github.com/openai/codex/releases/download/${release_tag}" + binding_dir="${RUNNER_TEMP}/rusty_v8" + archive_path="${binding_dir}/librusty_v8_release_${TARGET}.a.gz" + binding_path="${binding_dir}/src_binding_release_${TARGET}.rs" + checksums_path="${binding_dir}/rusty_v8_release_${TARGET}.sha256" + checksums_source="${GITHUB_WORKSPACE}/third_party/v8/rusty_v8_${version//./_}.sha256" + + mkdir -p "${binding_dir}" + curl -fsSL "${base_url}/librusty_v8_release_${TARGET}.a.gz" -o "${archive_path}" + curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}" + grep -E " (librusty_v8_release_${TARGET}[.]a[.]gz|src_binding_release_${TARGET}[.]rs)$" \ + "${checksums_source}" > "${checksums_path}" + + if [[ "$(wc -l < "${checksums_path}")" -ne 2 ]]; then + echo "Expected exactly two checksums for ${TARGET} in ${checksums_source}" >&2 + exit 1 + fi + + (cd "${binding_dir}" && sha256sum -c "${checksums_path}") + echo "RUSTY_V8_ARCHIVE=${archive_path}" >> "${GITHUB_ENV}" + echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "${GITHUB_ENV}" diff --git a/.github/actions/windows-code-sign/action.yml b/.github/actions/windows-code-sign/action.yml index f6cf737912..b79c790f16 100644 --- a/.github/actions/windows-code-sign/action.yml +++ b/.github/actions/windows-code-sign/action.yml @@ -27,14 +27,14 @@ runs: using: composite steps: - name: Azure login for Trusted Signing (OIDC) - uses: azure/login@v2 + uses: 
azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 with: client-id: ${{ inputs.client-id }} tenant-id: ${{ inputs.tenant-id }} subscription-id: ${{ inputs.subscription-id }} - name: Sign Windows binaries with Azure Trusted Signing - uses: azure/trusted-signing-action@v0 + uses: azure/trusted-signing-action@1d365fec12862c4aa68fcac418143d73f0cea293 # v0 with: endpoint: ${{ inputs.endpoint }} trusted-signing-account-name: ${{ inputs.account-name }} diff --git a/.github/scripts/run-bazel-ci.sh b/.github/scripts/run-bazel-ci.sh index 9c95fda157..a1d42a0576 100755 --- a/.github/scripts/run-bazel-ci.sh +++ b/.github/scripts/run-bazel-ci.sh @@ -69,12 +69,37 @@ print_bazel_test_log_tails() { local console_log="$1" local testlogs_dir local -a bazel_info_cmd=(bazel) + local -a bazel_info_args=(info) if (( ${#bazel_startup_args[@]} > 0 )); then bazel_info_cmd+=("${bazel_startup_args[@]}") fi - testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" info bazel-testlogs 2>/dev/null || echo bazel-testlogs)" + # `bazel info` needs the same CI config as the failed test invocation so + # platform-specific output roots match. On Windows, omitting `ci-windows` + # would point at `local_windows-fastbuild` even when the test ran with the + # MSVC host platform under `local_windows_msvc-fastbuild`. + if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then + bazel_info_args+=( + "--config=${ci_config}" + "--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}" + ) + fi + # Only pass flags that affect Bazel's output-root selection or repository + # lookup. Test/build-only flags such as execution logs or remote download + # mode can make `bazel info` fail, which would hide the real test log path. 
+ for arg in "${post_config_bazel_args[@]}"; do + case "$arg" in + --host_platform=* | --repo_contents_cache=* | --repository_cache=*) + bazel_info_args+=("$arg") + ;; + esac + done + + testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" \ + --noexperimental_remote_repo_contents_cache \ + "${bazel_info_args[@]}" \ + bazel-testlogs 2>/dev/null || echo bazel-testlogs)" local failed_targets=() while IFS= read -r target; do @@ -92,8 +117,14 @@ print_bazel_test_log_tails() { for target in "${failed_targets[@]}"; do local rel_path="${target#//}" - rel_path="${rel_path/:/\/}" + rel_path="${rel_path/://}" local test_log="${testlogs_dir}/${rel_path}/test.log" + local reported_test_log + reported_test_log="$(grep -F "FAIL: ${target} " "$console_log" | sed -nE 's#.* \(see (.*[\\/]test\.log)\).*#\1#p' | head -n 1 || true)" + if [[ -n "$reported_test_log" ]]; then + reported_test_log="${reported_test_log//\\//}" + test_log="$reported_test_log" + fi echo "::group::Bazel test log tail for ${target}" if [[ -f "$test_log" ]]; then diff --git a/.github/scripts/rusty_v8_bazel.py b/.github/scripts/rusty_v8_bazel.py index c11e67263e..ec73e0e5a7 100644 --- a/.github/scripts/rusty_v8_bazel.py +++ b/.github/scripts/rusty_v8_bazel.py @@ -4,6 +4,7 @@ from __future__ import annotations import argparse import gzip +import hashlib import re import shutil import subprocess @@ -12,8 +13,16 @@ import tempfile import tomllib from pathlib import Path +from rusty_v8_module_bazel import ( + RustyV8ChecksumError, + check_module_bazel, + update_module_bazel, +) + ROOT = Path(__file__).resolve().parents[2] +MODULE_BAZEL = ROOT / "MODULE.bazel" +RUSTY_V8_CHECKSUMS_DIR = ROOT / "third_party" / "v8" MUSL_RUNTIME_ARCHIVE_LABELS = [ "@llvm//runtimes/libcxx:libcxx.static", "@llvm//runtimes/libcxx:libcxxabi.static", @@ -146,6 +155,24 @@ def resolved_v8_crate_version() -> str: return matches[0] +def rusty_v8_checksum_manifest_path(version: str) -> Path: + return RUSTY_V8_CHECKSUMS_DIR / 
f"rusty_v8_{version.replace('.', '_')}.sha256" + + +def command_version(version: str | None) -> str: + if version is not None: + return version + return resolved_v8_crate_version() + + +def command_manifest_path(manifest: Path | None, version: str) -> Path: + if manifest is None: + return rusty_v8_checksum_manifest_path(version) + if manifest.is_absolute(): + return manifest + return ROOT / manifest + + def staged_archive_name(target: str, source_path: Path) -> str: if source_path.suffix == ".lib": return f"rusty_v8_release_{target}.lib.gz" @@ -244,8 +271,18 @@ def stage_release_pair( shutil.copyfile(binding_path, staged_binding) + staged_checksums = output_dir / f"rusty_v8_release_{target}.sha256" + with staged_checksums.open("w", encoding="utf-8") as checksums: + for path in [staged_library, staged_binding]: + digest = hashlib.sha256() + with path.open("rb") as artifact: + for chunk in iter(lambda: artifact.read(1024 * 1024), b""): + digest.update(chunk) + checksums.write(f"{digest.hexdigest()} {path.name}\n") + print(staged_library) print(staged_binding) + print(staged_checksums) def parse_args() -> argparse.Namespace: @@ -264,6 +301,24 @@ def parse_args() -> argparse.Namespace: subparsers.add_parser("resolved-v8-crate-version") + check_module_bazel_parser = subparsers.add_parser("check-module-bazel") + check_module_bazel_parser.add_argument("--version") + check_module_bazel_parser.add_argument("--manifest", type=Path) + check_module_bazel_parser.add_argument( + "--module-bazel", + type=Path, + default=MODULE_BAZEL, + ) + + update_module_bazel_parser = subparsers.add_parser("update-module-bazel") + update_module_bazel_parser.add_argument("--version") + update_module_bazel_parser.add_argument("--manifest", type=Path) + update_module_bazel_parser.add_argument( + "--module-bazel", + type=Path, + default=MODULE_BAZEL, + ) + return parser.parse_args() @@ -280,6 +335,22 @@ def main() -> int: if args.command == "resolved-v8-crate-version": 
print(resolved_v8_crate_version()) return 0 + if args.command == "check-module-bazel": + version = command_version(args.version) + manifest_path = command_manifest_path(args.manifest, version) + try: + check_module_bazel(args.module_bazel, manifest_path, version) + except RustyV8ChecksumError as exc: + raise SystemExit(str(exc)) from exc + return 0 + if args.command == "update-module-bazel": + version = command_version(args.version) + manifest_path = command_manifest_path(args.manifest, version) + try: + update_module_bazel(args.module_bazel, manifest_path, version) + except RustyV8ChecksumError as exc: + raise SystemExit(str(exc)) from exc + return 0 raise SystemExit(f"unsupported command: {args.command}") diff --git a/.github/scripts/rusty_v8_module_bazel.py b/.github/scripts/rusty_v8_module_bazel.py new file mode 100644 index 0000000000..7f474fc5d2 --- /dev/null +++ b/.github/scripts/rusty_v8_module_bazel.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import re +from dataclasses import dataclass +from pathlib import Path + + +SHA256_RE = re.compile(r"[0-9a-f]{64}") +HTTP_FILE_BLOCK_RE = re.compile(r"(?ms)^http_file\(\n.*?^\)\n?") + + +class RustyV8ChecksumError(ValueError): + pass + + +@dataclass(frozen=True) +class RustyV8HttpFile: + start: int + end: int + block: str + name: str + downloaded_file_path: str + sha256: str | None + + +def parse_checksum_manifest(path: Path) -> dict[str, str]: + try: + lines = path.read_text(encoding="utf-8").splitlines() + except FileNotFoundError as exc: + raise RustyV8ChecksumError(f"missing checksum manifest: {path}") from exc + + checksums: dict[str, str] = {} + for line_number, line in enumerate(lines, 1): + if not line.strip(): + continue + parts = line.split() + if len(parts) != 2: + raise RustyV8ChecksumError( + f"{path}:{line_number}: expected ' '" + ) + checksum, filename = parts + if not SHA256_RE.fullmatch(checksum): + raise RustyV8ChecksumError( + f"{path}:{line_number}: invalid 
SHA-256 digest for {filename}" + ) + if not filename or filename in {".", ".."} or "/" in filename: + raise RustyV8ChecksumError( + f"{path}:{line_number}: expected a bare artifact filename" + ) + if filename in checksums: + raise RustyV8ChecksumError( + f"{path}:{line_number}: duplicate checksum for {filename}" + ) + checksums[filename] = checksum + + if not checksums: + raise RustyV8ChecksumError(f"empty checksum manifest: {path}") + return checksums + + +def string_field(block: str, field: str) -> str | None: + # Matches one-line string fields inside http_file blocks, e.g. `sha256 = "...",`. + match = re.search(rf'^\s*{re.escape(field)}\s*=\s*"([^"]+)",\s*$', block, re.M) + if match: + return match.group(1) + return None + + +def rusty_v8_http_files(module_bazel: str, version: str) -> list[RustyV8HttpFile]: + version_slug = version.replace(".", "_") + name_prefix = f"rusty_v8_{version_slug}_" + entries = [] + for match in HTTP_FILE_BLOCK_RE.finditer(module_bazel): + block = match.group(0) + name = string_field(block, "name") + if not name or not name.startswith(name_prefix): + continue + downloaded_file_path = string_field(block, "downloaded_file_path") + if not downloaded_file_path: + raise RustyV8ChecksumError( + f"MODULE.bazel {name} is missing downloaded_file_path" + ) + entries.append( + RustyV8HttpFile( + start=match.start(), + end=match.end(), + block=block, + name=name, + downloaded_file_path=downloaded_file_path, + sha256=string_field(block, "sha256"), + ) + ) + return entries + + +def module_entry_set_errors( + entries: list[RustyV8HttpFile], + checksums: dict[str, str], + version: str, +) -> list[str]: + errors = [] + if not entries: + errors.append(f"MODULE.bazel has no rusty_v8 http_file entries for {version}") + return errors + + module_files: dict[str, RustyV8HttpFile] = {} + duplicate_files = set() + for entry in entries: + if entry.downloaded_file_path in module_files: + duplicate_files.add(entry.downloaded_file_path) + 
module_files[entry.downloaded_file_path] = entry + + for filename in sorted(duplicate_files): + errors.append(f"MODULE.bazel has duplicate http_file entries for {filename}") + + for filename in sorted(set(module_files) - set(checksums)): + entry = module_files[filename] + errors.append(f"MODULE.bazel {entry.name} has no checksum in the manifest") + + for filename in sorted(set(checksums) - set(module_files)): + errors.append(f"manifest has {filename}, but MODULE.bazel has no http_file") + + return errors + + +def module_checksum_errors( + entries: list[RustyV8HttpFile], + checksums: dict[str, str], +) -> list[str]: + errors = [] + for entry in entries: + expected = checksums.get(entry.downloaded_file_path) + if expected is None: + continue + if entry.sha256 is None: + errors.append(f"MODULE.bazel {entry.name} is missing sha256") + elif entry.sha256 != expected: + errors.append( + f"MODULE.bazel {entry.name} has sha256 {entry.sha256}, " + f"expected {expected}" + ) + return errors + + +def raise_checksum_errors(message: str, errors: list[str]) -> None: + if errors: + formatted_errors = "\n".join(f"- {error}" for error in errors) + raise RustyV8ChecksumError(f"{message}:\n{formatted_errors}") + + +def check_module_bazel_text( + module_bazel: str, + checksums: dict[str, str], + version: str, +) -> None: + entries = rusty_v8_http_files(module_bazel, version) + errors = [ + *module_entry_set_errors(entries, checksums, version), + *module_checksum_errors(entries, checksums), + ] + raise_checksum_errors("rusty_v8 MODULE.bazel checksum drift", errors) + + +def block_with_sha256(block: str, checksum: str) -> str: + sha256_line_re = re.compile(r'(?m)^(\s*)sha256\s*=\s*"[0-9a-f]+",\s*$') + if sha256_line_re.search(block): + return sha256_line_re.sub( + lambda match: f'{match.group(1)}sha256 = "{checksum}",', + block, + count=1, + ) + + downloaded_file_path_match = re.search( + r'(?m)^(\s*)downloaded_file_path\s*=\s*"[^"]+",\n', + block, + ) + if not 
downloaded_file_path_match: + raise RustyV8ChecksumError("http_file block is missing downloaded_file_path") + insert_at = downloaded_file_path_match.end() + indent = downloaded_file_path_match.group(1) + return f'{block[:insert_at]}{indent}sha256 = "{checksum}",\n{block[insert_at:]}' + + +def update_module_bazel_text( + module_bazel: str, + checksums: dict[str, str], + version: str, +) -> str: + entries = rusty_v8_http_files(module_bazel, version) + errors = module_entry_set_errors(entries, checksums, version) + raise_checksum_errors("cannot update rusty_v8 MODULE.bazel checksums", errors) + + updated = [] + previous_end = 0 + for entry in entries: + updated.append(module_bazel[previous_end : entry.start]) + updated.append( + block_with_sha256(entry.block, checksums[entry.downloaded_file_path]) + ) + previous_end = entry.end + updated.append(module_bazel[previous_end:]) + return "".join(updated) + + +def check_module_bazel( + module_bazel_path: Path, + manifest_path: Path, + version: str, +) -> None: + checksums = parse_checksum_manifest(manifest_path) + module_bazel = module_bazel_path.read_text(encoding="utf-8") + check_module_bazel_text(module_bazel, checksums, version) + print(f"{module_bazel_path} rusty_v8 {version} checksums match {manifest_path}") + + +def update_module_bazel( + module_bazel_path: Path, + manifest_path: Path, + version: str, +) -> None: + checksums = parse_checksum_manifest(manifest_path) + module_bazel = module_bazel_path.read_text(encoding="utf-8") + updated_module_bazel = update_module_bazel_text(module_bazel, checksums, version) + if updated_module_bazel == module_bazel: + print(f"{module_bazel_path} rusty_v8 {version} checksums are already current") + return + module_bazel_path.write_text(updated_module_bazel, encoding="utf-8") + print(f"updated {module_bazel_path} rusty_v8 {version} checksums") diff --git a/.github/scripts/test_rusty_v8_bazel.py b/.github/scripts/test_rusty_v8_bazel.py new file mode 100644 index 0000000000..e86e82e8b2 
--- /dev/null +++ b/.github/scripts/test_rusty_v8_bazel.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import textwrap +import unittest + +import rusty_v8_module_bazel + + +class RustyV8BazelTest(unittest.TestCase): + def test_update_module_bazel_replaces_and_inserts_sha256(self) -> None: + module_bazel = textwrap.dedent( + """\ + http_file( + name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive", + downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "0000000000000000000000000000000000000000000000000000000000000000", + urls = [ + "https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + ], + ) + + http_file( + name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding", + downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs", + urls = [ + "https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs", + ], + ) + + http_file( + name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive", + downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + urls = [ + "https://example.test/old.gz", + ], + ) + """ + ) + checksums = { + "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": ( + "1111111111111111111111111111111111111111111111111111111111111111" + ), + "src_binding_release_x86_64-unknown-linux-musl.rs": ( + "2222222222222222222222222222222222222222222222222222222222222222" + ), + } + + updated = rusty_v8_module_bazel.update_module_bazel_text( + module_bazel, + checksums, + "146.4.0", + ) + + self.assertEqual( + textwrap.dedent( + """\ + http_file( + name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive", + downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "1111111111111111111111111111111111111111111111111111111111111111", + urls = [ + 
"https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + ], + ) + + http_file( + name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding", + downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs", + sha256 = "2222222222222222222222222222222222222222222222222222222222222222", + urls = [ + "https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs", + ], + ) + + http_file( + name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive", + downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + urls = [ + "https://example.test/old.gz", + ], + ) + """ + ), + updated, + ) + rusty_v8_module_bazel.check_module_bazel_text(updated, checksums, "146.4.0") + + def test_check_module_bazel_rejects_manifest_drift(self) -> None: + module_bazel = textwrap.dedent( + """\ + http_file( + name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive", + downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "1111111111111111111111111111111111111111111111111111111111111111", + urls = [ + "https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + ], + ) + """ + ) + checksums = { + "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": ( + "1111111111111111111111111111111111111111111111111111111111111111" + ), + "orphan.gz": ( + "2222222222222222222222222222222222222222222222222222222222222222" + ), + } + + with self.assertRaisesRegex( + rusty_v8_module_bazel.RustyV8ChecksumError, + "manifest has orphan.gz", + ): + rusty_v8_module_bazel.check_module_bazel_text( + module_bazel, + checksums, + "146.4.0", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml index 2e14184800..0328ac2073 100644 --- a/.github/workflows/bazel.yml +++ b/.github/workflows/bazel.yml @@ -51,38 +51,24 @@ jobs: steps: - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - - name: Set up Bazel CI - id: setup_bazel - uses: ./.github/actions/setup-bazel-ci + - name: Check rusty_v8 MODULE.bazel checksums + if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu' + shell: bash + run: | + python3 .github/scripts/rusty_v8_bazel.py check-module-bazel + python3 -m unittest discover -s .github/scripts -p test_rusty_v8_bazel.py + + - name: Prepare Bazel CI + id: prepare_bazel + uses: ./.github/actions/prepare-bazel-ci with: target: ${{ matrix.target }} install-test-prereqs: "true" - - # Restore the Bazel repository cache explicitly so external dependencies - # do not need to be re-downloaded on every CI run. Keep restore failures - # non-fatal so transient cache-service errors degrade to a cold build - # instead of failing the job. - - name: Restore bazel repository cache - id: cache_bazel_repository_restore - continue-on-error: true - uses: actions/cache/restore@v5 - with: - path: ${{ steps.setup_bazel.outputs.repository-cache-path }} - key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} - restore-keys: | - bazel-cache-${{ matrix.target }} - - name: Check MODULE.bazel.lock is up to date if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu' shell: bash run: ./scripts/check-module-bazel-lock.sh - - name: Set up Bazel execution logs - shell: bash - run: | - mkdir -p "${RUNNER_TEMP}/bazel-execution-logs" - echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}" - - name: bazel test //... 
env: BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }} @@ -100,17 +86,21 @@ jobs: --print-failed-test-logs --use-node-test-env ) + bazel_test_args=( + test + --test_tag_filters=-argument-comment-lint + --test_verbose_timeout_warnings + --build_metadata=COMMIT_SHA=${GITHUB_SHA} + ) if [[ "${RUNNER_OS}" == "Windows" ]]; then bazel_wrapper_args+=(--windows-msvc-host-platform) + bazel_test_args+=(--jobs=8) fi ./.github/scripts/run-bazel-ci.sh \ "${bazel_wrapper_args[@]}" \ -- \ - test \ - --test_tag_filters=-argument-comment-lint \ - --test_verbose_timeout_warnings \ - --build_metadata=COMMIT_SHA=${GITHUB_SHA} \ + "${bazel_test_args[@]}" \ -- \ "${bazel_targets[@]}" @@ -123,14 +113,14 @@ jobs: path: ${{ runner.temp }}/bazel-execution-logs if-no-files-found: ignore - # Save bazel repository cache explicitly; make non-fatal so cache uploading - # never fails the overall job. Only save when key wasn't hit. + # Save the Bazel repository cache after every non-cancelled run. Keep the + # upload non-fatal so cache service issues never fail the job itself. 
- name: Save bazel repository cache - if: always() && !cancelled() && steps.cache_bazel_repository_restore.outputs.cache-hit != 'true' + if: always() && !cancelled() continue-on-error: true uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5 with: - path: ${{ steps.setup_bazel.outputs.repository-cache-path }} + path: ${{ steps.prepare_bazel.outputs.repository-cache-path }} key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} clippy: @@ -155,32 +145,12 @@ jobs: steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - - name: Set up Bazel CI - id: setup_bazel - uses: ./.github/actions/setup-bazel-ci + - name: Prepare Bazel CI + id: prepare_bazel + uses: ./.github/actions/prepare-bazel-ci with: target: ${{ matrix.target }} - # Restore the Bazel repository cache explicitly so external dependencies - # do not need to be re-downloaded on every CI run. Keep restore failures - # non-fatal so transient cache-service errors degrade to a cold build - # instead of failing the job. 
- - name: Restore bazel repository cache - id: cache_bazel_repository_restore - continue-on-error: true - uses: actions/cache/restore@v5 - with: - path: ${{ steps.setup_bazel.outputs.repository-cache-path }} - key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} - restore-keys: | - bazel-cache-${{ matrix.target }} - - - name: Set up Bazel execution logs - shell: bash - run: | - mkdir -p "${RUNNER_TEMP}/bazel-execution-logs" - echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}" - - name: bazel build --config=clippy lint targets env: BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }} @@ -220,12 +190,94 @@ jobs: path: ${{ runner.temp }}/bazel-execution-logs if-no-files-found: ignore - # Save bazel repository cache explicitly; make non-fatal so cache uploading - # never fails the overall job. Only save when key wasn't hit. + # Save the Bazel repository cache after every non-cancelled run. Keep the + # upload non-fatal so cache service issues never fail the job itself. 
- name: Save bazel repository cache if: always() && !cancelled() continue-on-error: true uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5 with: - path: ${{ steps.setup_bazel.outputs.repository-cache-path }} + path: ${{ steps.prepare_bazel.outputs.repository-cache-path }} + key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} + + verify-release-build: + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-24.04 + target: x86_64-unknown-linux-gnu + - os: macos-15-xlarge + target: aarch64-apple-darwin + - os: windows-latest + target: x86_64-pc-windows-gnullvm + runs-on: ${{ matrix.os }} + name: Verify release build on ${{ matrix.os }} for ${{ matrix.target }} + + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Prepare Bazel CI + id: prepare_bazel + uses: ./.github/actions/prepare-bazel-ci + with: + target: ${{ matrix.target }} + + - name: bazel build verify-release-build targets + env: + BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }} + shell: bash + run: | + # This job exists to compile Rust code behind + # `cfg(not(debug_assertions))` so PR CI catches failures that would + # otherwise show up only in a release build. We do not need the full + # optimizer and debug-info work that normally comes with a release + # build to get that signal, so keep Bazel in `fastbuild` and disable + # Rust debug assertions explicitly. 
+ bazel_wrapper_args=() + if [[ "${RUNNER_OS}" == "Windows" ]]; then + bazel_wrapper_args+=(--windows-msvc-host-platform) + fi + + bazel_build_args=( + --compilation_mode=fastbuild + --@rules_rust//rust/settings:extra_rustc_flag=-Cdebug-assertions=no + --@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebug-assertions=no + --build_metadata=COMMIT_SHA=${GITHUB_SHA} + --build_metadata=TAG_job=verify-release-build + --build_metadata=TAG_rust_debug_assertions=off + ) + + bazel_target_lines="$(bash ./scripts/list-bazel-release-targets.sh)" + bazel_targets=() + while IFS= read -r target; do + bazel_targets+=("${target}") + done <<< "${bazel_target_lines}" + + ./.github/scripts/run-bazel-ci.sh \ + "${bazel_wrapper_args[@]}" \ + -- \ + build \ + "${bazel_build_args[@]}" \ + -- \ + "${bazel_targets[@]}" + + - name: Upload Bazel execution logs + if: always() && !cancelled() + continue-on-error: true + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7 + with: + name: bazel-execution-logs-verify-release-build-${{ matrix.target }} + path: ${{ runner.temp }}/bazel-execution-logs + if-no-files-found: ignore + + # Save the Bazel repository cache after every non-cancelled run. Keep the + # upload non-fatal so cache service issues never fail the job itself. 
+ - name: Save bazel repository cache + if: always() && !cancelled() + continue-on-error: true + uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5 + with: + path: ${{ steps.prepare_bazel.outputs.repository-cache-path }} key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }} diff --git a/.github/workflows/rust-ci-full.yml b/.github/workflows/rust-ci-full.yml index 97fa33283e..7cda682952 100644 --- a/.github/workflows/rust-ci-full.yml +++ b/.github/workflows/rust-ci-full.yml @@ -43,6 +43,9 @@ jobs: argument_comment_lint_package: name: Argument comment lint package runs-on: ubuntu-24.04 + env: + CARGO_DYLINT_VERSION: 5.0.0 + DYLINT_LINK_VERSION: 5.0.0 steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0 @@ -59,10 +62,13 @@ jobs: ~/.cargo/registry/index ~/.cargo/registry/cache ~/.cargo/git/db - key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }} + key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }} - name: Install cargo-dylint tooling if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }} - run: cargo install --locked cargo-dylint dylint-link + shell: bash + run: | + cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" + cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION" - name: Check Python wrapper syntax run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py 
tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py - name: Test Python wrapper helpers @@ -415,22 +421,10 @@ jobs: echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV" - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }} - name: Configure musl rusty_v8 artifact overrides - env: - TARGET: ${{ matrix.target }} - shell: bash - run: | - set -euo pipefail - version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)" - release_tag="rusty-v8-v${version}" - base_url="https://github.com/openai/codex/releases/download/${release_tag}" - archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz" - binding_dir="${RUNNER_TEMP}/rusty_v8" - binding_path="${binding_dir}/src_binding_release_${TARGET}.rs" - mkdir -p "${binding_dir}" - curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}" - echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV" - echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV" + name: Configure musl rusty_v8 artifact overrides and verify checksums + uses: ./.github/actions/setup-rusty-v8-musl + with: + target: ${{ matrix.target }} - name: Install cargo-chef if: ${{ matrix.profile == 'release' }} @@ -670,6 +664,7 @@ jobs: export CODEX_TEST_REMOTE_ENV_CONTAINER_NAME=codex-remote-test-env source "${GITHUB_WORKSPACE}/scripts/test-remote-env.sh" echo "CODEX_TEST_REMOTE_ENV=${CODEX_TEST_REMOTE_ENV}" >> "$GITHUB_ENV" + echo "CODEX_TEST_REMOTE_EXEC_SERVER_URL=${CODEX_TEST_REMOTE_EXEC_SERVER_URL}" >> "$GITHUB_ENV" - name: tests id: test diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index 3a9eadc8be..4da750b7e4 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -90,6 +90,9 @@ jobs: runs-on: ubuntu-24.04 needs: changed if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' 
}} + env: + CARGO_DYLINT_VERSION: 5.0.0 + DYLINT_LINK_VERSION: 5.0.0 steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0 @@ -113,10 +116,13 @@ jobs: ~/.cargo/registry/index ~/.cargo/registry/cache ~/.cargo/git/db - key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }} + key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }} - name: Install cargo-dylint tooling if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }} - run: cargo install --locked cargo-dylint dylint-link + shell: bash + run: | + cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" + cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION" - name: Check Python wrapper syntax run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py - name: Test Python wrapper helpers diff --git a/.github/workflows/rust-release-argument-comment-lint.yml b/.github/workflows/rust-release-argument-comment-lint.yml index a6e88d8d3e..ba0d147d4f 100644 --- a/.github/workflows/rust-release-argument-comment-lint.yml +++ b/.github/workflows/rust-release-argument-comment-lint.yml @@ -19,6 +19,9 @@ jobs: name: Build - ${{ matrix.runner }} - ${{ matrix.target }} runs-on: ${{ matrix.runs_on || matrix.runner }} timeout-minutes: 60 + env: + CARGO_DYLINT_VERSION: 5.0.0 + DYLINT_LINK_VERSION: 5.0.0 strategy: fail-fast: false @@ -65,8 +68,8 @@ jobs: shell: bash 
run: | install_root="${RUNNER_TEMP}/argument-comment-lint-tools" - cargo install --locked cargo-dylint --root "$install_root" - cargo install --locked dylint-link + cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" --root "$install_root" + cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION" echo "INSTALL_ROOT=$install_root" >> "$GITHUB_ENV" - name: Cargo build diff --git a/.github/workflows/rust-release.yml b/.github/workflows/rust-release.yml index 30e16c417d..efd3dd11eb 100644 --- a/.github/workflows/rust-release.yml +++ b/.github/workflows/rust-release.yml @@ -211,22 +211,10 @@ jobs: echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV" - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }} - name: Configure musl rusty_v8 artifact overrides - env: - TARGET: ${{ matrix.target }} - shell: bash - run: | - set -euo pipefail - version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)" - release_tag="rusty-v8-v${version}" - base_url="https://github.com/openai/codex/releases/download/${release_tag}" - archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz" - binding_dir="${RUNNER_TEMP}/rusty_v8" - binding_path="${binding_dir}/src_binding_release_${TARGET}.rs" - mkdir -p "${binding_dir}" - curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}" - echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV" - echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV" + name: Configure musl rusty_v8 artifact overrides and verify checksums + uses: ./.github/actions/setup-rusty-v8-musl + with: + target: ${{ matrix.target }} - name: Cargo build shell: bash diff --git a/.github/workflows/rusty-v8-release.yml b/.github/workflows/rusty-v8-release.yml index d06fe0ae88..29e7b3b1ae 100644 --- a/.github/workflows/rusty-v8-release.yml +++ b/.github/workflows/rusty-v8-release.yml @@ 
-78,7 +78,7 @@ jobs: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - name: Set up Bazel - uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3 + uses: bazelbuild/setup-bazelisk@b39c379c82683a5f25d34f0d062761f62693e0b2 # v3 - name: Set up Python uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 diff --git a/.github/workflows/v8-canary.yml b/.github/workflows/v8-canary.yml index 0dc7dc0054..f5aa1d7c67 100644 --- a/.github/workflows/v8-canary.yml +++ b/.github/workflows/v8-canary.yml @@ -75,7 +75,7 @@ jobs: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - name: Set up Bazel - uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3 + uses: bazelbuild/setup-bazelisk@b39c379c82683a5f25d34f0d062761f62693e0b2 # v3 - name: Set up Python uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 diff --git a/MODULE.bazel b/MODULE.bazel index 42875b40f0..04c5c69ebd 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -423,6 +423,7 @@ http_archive( http_file( name = "rusty_v8_146_4_0_aarch64_apple_darwin_archive", downloaded_file_path = "librusty_v8_release_aarch64-apple-darwin.a.gz", + sha256 = "bfe2c9be32a56c28546f0f965825ee68fbf606405f310cc4e17b448a568cf98a", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-apple-darwin.a.gz", ], @@ -431,6 +432,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_aarch64_unknown_linux_gnu_archive", downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-gnu.a.gz", + sha256 = "dbf165b07c81bdb054bc046b43d23e69fcf7bcc1a4c1b5b4776983a71062ecd8", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-unknown-linux-gnu.a.gz", ], @@ -439,6 +441,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_aarch64_pc_windows_msvc_archive", downloaded_file_path = "rusty_v8_release_aarch64-pc-windows-msvc.lib.gz", + sha256 = 
"ed13363659c6d08583ac8fdc40493445c5767d8b94955a4d5d7bb8d5a81f6bf8", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_aarch64-pc-windows-msvc.lib.gz", ], @@ -447,6 +450,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_x86_64_apple_darwin_archive", downloaded_file_path = "librusty_v8_release_x86_64-apple-darwin.a.gz", + sha256 = "630cd240f1bbecdb071417dc18387ab81cf67c549c1c515a0b4fcf9eba647bb7", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-apple-darwin.a.gz", ], @@ -455,6 +459,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive", downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", + sha256 = "e64b4d99e4ae293a2e846244a89b80178ba10382c13fb591c1fa6968f5291153", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz", ], @@ -463,6 +468,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_x86_64_pc_windows_msvc_archive", downloaded_file_path = "rusty_v8_release_x86_64-pc-windows-msvc.lib.gz", + sha256 = "90a9a2346acd3685a355e98df85c24dbe406cb124367d16259a4b5d522621862", urls = [ "https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_x86_64-pc-windows-msvc.lib.gz", ], @@ -471,6 +477,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_archive", downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-musl.a.gz", + sha256 = "27a08ed26c34297bfd93e514692ccc44b85f8b15c6aa39cf34e784f84fb37e8e", urls = [ "https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_aarch64-unknown-linux-musl.a.gz", ], @@ -479,6 +486,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_binding", downloaded_file_path = "src_binding_release_aarch64-unknown-linux-musl.rs", + sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a", urls = [ 
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_aarch64-unknown-linux-musl.rs", ], @@ -487,6 +495,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_archive", downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-musl.a.gz", + sha256 = "20d8271ad712323d352c1383c36e3c4b755abc41ece35819c49c75ec7134d2f8", urls = [ "https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_x86_64-unknown-linux-musl.a.gz", ], @@ -495,6 +504,7 @@ http_file( http_file( name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding", downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs", + sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a", urls = [ "https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_x86_64-unknown-linux-musl.rs", ], diff --git a/MODULE.bazel.lock b/MODULE.bazel.lock index 0e2232fcbf..7a1acc3816 100644 --- a/MODULE.bazel.lock +++ b/MODULE.bazel.lock @@ -663,6 +663,7 @@ "bitflags_1.3.2": "{\"dependencies\":[{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1.2\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"walkdir\",\"req\":\"^2.3\"}],\"features\":{\"default\":[],\"example_generated\":[],\"rustc-dep-of-std\":[\"core\",\"compiler_builtins\"]}}", "bitflags_2.10.0": 
"{\"dependencies\":[{\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"arbitrary\",\"req\":\"^1.0\"},{\"name\":\"bytemuck\",\"optional\":true,\"req\":\"^1.12\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"bytemuck\",\"req\":\"^1.12.2\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0.228\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde_lib\",\"package\":\"serde\",\"req\":\"^1.0.103\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.19\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.18\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"zerocopy\",\"req\":\"^0.8\"}],\"features\":{\"example_generated\":[],\"serde\":[\"serde_core\"],\"std\":[]}}", "bitflags_2.11.0": "{\"dependencies\":[{\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"arbitrary\",\"req\":\"^1.0\"},{\"name\":\"bytemuck\",\"optional\":true,\"req\":\"^1.12\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"bytemuck\",\"req\":\"^1.12.2\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0.228\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde_lib\",\"package\":\"serde\",\"req\":\"^1.0.103\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.19\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.18\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"zerocopy\",\"req\":\"^0.8\"}],\"features\":{\"example_generated\":[],\"serde\":[\"serde_core\"],\"std\":[]}}", + "blake2_0.10.6": 
"{\"dependencies\":[{\"features\":[\"mac\"],\"name\":\"digest\",\"req\":\"^0.10.3\"},{\"features\":[\"dev\"],\"kind\":\"dev\",\"name\":\"digest\",\"req\":\"^0.10.3\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.2.2\"}],\"features\":{\"default\":[\"std\"],\"reset\":[],\"simd\":[],\"simd_asm\":[\"simd_opt\"],\"simd_opt\":[\"simd\"],\"size_opt\":[],\"std\":[\"digest/std\"]}}", "block-buffer_0.10.4": "{\"dependencies\":[{\"name\":\"generic-array\",\"req\":\"^0.14\"}],\"features\":{}}", "block-padding_0.3.3": "{\"dependencies\":[{\"name\":\"generic-array\",\"req\":\"^0.14\"}],\"features\":{\"std\":[]}}", "block2_0.6.2": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"std\"],\"name\":\"objc2\",\"req\":\">=0.6.2, <0.8.0\"}],\"features\":{\"alloc\":[],\"compiler-rt\":[\"objc2/unstable-compiler-rt\"],\"default\":[\"std\"],\"gnustep-1-7\":[\"objc2/gnustep-1-7\"],\"gnustep-1-8\":[\"gnustep-1-7\",\"objc2/gnustep-1-8\"],\"gnustep-1-9\":[\"gnustep-1-8\",\"objc2/gnustep-1-9\"],\"gnustep-2-0\":[\"gnustep-1-9\",\"objc2/gnustep-2-0\"],\"gnustep-2-1\":[\"gnustep-2-0\",\"objc2/gnustep-2-1\"],\"std\":[\"alloc\"],\"unstable-coerce-pointee\":[],\"unstable-objfw\":[],\"unstable-private\":[],\"unstable-winobjc\":[\"gnustep-1-8\"]}}", @@ -752,6 +753,8 @@ "crossterm_winapi_0.9.1": "{\"dependencies\":[{\"features\":[\"winbase\",\"consoleapi\",\"processenv\",\"handleapi\",\"synchapi\",\"impl-default\"],\"name\":\"winapi\",\"req\":\"^0.3.8\",\"target\":\"cfg(windows)\"}],\"features\":{}}", "crunchy_0.2.4": "{\"dependencies\":[],\"features\":{\"default\":[\"limit_128\"],\"limit_1024\":[],\"limit_128\":[],\"limit_2048\":[],\"limit_256\":[],\"limit_512\":[],\"limit_64\":[],\"std\":[]}}", "crypto-common_0.1.7": 
"{\"dependencies\":[{\"features\":[\"more_lengths\"],\"name\":\"generic-array\",\"req\":\"=0.14.7\"},{\"name\":\"rand_core\",\"optional\":true,\"req\":\"^0.6\"},{\"name\":\"typenum\",\"req\":\"^1.14\"}],\"features\":{\"getrandom\":[\"rand_core/getrandom\"],\"std\":[]}}", + "crypto_box_0.9.1": "{\"dependencies\":[{\"default_features\":false,\"name\":\"aead\",\"req\":\"^0.5.2\"},{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"blake2\",\"optional\":true,\"req\":\"^0.10\"},{\"name\":\"chacha20\",\"optional\":true,\"req\":\"^0.9\"},{\"default_features\":false,\"name\":\"crypto_secretbox\",\"req\":\"^0.1.1\"},{\"default_features\":false,\"features\":[\"zeroize\"],\"name\":\"curve25519-dalek\",\"req\":\"^4\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\"},{\"kind\":\"dev\",\"name\":\"rmp-serde\",\"req\":\"^1\"},{\"name\":\"salsa20\",\"optional\":true,\"req\":\"^0.10\"},{\"default_features\":false,\"name\":\"serdect\",\"optional\":true,\"req\":\"^0.2\"},{\"default_features\":false,\"name\":\"subtle\",\"req\":\"^2\"},{\"default_features\":false,\"name\":\"zeroize\",\"req\":\"^1\"}],\"features\":{\"alloc\":[\"aead/alloc\"],\"chacha20\":[\"dep:chacha20\",\"crypto_secretbox/chacha20\"],\"default\":[\"alloc\",\"getrandom\",\"salsa20\"],\"getrandom\":[\"aead/getrandom\",\"rand_core\"],\"heapless\":[\"aead/heapless\"],\"rand_core\":[\"aead/rand_core\"],\"salsa20\":[\"dep:salsa20\",\"crypto_secretbox/salsa20\"],\"seal\":[\"dep:blake2\",\"alloc\"],\"serde\":[\"dep:serdect\"],\"std\":[\"aead/std\"]}}", + "crypto_secretbox_0.1.1": 
"{\"dependencies\":[{\"default_features\":false,\"name\":\"aead\",\"req\":\"^0.5\"},{\"features\":[\"zeroize\"],\"name\":\"chacha20\",\"optional\":true,\"req\":\"^0.9\"},{\"default_features\":false,\"name\":\"cipher\",\"req\":\"^0.4\"},{\"default_features\":false,\"features\":[\"zeroize\"],\"name\":\"generic-array\",\"req\":\"^0.14.7\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"name\":\"poly1305\",\"req\":\"^0.8\"},{\"features\":[\"zeroize\"],\"name\":\"salsa20\",\"optional\":true,\"req\":\"^0.10\"},{\"default_features\":false,\"name\":\"subtle\",\"req\":\"^2\"},{\"default_features\":false,\"name\":\"zeroize\",\"req\":\"^1\"}],\"features\":{\"alloc\":[\"aead/alloc\"],\"default\":[\"alloc\",\"getrandom\",\"salsa20\"],\"getrandom\":[\"aead/getrandom\",\"rand_core\"],\"heapless\":[\"aead/heapless\"],\"rand_core\":[\"aead/rand_core\"],\"std\":[\"aead/std\",\"alloc\"],\"stream\":[\"aead/stream\"]}}", "csv-core_0.1.13": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"arrayvec\",\"req\":\"^0.5\"},{\"default_features\":false,\"name\":\"memchr\",\"req\":\"^2\"}],\"features\":{\"default\":[],\"libc\":[\"memchr/libc\"]}}", "csv_1.4.0": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"alloc\",\"serde\"],\"kind\":\"dev\",\"name\":\"bstr\",\"req\":\"^1.7.0\"},{\"name\":\"csv-core\",\"req\":\"^0.1.11\"},{\"name\":\"itoa\",\"req\":\"^1\"},{\"name\":\"ryu\",\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0.221\"},{\"name\":\"serde_core\",\"req\":\"^1.0.221\"}],\"features\":{}}", "ctor-proc-macro_0.0.7": "{\"dependencies\":[],\"features\":{\"default\":[]}}", @@ -782,6 +785,7 @@ "debugid_0.8.0": "{\"dependencies\":[{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.85\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.37\"},{\"name\":\"uuid\",\"req\":\"^1.0.0\"}],\"features\":{}}", "debugserver-types_0.5.0": 
"{\"dependencies\":[{\"name\":\"schemafy\",\"req\":\"^0.5.0\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"req\":\"^1.0\"},{\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{}}", "deflate64_0.1.10": "{\"dependencies\":[{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"bytemuck\",\"req\":\"^1.13.1\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1.2.0\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.7.1\"}],\"features\":{}}", + "deno_core_icudata_0.77.0": "{\"dependencies\":[],\"features\":{}}", "der-parser_10.0.0": "{\"dependencies\":[{\"name\":\"asn1-rs\",\"req\":\"^0.7\"},{\"name\":\"bitvec\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"cookie-factory\",\"optional\":true,\"req\":\"^0.3.0\"},{\"default_features\":false,\"name\":\"displaydoc\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"name\":\"nom\",\"req\":\"^7.0\"},{\"name\":\"num-bigint\",\"optional\":true,\"req\":\"^0.4\"},{\"name\":\"num-traits\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"pretty_assertions\",\"req\":\"^1.0\"},{\"name\":\"rusticata-macros\",\"req\":\"^4.0\"},{\"kind\":\"dev\",\"name\":\"test-case\",\"req\":\"^3.0\"}],\"features\":{\"as_bitvec\":[\"bitvec\"],\"bigint\":[\"num-bigint\"],\"default\":[\"std\"],\"serialize\":[\"std\",\"cookie-factory\"],\"std\":[],\"unstable\":[]}}", "der_0.7.10": 
"{\"dependencies\":[{\"features\":[\"derive\"],\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.3\"},{\"default_features\":false,\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"const-oid\",\"optional\":true,\"req\":\"^0.9.2\"},{\"name\":\"der_derive\",\"optional\":true,\"req\":\"^0.7.2\"},{\"name\":\"flagset\",\"optional\":true,\"req\":\"^0.4.3\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4.1\"},{\"features\":[\"alloc\"],\"name\":\"pem-rfc7468\",\"optional\":true,\"req\":\"^0.7\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"time\",\"optional\":true,\"req\":\"^0.3.4\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.5\"}],\"features\":{\"alloc\":[\"zeroize?/alloc\"],\"arbitrary\":[\"dep:arbitrary\",\"const-oid?/arbitrary\",\"std\"],\"bytes\":[\"dep:bytes\",\"alloc\"],\"derive\":[\"dep:der_derive\"],\"oid\":[\"dep:const-oid\"],\"pem\":[\"dep:pem-rfc7468\",\"alloc\",\"zeroize\"],\"real\":[],\"std\":[\"alloc\"]}}", "deranged_0.5.5": 
"{\"dependencies\":[{\"name\":\"deranged-macros\",\"optional\":true,\"req\":\"=0.3.0\"},{\"default_features\":false,\"name\":\"num-traits\",\"optional\":true,\"req\":\"^0.2.15\"},{\"default_features\":false,\"name\":\"powerfmt\",\"optional\":true,\"req\":\"^0.2.0\"},{\"default_features\":false,\"name\":\"quickcheck\",\"optional\":true,\"req\":\"^1.0.3\"},{\"default_features\":false,\"name\":\"rand08\",\"optional\":true,\"package\":\"rand\",\"req\":\"^0.8.4\"},{\"kind\":\"dev\",\"name\":\"rand08\",\"package\":\"rand\",\"req\":\"^0.8.4\"},{\"default_features\":false,\"name\":\"rand09\",\"optional\":true,\"package\":\"rand\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rand09\",\"package\":\"rand\",\"req\":\"^0.9.0\"},{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0.220\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.86\"}],\"features\":{\"alloc\":[],\"default\":[],\"macros\":[\"dep:deranged-macros\"],\"num\":[\"dep:num-traits\"],\"powerfmt\":[\"dep:powerfmt\"],\"quickcheck\":[\"dep:quickcheck\",\"alloc\"],\"rand\":[\"rand08\",\"rand09\"],\"rand08\":[\"dep:rand08\"],\"rand09\":[\"dep:rand09\"],\"serde\":[\"dep:serde_core\"]}}", @@ -818,6 +822,8 @@ "dylint_linting_5.0.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"assert_cmd\",\"req\":\"^2.0\"},{\"name\":\"cargo_metadata\",\"req\":\"^0.23\"},{\"features\":[\"config\"],\"name\":\"dylint_internal\",\"req\":\"=5.0.0\"},{\"name\":\"paste\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"rustc_version\",\"req\":\"^0.4\"},{\"name\":\"rustversion\",\"req\":\"^1.0\"},{\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.23\"},{\"name\":\"thiserror\",\"req\":\"^2.0\"},{\"name\":\"toml\",\"req\":\"^0.9\"},{\"kind\":\"build\",\"name\":\"toml\",\"req\":\"^0.9\"}],\"features\":{\"constituent\":[]}}", "dylint_testing_5.0.0": 
"{\"dependencies\":[{\"name\":\"anyhow\",\"req\":\"^1.0\"},{\"name\":\"cargo_metadata\",\"req\":\"^0.23\"},{\"name\":\"compiletest_rs\",\"req\":\"^0.11\"},{\"name\":\"dylint\",\"req\":\"=5.0.0\"},{\"name\":\"dylint_internal\",\"req\":\"=5.0.0\"},{\"name\":\"env_logger\",\"req\":\"^0.11\"},{\"name\":\"once_cell\",\"req\":\"^1.21\"},{\"name\":\"regex\",\"req\":\"^1.11\"},{\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"name\":\"tempfile\",\"req\":\"^3.23\"}],\"features\":{\"default\":[],\"deny_warnings\":[]}}", "dyn-clone_1.0.20": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0\"},{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.66\"}],\"features\":{}}", + "ed25519-dalek_2.2.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"blake2\",\"req\":\"^0.10\"},{\"features\":[\"html_reports\"],\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"default_features\":false,\"features\":[\"digest\"],\"name\":\"curve25519-dalek\",\"req\":\"^4\"},{\"default_features\":false,\"features\":[\"digest\",\"rand_core\"],\"kind\":\"dev\",\"name\":\"curve25519-dalek\",\"req\":\"^4\"},{\"default_features\":false,\"name\":\"ed25519\",\"req\":\">=2.2, 
<2.3\"},{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"default_features\":false,\"name\":\"merlin\",\"optional\":true,\"req\":\"^3\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\"},{\"default_features\":false,\"name\":\"rand_core\",\"optional\":true,\"req\":\"^0.6.4\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"rand_core\",\"req\":\"^0.6.4\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"sha2\",\"req\":\"^0.10\"},{\"kind\":\"dev\",\"name\":\"sha3\",\"req\":\"^0.10\"},{\"default_features\":false,\"name\":\"signature\",\"optional\":true,\"req\":\">=2.0, <2.3\"},{\"default_features\":false,\"name\":\"subtle\",\"req\":\"^2.3.0\"},{\"kind\":\"dev\",\"name\":\"toml\",\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"static_secrets\"],\"kind\":\"dev\",\"name\":\"x25519-dalek\",\"req\":\"^2\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.5\"}],\"features\":{\"alloc\":[\"curve25519-dalek/alloc\",\"ed25519/alloc\",\"serde?/alloc\",\"zeroize/alloc\"],\"asm\":[\"sha2/asm\"],\"batch\":[\"alloc\",\"merlin\",\"rand_core\"],\"default\":[\"fast\",\"std\",\"zeroize\"],\"digest\":[\"signature/digest\"],\"fast\":[\"curve25519-dalek/precomputed-tables\"],\"hazmat\":[],\"legacy_compatibility\":[\"curve25519-dalek/legacy_compatibility\"],\"pem\":[\"alloc\",\"ed25519/pem\",\"pkcs8\"],\"pkcs8\":[\"ed25519/pkcs8\"],\"rand_core\":[\"dep:rand_core\"],\"serde\":[\"dep:serde\",\"ed25519/serde\"],\"std\":[\"alloc\",\"ed25519/std\",\"serde?/std\",\"sha2/std\"],\"zeroize\":[\"dep:zeroize\",\"curve25519-dalek/zeroize\"]}}", + "ed25519_2.2.3": 
"{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1\"},{\"features\":[\"rand_core\"],\"kind\":\"dev\",\"name\":\"ed25519-dalek\",\"req\":\"^2\"},{\"kind\":\"dev\",\"name\":\"hex-literal\",\"req\":\"^0.4\"},{\"name\":\"pkcs8\",\"optional\":true,\"req\":\"^0.10\"},{\"features\":[\"std\"],\"kind\":\"dev\",\"name\":\"rand_core\",\"req\":\"^0.6\"},{\"default_features\":false,\"features\":[\"signature\"],\"kind\":\"dev\",\"name\":\"ring-compat\",\"req\":\"^0.8\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"serde_bytes\",\"optional\":true,\"req\":\"^0.11\"},{\"default_features\":false,\"name\":\"signature\",\"req\":\"^2\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"alloc\":[\"pkcs8?/alloc\"],\"default\":[\"std\"],\"pem\":[\"alloc\",\"pkcs8/pem\"],\"serde_bytes\":[\"serde\",\"dep:serde_bytes\"],\"std\":[\"pkcs8?/std\",\"signature/std\"]}}", "either_1.15.0": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"alloc\",\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.95\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.0\"}],\"features\":{\"default\":[\"std\"],\"std\":[],\"use_std\":[\"std\"]}}", "ena_0.14.3": "{\"dependencies\":[{\"name\":\"dogged\",\"optional\":true,\"req\":\"^0.2.0\"},{\"name\":\"log\",\"req\":\"^0.4\"}],\"features\":{\"bench\":[],\"persistent\":[\"dogged\"]}}", "encode_unicode_1.0.0": "{\"dependencies\":[{\"default_features\":false,\"name\":\"ascii\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"lazy_static\",\"req\":\"^1.0\",\"target\":\"cfg(unix)\"},{\"features\":[\"https-native\"],\"kind\":\"dev\",\"name\":\"minreq\",\"req\":\"^2.6\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}", @@ -903,8 +909,8 @@ "git+https://github.com/juberti-oai/rust-sdks.git?rev=e2d1d1d230c6fc9df171ccb181423f957bb3c1f0#e2d1d1d230c6fc9df171ccb181423f957bb3c1f0_livekit-runtime": 
"{\"dependencies\":[{\"default_features\":true,\"features\":[],\"name\":\"async-io\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"async-std\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"async-task\",\"optional\":true},{\"name\":\"futures\",\"optional\":true},{\"default_features\":false,\"features\":[\"net\",\"rt\",\"rt-multi-thread\",\"time\"],\"name\":\"tokio\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"tokio-stream\",\"optional\":true}],\"features\":{\"async\":[\"dep:async-std\",\"dep:futures\",\"dep:async-io\"],\"default\":[\"tokio\"],\"dispatcher\":[\"dep:futures\",\"dep:async-io\",\"dep:async-std\",\"dep:async-task\"],\"tokio\":[\"dep:tokio\",\"dep:tokio-stream\"]},\"strip_prefix\":\"livekit-runtime\"}", "git+https://github.com/juberti-oai/rust-sdks.git?rev=e2d1d1d230c6fc9df171ccb181423f957bb3c1f0#e2d1d1d230c6fc9df171ccb181423f957bb3c1f0_webrtc-sys": "{\"dependencies\":[{\"name\":\"cxx\"},{\"name\":\"log\"},{\"kind\":\"build\",\"name\":\"cc\"},{\"kind\":\"build\",\"name\":\"cxx-build\"},{\"kind\":\"build\",\"name\":\"glob\"},{\"kind\":\"build\",\"name\":\"pkg-config\"},{\"default_features\":true,\"features\":[],\"kind\":\"build\",\"name\":\"webrtc-sys-build\",\"optional\":false}],\"features\":{\"default\":[]},\"strip_prefix\":\"webrtc-sys\"}", "git+https://github.com/juberti-oai/rust-sdks.git?rev=e2d1d1d230c6fc9df171ccb181423f957bb3c1f0#e2d1d1d230c6fc9df171ccb181423f957bb3c1f0_webrtc-sys-build": "{\"dependencies\":[{\"name\":\"anyhow\"},{\"name\":\"fs2\"},{\"name\":\"regex\"},{\"default_features\":false,\"features\":[\"rustls-tls-native-roots\",\"blocking\"],\"name\":\"reqwest\",\"optional\":false},{\"name\":\"scratch\"},{\"name\":\"semver\"},{\"name\":\"zip\"}],\"features\":{},\"strip_prefix\":\"webrtc-sys/build\"}", - "git+https://github.com/nornagon/crossterm?branch=nornagon%2Fcolor-query#87db8bfa6dc99427fd3b071681b07fc31c6ce995_crossterm": 
"{\"dependencies\":[{\"default_features\":true,\"features\":[],\"name\":\"bitflags\",\"optional\":false},{\"default_features\":false,\"features\":[],\"name\":\"futures-core\",\"optional\":true},{\"name\":\"parking_lot\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"filedescriptor\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[],\"name\":\"libc\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"os-poll\"],\"name\":\"mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[\"std\",\"stdio\",\"termios\"],\"name\":\"rustix\",\"optional\":false,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"signal-hook\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"support-v1_0\"],\"name\":\"signal-hook-mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm_winapi\",\"optional\":true,\"target\":\"cfg(windows)\"},{\"default_features\":true,\"features\":[\"winuser\",\"winerror\"],\"name\":\"winapi\",\"optional\":true,\"target\":\"cfg(windows)\"}],\"features\":{\"bracketed-paste\":[],\"default\":[\"bracketed-paste\",\"windows\",\"events\"],\"event-stream\":[\"dep:futures-core\",\"events\"],\"events\":[\"dep:mio\",\"dep:signal-hook\",\"dep:signal-hook-mio\"],\"serde\":[\"dep:serde\",\"bitflags/serde\"],\"use-dev-tty\":[\"filedescriptor\",\"rustix/process\"],\"windows\":[\"dep:winapi\",\"dep:crossterm_winapi\"]},\"strip_prefix\":\"\"}", - "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#9b2ad1298408c45918ee9f8241a6f95498cdbed2_ratatui": 
"{\"dependencies\":[{\"name\":\"bitflags\"},{\"name\":\"cassowary\"},{\"name\":\"compact_str\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"document-features\",\"optional\":true},{\"name\":\"indoc\"},{\"name\":\"instability\"},{\"name\":\"itertools\"},{\"name\":\"lru\"},{\"default_features\":true,\"features\":[],\"name\":\"palette\",\"optional\":true},{\"name\":\"paste\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"strum\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"termwiz\",\"optional\":true},{\"default_features\":true,\"features\":[\"local-offset\"],\"name\":\"time\",\"optional\":true},{\"name\":\"unicode-segmentation\"},{\"name\":\"unicode-truncate\"},{\"name\":\"unicode-width\"},{\"default_features\":true,\"features\":[],\"name\":\"termion\",\"optional\":true,\"target\":\"cfg(not(windows))\"}],\"features\":{\"all-widgets\":[\"widget-calendar\"],\"crossterm\":[\"dep:crossterm\"],\"default\":[\"crossterm\",\"underline-color\"],\"macros\":[],\"palette\":[\"dep:palette\"],\"scrolling-regions\":[],\"serde\":[\"dep:serde\",\"bitflags/serde\",\"compact_str/serde\"],\"termion\":[\"dep:termion\"],\"termwiz\":[\"dep:termwiz\"],\"underline-color\":[\"dep:crossterm\"],\"unstable\":[\"unstable-rendered-line-info\",\"unstable-widget-ref\",\"unstable-backend-writer\"],\"unstable-backend-writer\":[],\"unstable-rendered-line-info\":[],\"unstable-widget-ref\":[],\"widget-calendar\":[\"dep:time\"]},\"strip_prefix\":\"\"}", + "git+https://github.com/nornagon/crossterm?rev=87db8bfa6dc99427fd3b071681b07fc31c6ce995#87db8bfa6dc99427fd3b071681b07fc31c6ce995_crossterm": 
"{\"dependencies\":[{\"default_features\":true,\"features\":[],\"name\":\"bitflags\",\"optional\":false},{\"default_features\":false,\"features\":[],\"name\":\"futures-core\",\"optional\":true},{\"name\":\"parking_lot\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"filedescriptor\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[],\"name\":\"libc\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"os-poll\"],\"name\":\"mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[\"std\",\"stdio\",\"termios\"],\"name\":\"rustix\",\"optional\":false,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"signal-hook\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"support-v1_0\"],\"name\":\"signal-hook-mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm_winapi\",\"optional\":true,\"target\":\"cfg(windows)\"},{\"default_features\":true,\"features\":[\"winuser\",\"winerror\"],\"name\":\"winapi\",\"optional\":true,\"target\":\"cfg(windows)\"}],\"features\":{\"bracketed-paste\":[],\"default\":[\"bracketed-paste\",\"windows\",\"events\"],\"event-stream\":[\"dep:futures-core\",\"events\"],\"events\":[\"dep:mio\",\"dep:signal-hook\",\"dep:signal-hook-mio\"],\"serde\":[\"dep:serde\",\"bitflags/serde\"],\"use-dev-tty\":[\"filedescriptor\",\"rustix/process\"],\"windows\":[\"dep:winapi\",\"dep:crossterm_winapi\"]},\"strip_prefix\":\"\"}", + "git+https://github.com/nornagon/ratatui?rev=9b2ad1298408c45918ee9f8241a6f95498cdbed2#9b2ad1298408c45918ee9f8241a6f95498cdbed2_ratatui": 
"{\"dependencies\":[{\"name\":\"bitflags\"},{\"name\":\"cassowary\"},{\"name\":\"compact_str\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"document-features\",\"optional\":true},{\"name\":\"indoc\"},{\"name\":\"instability\"},{\"name\":\"itertools\"},{\"name\":\"lru\"},{\"default_features\":true,\"features\":[],\"name\":\"palette\",\"optional\":true},{\"name\":\"paste\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"strum\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"termwiz\",\"optional\":true},{\"default_features\":true,\"features\":[\"local-offset\"],\"name\":\"time\",\"optional\":true},{\"name\":\"unicode-segmentation\"},{\"name\":\"unicode-truncate\"},{\"name\":\"unicode-width\"},{\"default_features\":true,\"features\":[],\"name\":\"termion\",\"optional\":true,\"target\":\"cfg(not(windows))\"}],\"features\":{\"all-widgets\":[\"widget-calendar\"],\"crossterm\":[\"dep:crossterm\"],\"default\":[\"crossterm\",\"underline-color\"],\"macros\":[],\"palette\":[\"dep:palette\"],\"scrolling-regions\":[],\"serde\":[\"dep:serde\",\"bitflags/serde\",\"compact_str/serde\"],\"termion\":[\"dep:termion\"],\"termwiz\":[\"dep:termwiz\"],\"underline-color\":[\"dep:crossterm\"],\"unstable\":[\"unstable-rendered-line-info\",\"unstable-widget-ref\",\"unstable-backend-writer\"],\"unstable-backend-writer\":[],\"unstable-rendered-line-info\":[],\"unstable-widget-ref\":[],\"widget-calendar\":[\"dep:time\"]},\"strip_prefix\":\"\"}", "git+https://github.com/openai-oss-forks/tokio-tungstenite?rev=132f5b39c862e3a970f731d709608b3e6276d5f6#132f5b39c862e3a970f731d709608b3e6276d5f6_tokio-tungstenite": 
"{\"dependencies\":[{\"default_features\":false,\"features\":[\"sink\",\"std\"],\"name\":\"futures-util\",\"optional\":false},{\"name\":\"log\"},{\"default_features\":true,\"features\":[],\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\"},{\"default_features\":false,\"features\":[],\"name\":\"rustls\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-native-certs\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-pki-types\",\"optional\":true},{\"default_features\":false,\"features\":[\"io-util\"],\"name\":\"tokio\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"tokio-native-tls\",\"optional\":true},{\"default_features\":false,\"features\":[],\"name\":\"tokio-rustls\",\"optional\":true},{\"default_features\":false,\"features\":[],\"name\":\"tungstenite\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"webpki-roots\",\"optional\":true}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\",\"tokio-rustls\",\"stream\",\"tungstenite/__rustls-tls\",\"handshake\"],\"connect\":[\"stream\",\"tokio/net\",\"handshake\"],\"default\":[\"connect\",\"handshake\"],\"handshake\":[\"tungstenite/handshake\"],\"native-tls\":[\"native-tls-crate\",\"tokio-native-tls\",\"stream\",\"tungstenite/native-tls\",\"handshake\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\",\"tungstenite/native-tls-vendored\"],\"proxy\":[\"tungstenite/proxy\",\"tokio/net\",\"handshake\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"],\"stream\":[],\"url\":[\"tungstenite/url\"]},\"strip_prefix\":\"\"}", "git+https://github.com/openai-oss-forks/tungstenite-rs?rev=9200079d3b54a1ff51072e24d81fd354f085156f#9200079d3b54a1ff51072e24d81fd354f085156f_tungstenite": 
"{\"dependencies\":[{\"name\":\"bytes\"},{\"default_features\":true,\"features\":[],\"name\":\"data-encoding\",\"optional\":true},{\"default_features\":false,\"features\":[\"zlib\"],\"name\":\"flate2\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"headers\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"http\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"httparse\",\"optional\":true},{\"name\":\"log\"},{\"default_features\":true,\"features\":[],\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\"},{\"name\":\"rand\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"rustls\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-native-certs\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-pki-types\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"sha1\",\"optional\":true},{\"name\":\"thiserror\"},{\"default_features\":true,\"features\":[],\"name\":\"url\",\"optional\":true},{\"name\":\"utf-8\"},{\"default_features\":true,\"features\":[],\"name\":\"webpki-roots\",\"optional\":true}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\"],\"default\":[\"handshake\"],\"deflate\":[\"headers\",\"flate2\"],\"handshake\":[\"data-encoding\",\"headers\",\"httparse\",\"sha1\"],\"headers\":[\"http\",\"dep:headers\"],\"native-tls\":[\"native-tls-crate\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\"],\"proxy\":[\"handshake\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"],\"url\":[\"dep:url\"]},\"strip_prefix\":\"\"}", "git+https://github.com/rust-lang/rust-clippy?rev=20ce69b9a63bcd2756cd906fe0964d1e901e042a#20ce69b9a63bcd2756cd906fe0964d1e901e042a_clippy_utils": 
"{\"dependencies\":[{\"default_features\":false,\"features\":[],\"name\":\"arrayvec\",\"optional\":false},{\"name\":\"itertools\"},{\"name\":\"rustc_apfloat\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":false}],\"features\":{},\"strip_prefix\":\"clippy_utils\"}", @@ -1200,9 +1206,11 @@ "process-wrap_9.0.1": "{\"dependencies\":[{\"name\":\"futures\",\"optional\":true,\"req\":\"^0.3.30\"},{\"name\":\"indexmap\",\"req\":\"^2.9.0\"},{\"default_features\":false,\"features\":[\"fs\",\"poll\",\"signal\"],\"name\":\"nix\",\"optional\":true,\"req\":\"^0.30.1\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"remoteprocess\",\"req\":\"^0.5.0\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.20.0\"},{\"features\":[\"io-util\",\"macros\",\"process\",\"rt\"],\"name\":\"tokio\",\"optional\":true,\"req\":\"^1.38.2\"},{\"features\":[\"io-util\",\"macros\",\"process\",\"rt\",\"rt-multi-thread\",\"time\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.38.2\"},{\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.40\"},{\"name\":\"windows\",\"optional\":true,\"req\":\"^0.62.2\",\"target\":\"cfg(windows)\"}],\"features\":{\"creation-flags\":[\"dep:windows\",\"windows/Win32_System_Threading\"],\"default\":[\"creation-flags\",\"job-object\",\"kill-on-drop\",\"process-group\",\"process-session\",\"tracing\"],\"job-object\":[\"dep:windows\",\"windows/Win32_Security\",\"windows/Win32_System_Diagnostics_ToolHelp\",\"windows/Win32_System_IO\",\"windows/Win32_System_JobObjects\",\"windows/Win32_System_Threading\"],\"kill-on-drop\":[],\"process-group\":[],\"process-session\":[\"process-group\"],\"reset-sigmask\":[],\"std\":[\"dep:nix\"],\"tokio1\":[\"dep:nix\",\"dep:futures\",\"dep:tokio\"],\"tracing\":[\"dep:tracing\"]}}", "proptest_1.9.0": 
"{\"dependencies\":[{\"name\":\"bit-set\",\"optional\":true,\"req\":\"^0.8.0\"},{\"name\":\"bit-vec\",\"optional\":true,\"req\":\"^0.8.0\"},{\"name\":\"bitflags\",\"req\":\"^2.9\"},{\"default_features\":false,\"name\":\"num-traits\",\"req\":\"^0.2.15\"},{\"name\":\"proptest-macro\",\"optional\":true,\"req\":\"^0.4.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"rand\",\"req\":\"^0.9\"},{\"default_features\":false,\"name\":\"rand_chacha\",\"req\":\"^0.9\"},{\"name\":\"rand_xorshift\",\"req\":\"^0.4\"},{\"kind\":\"dev\",\"name\":\"regex\",\"req\":\"^1.0\"},{\"name\":\"regex-syntax\",\"optional\":true,\"req\":\"^0.8\"},{\"default_features\":false,\"name\":\"rusty-fork\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"tempfile\",\"optional\":true,\"req\":\"^3.0\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"=1.0.112\"},{\"name\":\"unarray\",\"req\":\"^0.1.4\"},{\"name\":\"x86\",\"optional\":true,\"req\":\"^0.52.0\"}],\"features\":{\"alloc\":[],\"atomic64bit\":[],\"attr-macro\":[\"proptest-macro\"],\"bit-set\":[\"dep:bit-set\",\"dep:bit-vec\"],\"default\":[\"std\",\"fork\",\"timeout\",\"bit-set\"],\"default-code-coverage\":[\"std\",\"fork\",\"timeout\",\"bit-set\"],\"fork\":[\"std\",\"rusty-fork\",\"tempfile\"],\"handle-panics\":[\"std\"],\"hardware-rng\":[\"x86\"],\"no_std\":[\"num-traits/libm\"],\"std\":[\"rand/std\",\"rand/os_rng\",\"regex-syntax\",\"num-traits/std\"],\"timeout\":[\"fork\",\"rusty-fork/timeout\"],\"unstable\":[]}}", "prost-build_0.12.6": "{\"dependencies\":[{\"default_features\":false,\"name\":\"bytes\",\"req\":\"^1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.10\"},{\"name\":\"heck\",\"req\":\">=0.4, <=0.5\"},{\"default_features\":false,\"features\":[\"use_alloc\"],\"name\":\"itertools\",\"req\":\">=0.10, <=0.12\"},{\"name\":\"log\",\"req\":\"^0.4.4\"},{\"default_features\":false,\"name\":\"multimap\",\"req\":\">=0.8, 
<=0.10\"},{\"name\":\"once_cell\",\"req\":\"^1.17.1\"},{\"default_features\":false,\"name\":\"petgraph\",\"req\":\"^0.6\"},{\"name\":\"prettyplease\",\"optional\":true,\"req\":\"^0.2\"},{\"default_features\":false,\"name\":\"prost\",\"req\":\"^0.12.6\"},{\"default_features\":false,\"name\":\"prost-types\",\"req\":\"^0.12.6\"},{\"default_features\":false,\"name\":\"pulldown-cmark\",\"optional\":true,\"req\":\"^0.9.1\"},{\"name\":\"pulldown-cmark-to-cmark\",\"optional\":true,\"req\":\"^10.0.1\"},{\"default_features\":false,\"features\":[\"std\",\"unicode-bool\"],\"name\":\"regex\",\"req\":\"^1.8.1\"},{\"features\":[\"full\"],\"name\":\"syn\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"tempfile\",\"req\":\"^3\"}],\"features\":{\"cleanup-markdown\":[\"dep:pulldown-cmark\",\"dep:pulldown-cmark-to-cmark\"],\"default\":[\"format\"],\"format\":[\"dep:prettyplease\",\"dep:syn\"]}}", + "prost-build_0.14.3": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.11\"},{\"name\":\"heck\",\"req\":\">=0.4, <=0.5\"},{\"default_features\":false,\"features\":[\"use_alloc\"],\"name\":\"itertools\",\"req\":\">=0.10, <=0.14\"},{\"name\":\"log\",\"req\":\"^0.4.4\"},{\"default_features\":false,\"name\":\"multimap\",\"req\":\">=0.8, 
<=0.10\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"petgraph\",\"req\":\"^0.8\"},{\"name\":\"prettyplease\",\"optional\":true,\"req\":\"^0.2\"},{\"default_features\":false,\"name\":\"prost\",\"req\":\"^0.14.3\"},{\"default_features\":false,\"name\":\"prost-types\",\"req\":\"^0.14.3\"},{\"default_features\":false,\"name\":\"pulldown-cmark\",\"optional\":true,\"req\":\"^0.13\"},{\"name\":\"pulldown-cmark-to-cmark\",\"optional\":true,\"req\":\"^22\"},{\"default_features\":false,\"features\":[\"std\",\"unicode-bool\"],\"name\":\"regex\",\"req\":\"^1.8.1\"},{\"features\":[\"full\"],\"name\":\"syn\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"tempfile\",\"req\":\"^3\"}],\"features\":{\"cleanup-markdown\":[\"dep:pulldown-cmark\",\"dep:pulldown-cmark-to-cmark\"],\"default\":[\"format\"],\"format\":[\"dep:prettyplease\",\"dep:syn\"]}}", "prost-derive_0.12.6": "{\"dependencies\":[{\"name\":\"anyhow\",\"req\":\"^1.0.1\"},{\"default_features\":false,\"features\":[\"use_alloc\"],\"name\":\"itertools\",\"req\":\">=0.10, <=0.12\"},{\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"features\":[\"extra-traits\"],\"name\":\"syn\",\"req\":\"^2\"}],\"features\":{}}", "prost-derive_0.14.3": "{\"dependencies\":[{\"name\":\"anyhow\",\"req\":\"^1.0.1\"},{\"name\":\"itertools\",\"req\":\">=0.10.1, <=0.14\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0.60\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"features\":[\"extra-traits\"],\"name\":\"syn\",\"req\":\"^2\"}],\"features\":{}}", "prost-types_0.12.6": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"prost-derive\"],\"name\":\"prost\",\"req\":\"^0.12.6\"}],\"features\":{\"default\":[\"std\"],\"std\":[\"prost/std\"]}}", + "prost-types_0.14.3": 
"{\"dependencies\":[{\"features\":[\"derive\"],\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.4\"},{\"default_features\":false,\"name\":\"chrono\",\"optional\":true,\"req\":\"^0.4.34\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"derive\"],\"name\":\"prost\",\"req\":\"^0.14.3\"}],\"features\":{\"arbitrary\":[\"dep:arbitrary\"],\"default\":[\"std\"],\"std\":[\"prost/std\"]}}", "prost_0.12.6": "{\"dependencies\":[{\"default_features\":false,\"name\":\"bytes\",\"req\":\"^1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.4\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"name\":\"prost-derive\",\"optional\":true,\"req\":\"^0.12.6\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\"}],\"features\":{\"default\":[\"derive\",\"std\"],\"derive\":[\"dep:prost-derive\"],\"no-recursion-limit\":[],\"prost-derive\":[\"derive\"],\"std\":[]}}", "prost_0.14.3": "{\"dependencies\":[{\"default_features\":false,\"name\":\"bytes\",\"req\":\"^1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.7\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"name\":\"prost-derive\",\"optional\":true,\"req\":\"^0.14.3\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.9\"}],\"features\":{\"default\":[\"derive\",\"std\"],\"derive\":[\"dep:prost-derive\"],\"no-recursion-limit\":[],\"std\":[]}}", "psl-types_2.0.11": "{\"dependencies\":[],\"features\":{}}", @@ -1238,7 +1246,7 @@ "rama-unix_0.3.0-alpha.4": "{\"dependencies\":[{\"name\":\"pin-project-lite\",\"req\":\"^0.2\"},{\"name\":\"rama-core\",\"req\":\"^0.3.0-alpha.4\"},{\"name\":\"rama-net\",\"req\":\"^0.3.0-alpha.4\"},{\"features\":[\"macros\",\"net\"],\"name\":\"tokio\",\"req\":\"^1.48\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.48\"}],\"features\":{\"default\":[]}}", "rama-utils_0.3.0-alpha.4": 
"{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"ahash\",\"req\":\"^0.8\"},{\"name\":\"const_format\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"name\":\"parking_lot\",\"req\":\"^0.12\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^1.0\"},{\"name\":\"rama-macros\",\"req\":\"^0.3.0-alpha.4\"},{\"name\":\"regex\",\"req\":\"^1.12\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1\"},{\"features\":[\"write\",\"serde\",\"const_generics\",\"const_new\"],\"name\":\"smallvec\",\"req\":\"^1.15\"},{\"name\":\"smol_str\",\"req\":\"^0.3\"},{\"features\":[\"time\",\"macros\"],\"name\":\"tokio\",\"req\":\"^1.48\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0\"},{\"name\":\"wildcard\",\"req\":\"^0.3\"}],\"features\":{}}", "rand_0.8.5": 
"{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.2.1\"},{\"default_features\":false,\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.22\",\"target\":\"cfg(unix)\"},{\"name\":\"log\",\"optional\":true,\"req\":\"^0.4.4\"},{\"features\":[\"into_bits\"],\"name\":\"packed_simd\",\"optional\":true,\"package\":\"packed_simd_2\",\"req\":\"^0.3.7\"},{\"default_features\":false,\"name\":\"rand_chacha\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"rand_core\",\"req\":\"^0.6.0\"},{\"kind\":\"dev\",\"name\":\"rand_pcg\",\"req\":\"^0.3.0\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.103\"}],\"features\":{\"alloc\":[\"rand_core/alloc\"],\"default\":[\"std\",\"std_rng\"],\"getrandom\":[\"rand_core/getrandom\"],\"min_const_gen\":[],\"nightly\":[],\"serde1\":[\"serde\",\"rand_core/serde1\"],\"simd_support\":[\"packed_simd\"],\"small_rng\":[],\"std\":[\"rand_core/std\",\"rand_chacha/std\",\"alloc\",\"getrandom\",\"libc\"],\"std_rng\":[\"rand_chacha\"]}}", - "rand_0.9.2": 
"{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.2.1\"},{\"name\":\"log\",\"optional\":true,\"req\":\"^0.4.4\"},{\"default_features\":false,\"name\":\"rand_chacha\",\"optional\":true,\"req\":\"^0.9.0\"},{\"default_features\":false,\"name\":\"rand_core\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rand_pcg\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rayon\",\"req\":\"^1.7\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.103\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.140\"}],\"features\":{\"alloc\":[],\"default\":[\"std\",\"std_rng\",\"os_rng\",\"small_rng\",\"thread_rng\"],\"log\":[\"dep:log\"],\"nightly\":[],\"os_rng\":[\"rand_core/os_rng\"],\"serde\":[\"dep:serde\",\"rand_core/serde\"],\"simd_support\":[],\"small_rng\":[],\"std\":[\"rand_core/std\",\"rand_chacha?/std\",\"alloc\"],\"std_rng\":[\"dep:rand_chacha\"],\"thread_rng\":[\"std\",\"std_rng\",\"os_rng\"],\"unbiased\":[]}}", + "rand_0.9.3": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.2.1\"},{\"default_features\":false,\"name\":\"rand_chacha\",\"optional\":true,\"req\":\"^0.9.0\"},{\"default_features\":false,\"name\":\"rand_core\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rand_pcg\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rayon\",\"req\":\"^1.7\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.103\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.140\"}],\"features\":{\"alloc\":[],\"default\":[\"std\",\"std_rng\",\"os_rng\",\"small_rng\",\"thread_rng\"],\"log\":[],\"nightly\":[],\"os_rng\":[\"rand_core/os_rng\"],\"serde\":[\"dep:serde\",\"rand_core/serde\"],\"simd_support\":[],\"small_rng\":[],\"std\":[\"rand_core/std\",\"rand_chacha?/std\",\"alloc\"],\"std_rng\":[\"dep:rand_chacha\"],\"thread_rng\":[\"std\",\"std_rng\",\"os_rng\"],\"unbiased\":[]}}", "rand_chacha_0.3.1": 
"{\"dependencies\":[{\"default_features\":false,\"features\":[\"simd\"],\"name\":\"ppv-lite86\",\"req\":\"^0.2.8\"},{\"name\":\"rand_core\",\"req\":\"^0.6.0\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{\"default\":[\"std\"],\"serde1\":[\"serde\"],\"simd\":[],\"std\":[\"ppv-lite86/std\"]}}", "rand_chacha_0.9.0": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"simd\"],\"name\":\"ppv-lite86\",\"req\":\"^0.2.14\"},{\"name\":\"rand_core\",\"req\":\"^0.9.0\"},{\"features\":[\"os_rng\"],\"kind\":\"dev\",\"name\":\"rand_core\",\"req\":\"^0.9.0\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{\"default\":[\"std\"],\"os_rng\":[\"rand_core/os_rng\"],\"serde\":[\"dep:serde\"],\"std\":[\"ppv-lite86/std\",\"rand_core/std\"]}}", "rand_core_0.6.4": "{\"dependencies\":[{\"name\":\"getrandom\",\"optional\":true,\"req\":\"^0.2\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"alloc\":[],\"serde1\":[\"serde\"],\"std\":[\"alloc\",\"getrandom\",\"getrandom/std\"]}}", @@ -1286,7 +1294,7 @@ "rustix_1.1.4": "{\"dependencies\":[{\"default_features\":false,\"name\":\"bitflags\",\"req\":\"^2.4.0\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.4\",\"target\":\"cfg(all(criterion, not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"flate2\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.182\",\"target\":\"cfg(all(not(windows), any(rustix_use_libc, miri, not(all(target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", 
all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = \\\"64\\\")))))))\"},{\"default_features\":false,\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.182\",\"target\":\"cfg(all(not(rustix_use_libc), not(miri), target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = \\\"64\\\"))))\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.171\"},{\"default_features\":false,\"name\":\"libc_errno\",\"package\":\"errno\",\"req\":\"^0.3.10\",\"target\":\"cfg(all(not(windows), any(rustix_use_libc, miri, not(all(target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", 
all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = \\\"64\\\")))))))\"},{\"default_features\":false,\"name\":\"libc_errno\",\"package\":\"errno\",\"req\":\"^0.3.10\",\"target\":\"cfg(windows)\"},{\"default_features\":false,\"name\":\"libc_errno\",\"optional\":true,\"package\":\"errno\",\"req\":\"^0.3.10\",\"target\":\"cfg(all(not(rustix_use_libc), not(miri), target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = 
\\\"64\\\"))))\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"libc_errno\",\"package\":\"errno\",\"req\":\"^0.3.10\"},{\"default_features\":false,\"features\":[\"general\",\"ioctl\",\"no_std\"],\"name\":\"linux-raw-sys\",\"req\":\"^0.12\",\"target\":\"cfg(all(any(target_os = \\\"linux\\\", target_os = \\\"android\\\"), any(rustix_use_libc, miri, not(all(target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = \\\"64\\\")))))))\"},{\"default_features\":false,\"features\":[\"auxvec\",\"general\",\"errno\",\"ioctl\",\"no_std\",\"elf\"],\"name\":\"linux-raw-sys\",\"req\":\"^0.12\",\"target\":\"cfg(all(not(rustix_use_libc), not(miri), target_os = \\\"linux\\\", any(target_endian = \\\"little\\\", any(target_arch = \\\"s390x\\\", target_arch = \\\"powerpc\\\")), any(target_arch = \\\"arm\\\", all(target_arch = \\\"aarch64\\\", target_pointer_width = \\\"64\\\"), target_arch = \\\"riscv64\\\", all(rustix_use_experimental_asm, target_arch = \\\"powerpc\\\"), all(rustix_use_experimental_asm, target_arch = \\\"powerpc64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"s390x\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips32r6\\\"), 
all(rustix_use_experimental_asm, target_arch = \\\"mips64\\\"), all(rustix_use_experimental_asm, target_arch = \\\"mips64r6\\\"), target_arch = \\\"x86\\\", all(target_arch = \\\"x86_64\\\", target_pointer_width = \\\"64\\\"))))\"},{\"kind\":\"dev\",\"name\":\"memoffset\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1.20.3\",\"target\":\"cfg(windows)\"},{\"name\":\"rustc-std-workspace-alloc\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"serial_test\",\"req\":\"^2.0.0\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1.0\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.5.0\"},{\"features\":[\"Win32_Foundation\",\"Win32_Networking_WinSock\"],\"name\":\"windows-sys\",\"req\":\">=0.52, <0.62\",\"target\":\"cfg(windows)\"}],\"features\":{\"all-apis\":[\"event\",\"fs\",\"io_uring\",\"mm\",\"mount\",\"net\",\"param\",\"pipe\",\"process\",\"pty\",\"rand\",\"runtime\",\"shm\",\"stdio\",\"system\",\"termios\",\"thread\",\"time\"],\"alloc\":[],\"default\":[\"std\"],\"event\":[],\"fs\":[],\"io_uring\":[\"event\",\"fs\",\"net\",\"thread\",\"linux-raw-sys/io_uring\"],\"linux_4_11\":[],\"linux_5_1\":[\"linux_4_11\"],\"linux_5_11\":[\"linux_5_1\"],\"linux_latest\":[\"linux_5_11\"],\"mm\":[],\"mount\":[],\"net\":[\"linux-raw-sys/net\",\"linux-raw-sys/netlink\",\"linux-raw-sys/if_ether\",\"linux-raw-sys/xdp\"],\"param\":[],\"pipe\":[],\"process\":[\"linux-raw-sys/prctl\"],\"pty\":[\"fs\"],\"rand\":[],\"runtime\":[\"linux-raw-sys/prctl\"],\"rustc-dep-of-std\":[\"core\",\"rustc-std-workspace-alloc\",\"linux-raw-sys/rustc-dep-of-std\",\"bitflags/rustc-dep-of-std\"],\"shm\":[\"fs\"],\"std\":[\"bitflags/std\",\"alloc\",\"libc?/std\",\"libc_errno?/std\"],\"stdio\":[],\"system\":[\"linux-raw-sys/system\"],\"termios\":[],\"thread\":[\"linux-raw-sys/prctl\"],\"time\":[],\"try_close\":[],\"use-explicitly-provided-auxv\":[],\"use-libc\":[\"libc_errno\",\"libc\"],\"use-libc-auxv\":[]}}", "rustls-native-certs_0.8.3": 
"{\"dependencies\":[{\"name\":\"openssl-probe\",\"req\":\"^0.2\",\"target\":\"cfg(all(unix, not(target_os = \\\"macos\\\")))\"},{\"features\":[\"std\"],\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.10\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17\"},{\"kind\":\"dev\",\"name\":\"rustls\",\"req\":\"^0.23\"},{\"kind\":\"dev\",\"name\":\"rustls-webpki\",\"req\":\"^0.103\"},{\"name\":\"schannel\",\"req\":\"^0.1\",\"target\":\"cfg(windows)\"},{\"name\":\"security-framework\",\"req\":\"^3\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"kind\":\"dev\",\"name\":\"serial_test\",\"req\":\"^3\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.5\"},{\"kind\":\"dev\",\"name\":\"untrusted\",\"req\":\"^0.9\"},{\"kind\":\"dev\",\"name\":\"webpki-roots\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.18\"}],\"features\":{}}", "rustls-pki-types_1.14.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"crabgrind\",\"req\":\"=0.1.9\",\"target\":\"cfg(all(target_os = \\\"linux\\\", target_arch = \\\"x86_64\\\"))\"},{\"name\":\"web-time\",\"optional\":true,\"req\":\"^1\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\"))\"},{\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"alloc\":[\"dep:zeroize\"],\"default\":[\"alloc\"],\"std\":[\"alloc\"],\"web\":[\"web-time\"]}}", - "rustls-webpki_0.103.10": 
"{\"dependencies\":[{\"default_features\":false,\"name\":\"aws-lc-rs\",\"optional\":true,\"req\":\"^1.14\"},{\"kind\":\"dev\",\"name\":\"base64\",\"req\":\"^0.22\"},{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.5\"},{\"kind\":\"dev\",\"name\":\"bzip2\",\"req\":\"^0.6\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1.17.2\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.12\"},{\"default_features\":false,\"features\":[\"aws_lc_rs\"],\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.14.2\"},{\"default_features\":false,\"name\":\"ring\",\"optional\":true,\"req\":\"^0.17\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"name\":\"untrusted\",\"req\":\"^0.9\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.18.1\"}],\"features\":{\"alloc\":[\"ring?/alloc\",\"pki-types/alloc\"],\"aws-lc-rs\":[\"dep:aws-lc-rs\",\"aws-lc-rs/aws-lc-sys\",\"aws-lc-rs/prebuilt-nasm\"],\"aws-lc-rs-fips\":[\"dep:aws-lc-rs\",\"aws-lc-rs/fips\"],\"aws-lc-rs-unstable\":[\"aws-lc-rs\",\"aws-lc-rs/unstable\"],\"default\":[\"std\"],\"ring\":[\"dep:ring\"],\"std\":[\"alloc\",\"pki-types/std\"]}}", + "rustls-webpki_0.103.12": 
"{\"dependencies\":[{\"default_features\":false,\"name\":\"aws-lc-rs\",\"optional\":true,\"req\":\"^1.14\"},{\"kind\":\"dev\",\"name\":\"base64\",\"req\":\"^0.22\"},{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.5\"},{\"kind\":\"dev\",\"name\":\"bzip2\",\"req\":\"^0.6\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1.17.2\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.12\"},{\"default_features\":false,\"features\":[\"aws_lc_rs\"],\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.14.2\"},{\"default_features\":false,\"name\":\"ring\",\"optional\":true,\"req\":\"^0.17\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"name\":\"untrusted\",\"req\":\"^0.9\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.18.1\"}],\"features\":{\"alloc\":[\"ring?/alloc\",\"pki-types/alloc\"],\"aws-lc-rs\":[\"dep:aws-lc-rs\",\"aws-lc-rs/aws-lc-sys\",\"aws-lc-rs/prebuilt-nasm\"],\"aws-lc-rs-fips\":[\"dep:aws-lc-rs\",\"aws-lc-rs/fips\"],\"aws-lc-rs-unstable\":[\"aws-lc-rs\",\"aws-lc-rs/unstable\"],\"default\":[\"std\"],\"ring\":[\"dep:ring\"],\"std\":[\"alloc\",\"pki-types/std\"]}}", "rustls_0.23.36": 
"{\"dependencies\":[{\"default_features\":false,\"name\":\"aws-lc-rs\",\"optional\":true,\"req\":\"^1.14\"},{\"kind\":\"dev\",\"name\":\"base64\",\"req\":\"^0.22\"},{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.5\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"brotli\",\"optional\":true,\"req\":\"^8\"},{\"name\":\"brotli-decompressor\",\"optional\":true,\"req\":\"^5.0.0\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.11\"},{\"default_features\":false,\"features\":[\"default-hasher\",\"inline-more\"],\"name\":\"hashbrown\",\"optional\":true,\"req\":\"^0.15\"},{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4\"},{\"name\":\"log\",\"optional\":true,\"req\":\"^0.4.8\"},{\"kind\":\"dev\",\"name\":\"log\",\"req\":\"^0.4.8\"},{\"kind\":\"dev\",\"name\":\"macro_rules_attribute\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"num-bigint\",\"req\":\"^0.4.4\"},{\"default_features\":false,\"features\":[\"alloc\",\"race\"],\"name\":\"once_cell\",\"req\":\"^1.16\"},{\"features\":[\"alloc\"],\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.12\"},{\"default_features\":false,\"features\":[\"pem\",\"aws_lc_rs\"],\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.14\"},{\"name\":\"ring\",\"optional\":true,\"req\":\"^0.17\"},{\"kind\":\"build\",\"name\":\"rustversion\",\"optional\":true,\"req\":\"^1.0.6\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"subtle\",\"req\":\"^2.5.0\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"time\",\"req\":\"^0.3.6\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.103.5\"},{\"kind\":\"dev\",\"name\":\"webpki-roots\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17\"},{\"name\":\"zeroize\",\"req\":\"^1.8\"},{\"name\":\"zlib-rs\",\"optional\":true,\"req\":\"^0.5\"}],\"fea
tures\":{\"aws-lc-rs\":[\"aws_lc_rs\"],\"aws_lc_rs\":[\"dep:aws-lc-rs\",\"webpki/aws-lc-rs\",\"aws-lc-rs/aws-lc-sys\",\"aws-lc-rs/prebuilt-nasm\"],\"brotli\":[\"dep:brotli\",\"dep:brotli-decompressor\",\"std\"],\"custom-provider\":[],\"default\":[\"aws_lc_rs\",\"logging\",\"prefer-post-quantum\",\"std\",\"tls12\"],\"fips\":[\"aws_lc_rs\",\"aws-lc-rs?/fips\",\"webpki/aws-lc-rs-fips\"],\"logging\":[\"log\"],\"prefer-post-quantum\":[\"aws_lc_rs\"],\"read_buf\":[\"rustversion\",\"std\"],\"ring\":[\"dep:ring\",\"webpki/ring\"],\"std\":[\"webpki/std\",\"pki-types/std\",\"once_cell/std\"],\"tls12\":[],\"zlib\":[\"dep:zlib-rs\"]}}", "rustversion_1.0.22": "{\"dependencies\":[{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.49\"}],\"features\":{}}", "rustyline_14.0.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"assert_matches\",\"req\":\"^1.2\"},{\"name\":\"bitflags\",\"req\":\"^2.0\"},{\"default_features\":false,\"name\":\"buffer-redux\",\"optional\":true,\"req\":\"^1.0\",\"target\":\"cfg(unix)\"},{\"name\":\"cfg-if\",\"req\":\"^1.0\"},{\"name\":\"clipboard-win\",\"req\":\"^5.0\",\"target\":\"cfg(windows)\"},{\"kind\":\"dev\",\"name\":\"doc-comment\",\"req\":\"^0.3\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.11\"},{\"name\":\"fd-lock\",\"optional\":true,\"req\":\"^4.0.0\"},{\"name\":\"home\",\"optional\":true,\"req\":\"^0.5.4\"},{\"name\":\"libc\",\"req\":\"^0.2\"},{\"name\":\"log\",\"req\":\"^0.4\"},{\"name\":\"memchr\",\"req\":\"^2.0\"},{\"default_features\":false,\"features\":[\"fs\",\"ioctl\",\"poll\",\"signal\",\"term\"],\"name\":\"nix\",\"req\":\"^0.28\",\"target\":\"cfg(unix)\"},{\"name\":\"radix_trie\",\"optional\":true,\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\"},{\"name\":\"regex\",\"optional\":true,\"req\":\"^1.5.5\"},{\"default_features\":false,\"features\":[\"bundled\",\"backup\"],\"name\":\"rusqlite\",\"optional\":true,\"req\":\"^0.31.0\"},{\"name\":\"rustyli
ne-derive\",\"optional\":true,\"req\":\"^0.10.0\"},{\"default_features\":false,\"name\":\"signal-hook\",\"optional\":true,\"req\":\"^0.3\",\"target\":\"cfg(unix)\"},{\"default_features\":false,\"name\":\"skim\",\"optional\":true,\"req\":\"^0.10\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\"},{\"name\":\"termios\",\"optional\":true,\"req\":\"^0.3.3\",\"target\":\"cfg(unix)\"},{\"name\":\"unicode-segmentation\",\"req\":\"^1.0\"},{\"name\":\"unicode-width\",\"req\":\"^0.1\"},{\"name\":\"utf8parse\",\"req\":\"^0.2\",\"target\":\"cfg(unix)\"},{\"features\":[\"Win32_Foundation\",\"Win32_System_Console\",\"Win32_Security\",\"Win32_System_Threading\",\"Win32_UI_Input_KeyboardAndMouse\"],\"name\":\"windows-sys\",\"req\":\"^0.52.0\",\"target\":\"cfg(windows)\"}],\"features\":{\"case_insensitive_history_search\":[\"regex\"],\"custom-bindings\":[\"radix_trie\"],\"default\":[\"custom-bindings\",\"with-dirs\",\"with-file-history\"],\"derive\":[\"rustyline-derive\"],\"with-dirs\":[\"home\"],\"with-file-history\":[\"fd-lock\"],\"with-fuzzy\":[\"skim\"],\"with-sqlite-history\":[\"rusqlite\"]}}", @@ -1456,6 +1464,8 @@ "toml_parser_1.0.6+spec-1.1.0": "{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.20\"},{\"features\":[\"test\"],\"kind\":\"dev\",\"name\":\"anstream\",\"req\":\"^0.6.20\"},{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.11\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.21\"},{\"default_features\":false,\"name\":\"winnow\",\"req\":\"^0.7.13\"}],\"features\":{\"alloc\":[],\"debug\":[\"std\",\"dep:anstream\",\"dep:anstyle\"],\"default\":[\"std\"],\"simd\":[\"winnow/simd\"],\"std\":[\"alloc\"],\"unsafe\":[]}}", "toml_parser_1.0.9+spec-1.1.0": 
"{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.20\"},{\"features\":[\"test\"],\"kind\":\"dev\",\"name\":\"anstream\",\"req\":\"^0.6.20\"},{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.11\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.21\"},{\"default_features\":false,\"name\":\"winnow\",\"req\":\"^0.7.13\"}],\"features\":{\"alloc\":[],\"debug\":[\"std\",\"dep:anstream\",\"dep:anstyle\"],\"default\":[\"std\"],\"simd\":[\"winnow/simd\"],\"std\":[\"alloc\"],\"unsafe\":[]}}", "toml_writer_1.0.6+spec-1.1.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1.7.0\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.21\"},{\"kind\":\"dev\",\"name\":\"toml_old\",\"package\":\"toml\",\"req\":\"^0.5.11\"}],\"features\":{\"alloc\":[],\"default\":[\"std\"],\"std\":[\"alloc\"]}}", + "tonic-build_0.14.3": "{\"dependencies\":[{\"name\":\"prettyplease\",\"req\":\"^0.2\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0\"},{\"name\":\"quote\",\"req\":\"^1.0\"},{\"name\":\"syn\",\"req\":\"^2.0\"}],\"features\":{\"default\":[\"transport\"],\"transport\":[]}}", + "tonic-prost-build_0.14.3": "{\"dependencies\":[{\"name\":\"prettyplease\",\"req\":\"^0.2\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0\"},{\"name\":\"prost-build\",\"req\":\"^0.14\"},{\"name\":\"prost-types\",\"req\":\"^0.14\"},{\"name\":\"quote\",\"req\":\"^1.0\"},{\"name\":\"syn\",\"req\":\"^2.0\"},{\"name\":\"tempfile\",\"req\":\"^3.0\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"tonic\",\"req\":\"^0.14.0\"},{\"default_features\":false,\"name\":\"tonic-build\",\"req\":\"^0.14.0\"}],\"features\":{\"cleanup-markdown\":[\"prost-build/cleanup-markdown\"],\"default\":[\"transport\",\"cleanup-markdown\"],\"transport\":[\"tonic-build/transport\"]}}", "tonic-prost_0.14.3": 
"{\"dependencies\":[{\"name\":\"bytes\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"http-body\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"http-body-util\",\"req\":\"^0.1\"},{\"name\":\"prost\",\"req\":\"^0.14\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"default_features\":false,\"name\":\"tonic\",\"req\":\"^0.14.0\"}],\"features\":{}}", "tonic_0.14.3": "{\"dependencies\":[{\"name\":\"async-trait\",\"optional\":true,\"req\":\"^0.1.13\"},{\"default_features\":false,\"name\":\"axum\",\"optional\":true,\"req\":\"^0.8\"},{\"name\":\"base64\",\"req\":\"^0.22\"},{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.5\"},{\"name\":\"bytes\",\"req\":\"^1.0\"},{\"name\":\"flate2\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"h2\",\"optional\":true,\"req\":\"^0.4\"},{\"name\":\"http\",\"req\":\"^1.1.0\"},{\"name\":\"http-body\",\"req\":\"^1\"},{\"name\":\"http-body-util\",\"req\":\"^0.1\"},{\"features\":[\"http1\",\"http2\"],\"name\":\"hyper\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"hyper-timeout\",\"optional\":true,\"req\":\"^0.5\"},{\"features\":[\"tokio\"],\"name\":\"hyper-util\",\"optional\":true,\"req\":\"^0.1.11\"},{\"name\":\"percent-encoding\",\"req\":\"^2.1\"},{\"name\":\"pin-project\",\"req\":\"^1.0.11\"},{\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"quickcheck_macros\",\"req\":\"^1.0\"},{\"name\":\"rustls-native-certs\",\"optional\":true,\"req\":\"^0.8\"},{\"features\":[\"all\"],\"name\":\"socket2\",\"optional\":true,\"req\":\"^0.6\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.0\"},{\"name\":\"sync_wrapper\",\"req\":\"^1.0.2\"},{\"default_features\":false,\"name\":\"tokio\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"rt-multi-thread\",\"macros\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0\"},{\"default_features\":false,\"features\":[\"logging\",\"tls12\"],\"
name\":\"tokio-rustls\",\"optional\":true,\"req\":\"^0.26.1\"},{\"default_features\":false,\"name\":\"tokio-stream\",\"req\":\"^0.1.16\"},{\"default_features\":false,\"name\":\"tower\",\"optional\":true,\"req\":\"^0.5\"},{\"features\":[\"load-shed\",\"timeout\"],\"kind\":\"dev\",\"name\":\"tower\",\"req\":\"^0.5\"},{\"name\":\"tower-layer\",\"req\":\"^0.3\"},{\"name\":\"tower-service\",\"req\":\"^0.3\"},{\"name\":\"tracing\",\"req\":\"^0.1\"},{\"name\":\"webpki-roots\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"zstd\",\"optional\":true,\"req\":\"^0.13.0\"}],\"features\":{\"_tls-any\":[\"dep:tokio\",\"tokio?/rt\",\"tokio?/macros\",\"tls-connect-info\"],\"channel\":[\"dep:hyper\",\"hyper?/client\",\"dep:hyper-util\",\"hyper-util?/client-legacy\",\"dep:tower\",\"tower?/balance\",\"tower?/buffer\",\"tower?/discover\",\"tower?/limit\",\"tower?/load-shed\",\"tower?/util\",\"dep:tokio\",\"tokio?/time\",\"dep:hyper-timeout\"],\"codegen\":[\"dep:async-trait\"],\"default\":[\"router\",\"transport\",\"codegen\"],\"deflate\":[\"dep:flate2\"],\"gzip\":[\"dep:flate2\"],\"router\":[\"dep:axum\",\"dep:tower\",\"tower?/util\"],\"server\":[\"dep:h2\",\"dep:hyper\",\"hyper?/server\",\"dep:hyper-util\",\"hyper-util?/service\",\"hyper-util?/server-auto\",\"dep:socket2\",\"dep:tokio\",\"tokio?/macros\",\"tokio?/net\",\"tokio?/time\",\"tokio-stream/net\",\"dep:tower\",\"tower?/util\",\"tower?/limit\",\"tower?/load-shed\"],\"tls-aws-lc\":[\"_tls-any\",\"tokio-rustls/aws-lc-rs\"],\"tls-connect-info\":[\"dep:tokio-rustls\"],\"tls-native-roots\":[\"_tls-any\",\"channel\",\"dep:rustls-native-certs\"],\"tls-ring\":[\"_tls-any\",\"tokio-rustls/ring\"],\"tls-webpki-roots\":[\"_tls-any\",\"channel\",\"dep:webpki-roots\"],\"transport\":[\"server\",\"channel\"],\"zstd\":[\"dep:zstd\"]}}", "tower-http_0.6.8": 
"{\"dependencies\":[{\"features\":[\"tokio\"],\"name\":\"async-compression\",\"optional\":true,\"req\":\"^0.4\"},{\"name\":\"base64\",\"optional\":true,\"req\":\"^0.22\"},{\"name\":\"bitflags\",\"req\":\"^2.0.2\"},{\"kind\":\"dev\",\"name\":\"brotli\",\"req\":\"^8\"},{\"name\":\"bytes\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"bytes\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"flate2\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"futures-core\",\"optional\":true,\"req\":\"^0.3\"},{\"default_features\":false,\"name\":\"futures-util\",\"optional\":true,\"req\":\"^0.3.14\"},{\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3.14\"},{\"name\":\"http\",\"req\":\"^1.0\"},{\"name\":\"http-body\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"http-body\",\"req\":\"^1.0.0\"},{\"name\":\"http-body-util\",\"optional\":true,\"req\":\"^0.1.0\"},{\"kind\":\"dev\",\"name\":\"http-body-util\",\"req\":\"^0.1.0\"},{\"name\":\"http-range-header\",\"optional\":true,\"req\":\"^0.4.0\"},{\"name\":\"httpdate\",\"optional\":true,\"req\":\"^1.0\"},{\"features\":[\"client-legacy\",\"http1\",\"tokio\"],\"kind\":\"dev\",\"name\":\"hyper-util\",\"req\":\"^0.1\"},{\"name\":\"iri-string\",\"optional\":true,\"req\":\"^0.7.0\"},{\"default_features\":false,\"name\":\"mime\",\"optional\":true,\"req\":\"^0.3.17\"},{\"default_features\":false,\"name\":\"mime_guess\",\"optional\":true,\"req\":\"^2\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1\"},{\"name\":\"percent-encoding\",\"optional\":true,\"req\":\"^2.1.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.7\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"sync_wrapper\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"tokio\",\"optional\":true,\"req\":\"^1.6\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"io\"],\"name\":\"tokio-util\",\"optional\":true,\"req\":\"^0.7\"},{\"
name\":\"tower\",\"optional\":true,\"req\":\"^0.5\"},{\"features\":[\"buffer\",\"util\",\"retry\",\"make\",\"timeout\"],\"kind\":\"dev\",\"name\":\"tower\",\"req\":\"^0.5\"},{\"name\":\"tower-layer\",\"req\":\"^0.3.3\"},{\"name\":\"tower-service\",\"req\":\"^0.3\"},{\"default_features\":false,\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tracing-subscriber\",\"req\":\"^0.3\"},{\"features\":[\"v4\"],\"name\":\"uuid\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"zstd\",\"req\":\"^0.13\"}],\"features\":{\"add-extension\":[],\"auth\":[\"base64\",\"validate-request\"],\"catch-panic\":[\"tracing\",\"futures-util/std\",\"dep:http-body\",\"dep:http-body-util\"],\"compression-br\":[\"async-compression/brotli\",\"futures-core\",\"dep:http-body\",\"tokio-util\",\"tokio\"],\"compression-deflate\":[\"async-compression/zlib\",\"futures-core\",\"dep:http-body\",\"tokio-util\",\"tokio\"],\"compression-full\":[\"compression-br\",\"compression-deflate\",\"compression-gzip\",\"compression-zstd\"],\"compression-gzip\":[\"async-compression/gzip\",\"futures-core\",\"dep:http-body\",\"tokio-util\",\"tokio\"],\"compression-zstd\":[\"async-compression/zstd\",\"futures-core\",\"dep:http-body\",\"tokio-util\",\"tokio\"],\"cors\":[],\"decompression-br\":[\"async-compression/brotli\",\"futures-core\",\"dep:http-body\",\"dep:http-body-util\",\"tokio-util\",\"tokio\"],\"decompression-deflate\":[\"async-compression/zlib\",\"futures-core\",\"dep:http-body\",\"dep:http-body-util\",\"tokio-util\",\"tokio\"],\"decompression-full\":[\"decompression-br\",\"decompression-deflate\",\"decompression-gzip\",\"decompression-zstd\"],\"decompression-gzip\":[\"async-compression/gzip\",\"futures-core\",\"dep:http-body\",\"dep:http-body-util\",\"tokio-util\",\"tokio\"],\"decompression-zstd\":[\"async-compression/zstd\",\"futures-core\",\"dep:http-body\",\"dep:http-body-util\",\"tokio-util\",\"tokio\"],\"default\":[],\"follow-redirect\":[\"futures-util\",\
"dep:http-body\",\"iri-string\",\"tower/util\"],\"fs\":[\"futures-core\",\"futures-util\",\"dep:http-body\",\"dep:http-body-util\",\"tokio/fs\",\"tokio-util/io\",\"tokio/io-util\",\"dep:http-range-header\",\"mime_guess\",\"mime\",\"percent-encoding\",\"httpdate\",\"set-status\",\"futures-util/alloc\",\"tracing\"],\"full\":[\"add-extension\",\"auth\",\"catch-panic\",\"compression-full\",\"cors\",\"decompression-full\",\"follow-redirect\",\"fs\",\"limit\",\"map-request-body\",\"map-response-body\",\"metrics\",\"normalize-path\",\"propagate-header\",\"redirect\",\"request-id\",\"sensitive-headers\",\"set-header\",\"set-status\",\"timeout\",\"trace\",\"util\",\"validate-request\"],\"limit\":[\"dep:http-body\",\"dep:http-body-util\"],\"map-request-body\":[],\"map-response-body\":[],\"metrics\":[\"dep:http-body\",\"tokio/time\"],\"normalize-path\":[],\"propagate-header\":[],\"redirect\":[],\"request-id\":[\"uuid\"],\"sensitive-headers\":[],\"set-header\":[],\"set-status\":[],\"timeout\":[\"dep:http-body\",\"tokio/time\"],\"trace\":[\"dep:http-body\",\"tracing\"],\"util\":[\"tower\"],\"validate-request\":[\"mime\"]}}", diff --git a/SECURITY.md b/SECURITY.md index d6dd568910..29a9b903df 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -11,3 +11,7 @@ Our security program is managed through Bugcrowd, and we ask that any validated ## Vulnerability Disclosure Program Our Vulnerability Program Guidelines are defined on our [Bugcrowd program page](https://bugcrowd.com/engagements/openai). + +## How to operate CODEX safely + +For details on Codex security boundaries, including sandboxing, approvals, and network controls, see [Agent approvals & security](https://developers.openai.com/codex/agent-approvals-security). 
diff --git a/announcement_tip.toml b/announcement_tip.toml index 2473258368..79c0daea76 100644 --- a/announcement_tip.toml +++ b/announcement_tip.toml @@ -4,20 +4,14 @@ # version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions. # target_app specify which app should display the announcement (cli, vsce, ...). -[[announcements]] -content = "Welcome to Codex! Check out the new onboarding flow." -from_date = "2024-10-01" -to_date = "2024-10-15" -target_app = "cli" - -# Test announcement only for local build version until 2026-01-10 excluded (past) +# Test announcement only for local build version until 2027-05-10 excluded [[announcements]] content = "This is a test announcement" version_regex = "^0\\.0\\.0$" -to_date = "2026-05-10" +to_date = "2027-05-10" [[announcements]] -content = "**BREAKING NEWS**: `gpt-5.3-codex` is out! Upgrade to `0.98.0` for a faster, smarter, more steerable agent." -from_date = "2026-02-01" -to_date = "2026-02-16" -version_regex = "^0\\.(?:[0-9]|[1-8][0-9]|9[0-7])\\." +content = "Update Required - This version will no longer be supported starting May 8th. Please upgrade to the latest version (https://github.com/openai/codex/releases/latest) using your preferred package manager." +# Matches 0.x.y versions from 0.0.y through 0.119.y; excludes 0.120.0 and newer. +version_regex = "^0\\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\\." +to_date = "2026-05-08" diff --git a/codex-rs/.cargo/audit.toml b/codex-rs/.cargo/audit.toml index 143e64163a..3760d28648 100644 --- a/codex-rs/.cargo/audit.toml +++ b/codex-rs/.cargo/audit.toml @@ -1,6 +1,10 @@ [advisories] +# Reviewed 2026-04-15. Keep this list in sync with ../deny.toml. 
ignore = [ "RUSTSEC-2024-0388", # derivative 2.2.0 via starlark; upstream crate is unmaintained "RUSTSEC-2025-0057", # fxhash 0.2.1 via starlark_map; upstream crate is unmaintained "RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained + "RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it + "RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it + "RUSTSEC-2026-0097", # rand 0.8.5 via age/codex-secrets and zbus/keyring; remove when transitive deps move to rand >=0.9.3 ] diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index 6c101b940d..76188da5b8 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -940,6 +940,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -1375,6 +1384,7 @@ dependencies = [ "codex-login", "codex-plugin", "codex-protocol", + "codex-utils-absolute-path", "os_info", "pretty_assertions", "serde", @@ -1445,6 +1455,7 @@ dependencies = [ "codex-cloud-requirements", "codex-config", "codex-core", + "codex-core-plugins", "codex-exec-server", "codex-features", "codex-feedback", @@ -1460,6 +1471,7 @@ dependencies = [ "codex-sandboxing", "codex-shell-command", "codex-state", + "codex-thread-store", "codex-tools", "codex-utils-absolute-path", "codex-utils-cargo-bin", @@ -1528,7 +1540,6 @@ dependencies = [ "anyhow", "clap", "codex-experimental-api-macros", - "codex-git-utils", "codex-protocol", "codex-shell-command", "codex-utils-absolute-path", @@ -1646,6 +1657,7 @@ version = "0.0.0" dependencies = [ "anyhow", "clap", + "codex-app-server-protocol", "codex-config", "codex-connectors", "codex-core", @@ -1669,6 +1681,7 @@ dependencies = [ "assert_matches", "clap", "clap_complete", + 
"codex-api", "codex-app-server", "codex-app-server-protocol", "codex-app-server-test-client", @@ -1692,6 +1705,7 @@ dependencies = [ "codex-stdio-to-uds", "codex-terminal-detection", "codex-tui", + "codex-utils-absolute-path", "codex-utils-cargo-bin", "codex-utils-cli", "codex-utils-path", @@ -1726,7 +1740,7 @@ dependencies = [ "opentelemetry", "opentelemetry_sdk", "pretty_assertions", - "rand 0.9.2", + "rand 0.9.3", "reqwest", "rustls", "rustls-native-certs", @@ -1828,6 +1842,8 @@ name = "codex-code-mode" version = "0.0.0" dependencies = [ "async-trait", + "codex-protocol", + "deno_core_icudata", "pretty_assertions", "serde", "serde_json", @@ -1849,12 +1865,11 @@ dependencies = [ "codex-app-server-protocol", "codex-execpolicy", "codex-features", - "codex-git-utils", "codex-model-provider-info", "codex-network-proxy", "codex-protocol", "codex-utils-absolute-path", - "dunce", + "codex-utils-path", "futures", "multimap", "pretty_assertions", @@ -1907,6 +1922,7 @@ dependencies = [ "codex-code-mode", "codex-config", "codex-connectors", + "codex-core-plugins", "codex-core-skills", "codex-exec-server", "codex-execpolicy", @@ -1932,6 +1948,8 @@ dependencies = [ "codex-shell-escalation", "codex-state", "codex-terminal-detection", + "codex-test-binary-support", + "codex-thread-store", "codex-tools", "codex-utils-absolute-path", "codex-utils-cache", @@ -1949,10 +1967,12 @@ dependencies = [ "codex-windows-sandbox", "core-foundation 0.9.4", "core_test_support", + "crypto_box", "csv", "ctor 0.6.3", "dirs", "dunce", + "ed25519-dalek", "env-flags", "eventsource-stream", "futures", @@ -1970,7 +1990,7 @@ dependencies = [ "opentelemetry_sdk", "predicates", "pretty_assertions", - "rand 0.9.2", + "rand 0.9.3", "regex-lite", "reqwest", "rmcp", @@ -1978,6 +1998,7 @@ dependencies = [ "serde_json", "serial_test", "sha1", + "sha2", "shlex", "similar", "tempfile", @@ -2004,6 +2025,34 @@ dependencies = [ "zstd 0.13.3", ] +[[package]] +name = "codex-core-plugins" +version = "0.0.0" 
+dependencies = [ + "chrono", + "codex-app-server-protocol", + "codex-config", + "codex-core-skills", + "codex-exec-server", + "codex-git-utils", + "codex-login", + "codex-plugin", + "codex-protocol", + "codex-utils-absolute-path", + "codex-utils-plugins", + "dirs", + "pretty_assertions", + "reqwest", + "serde", + "serde_json", + "tempfile", + "thiserror 2.0.18", + "tokio", + "toml 0.9.11+spec-1.1.0", + "tracing", + "url", +] + [[package]] name = "codex-core-skills" version = "0.0.0" @@ -2012,6 +2061,7 @@ dependencies = [ "codex-analytics", "codex-app-server-protocol", "codex-config", + "codex-exec-server", "codex-instructions", "codex-login", "codex-otel", @@ -2097,16 +2147,19 @@ dependencies = [ "arc-swap", "async-trait", "base64 0.22.1", - "clap", "codex-app-server-protocol", + "codex-config", "codex-protocol", + "codex-sandboxing", + "codex-test-binary-support", "codex-utils-absolute-path", - "codex-utils-cargo-bin", "codex-utils-pty", + "ctor 0.6.3", "futures", "pretty_assertions", "serde", "serde_json", + "serial_test", "tempfile", "test-case", "thiserror 2.0.18", @@ -2209,6 +2262,8 @@ name = "codex-git-utils" version = "0.0.0" dependencies = [ "assert_matches", + "codex-exec-server", + "codex-protocol", "codex-utils-absolute-path", "futures", "once_cell", @@ -2231,6 +2286,7 @@ dependencies = [ "chrono", "codex-config", "codex-protocol", + "codex-utils-absolute-path", "futures", "pretty_assertions", "regex", @@ -2241,6 +2297,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "codex-install-context" +version = "0.0.0" +dependencies = [ + "codex-utils-home-dir", + "pretty_assertions", + "tempfile", +] + [[package]] name = "codex-instructions" version = "0.0.0" @@ -2269,6 +2334,7 @@ dependencies = [ "codex-protocol", "codex-sandboxing", "codex-utils-absolute-path", + "globset", "landlock", "libc", "pkg-config", @@ -2318,7 +2384,7 @@ dependencies = [ "once_cell", "os_info", "pretty_assertions", - "rand 0.9.2", + "rand 0.9.3", "regex-lite", "reqwest", "serde", 
@@ -2349,6 +2415,7 @@ dependencies = [ "codex-plugin", "codex-protocol", "codex-rmcp-client", + "codex-utils-absolute-path", "codex-utils-plugins", "futures", "pretty_assertions", @@ -2379,6 +2446,7 @@ dependencies = [ "codex-models-manager", "codex-protocol", "codex-shell-command", + "codex-utils-absolute-path", "codex-utils-cli", "codex-utils-json-to-toml", "core_test_support", @@ -2553,13 +2621,13 @@ dependencies = [ "chrono", "codex-async-utils", "codex-execpolicy", - "codex-git-utils", "codex-network-proxy", "codex-utils-absolute-path", "codex-utils-image", "codex-utils-string", "codex-utils-template", "encoding_rs", + "globset", "http 1.4.0", "icu_decimal", "icu_locale_core", @@ -2684,14 +2752,18 @@ dependencies = [ name = "codex-sandboxing" version = "0.0.0" dependencies = [ + "anyhow", + "async-trait", "codex-network-proxy", "codex-protocol", "codex-utils-absolute-path", "dunce", "libc", "pretty_assertions", + "regex-lite", "serde_json", "tempfile", + "tokio", "tracing", "url", "which 8.0.0", @@ -2708,7 +2780,7 @@ dependencies = [ "codex-keyring-store", "keyring", "pretty_assertions", - "rand 0.9.2", + "rand 0.9.3", "regex", "schemars 0.8.22", "serde", @@ -2810,6 +2882,38 @@ dependencies = [ "tracing", ] +[[package]] +name = "codex-test-binary-support" +version = "0.0.0" +dependencies = [ + "codex-arg0", + "tempfile", +] + +[[package]] +name = "codex-thread-store" +version = "0.0.0" +dependencies = [ + "async-trait", + "chrono", + "codex-git-utils", + "codex-protocol", + "codex-rollout", + "codex-state", + "pretty_assertions", + "prost 0.14.3", + "serde", + "serde_json", + "tempfile", + "thiserror 2.0.18", + "tokio", + "tokio-stream", + "tonic", + "tonic-prost", + "tonic-prost-build", + "uuid", +] + [[package]] name = "codex-tools" version = "0.0.0" @@ -2845,11 +2949,13 @@ dependencies = [ "codex-cli", "codex-cloud-requirements", "codex-config", + "codex-connectors", "codex-exec-server", "codex-features", "codex-feedback", "codex-file-search", 
"codex-git-utils", + "codex-install-context", "codex-login", "codex-mcp", "codex-model-provider-info", @@ -2888,7 +2994,7 @@ dependencies = [ "pathdiff", "pretty_assertions", "pulldown-cmark", - "rand 0.9.2", + "rand 0.9.3", "ratatui", "ratatui-macros", "regex-lite", @@ -2987,6 +3093,7 @@ version = "0.0.0" name = "codex-utils-home-dir" version = "0.0.0" dependencies = [ + "codex-utils-absolute-path", "dirs", "pretty_assertions", "tempfile", @@ -3046,10 +3153,13 @@ dependencies = [ name = "codex-utils-plugins" version = "0.0.0" dependencies = [ + "codex-exec-server", "codex-login", + "codex-utils-absolute-path", "serde", "serde_json", "tempfile", + "tokio", ] [[package]] @@ -3523,7 +3633,7 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" version = "0.28.1" -source = "git+https://github.com/nornagon/crossterm?branch=nornagon%2Fcolor-query#87db8bfa6dc99427fd3b071681b07fc31c6ce995" +source = "git+https://github.com/nornagon/crossterm?rev=87db8bfa6dc99427fd3b071681b07fc31c6ce995#87db8bfa6dc99427fd3b071681b07fc31c6ce995" dependencies = [ "bitflags 2.10.0", "crossterm_winapi", @@ -3558,9 +3668,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] +[[package]] +name = "crypto_box" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16182b4f39a82ec8a6851155cc4c0cda3065bb1db33651726a29e1951de0f009" +dependencies = [ + "aead", + "blake2", + "crypto_secretbox", + "curve25519-dalek", + "salsa20", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto_secretbox" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" +dependencies = [ + "aead", + "cipher", + "generic-array", + "poly1305", + "salsa20", 
+ "subtle", + "zeroize", +] + [[package]] name = "csv" version = "1.4.0" @@ -3617,6 +3758,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", + "digest", "fiat-crypto", "rustc_version", "subtle", @@ -3886,6 +4028,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26bf8fc351c5ed29b5c2f0cbbac1b209b74f60ecd62e675a998df72c49af5204" +[[package]] +name = "deno_core_icudata" +version = "0.77.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9efff8990a82c1ae664292507e1a5c6749ddd2312898cdf9cd7cb1fd4bc64c6" + [[package]] name = "der" version = "0.7.10" @@ -4087,7 +4235,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4191,6 +4339,30 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.15.0" @@ -4819,6 +4991,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -5120,7 +5293,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.2", + "rand 0.9.3", "ring", "thiserror 2.0.18", "tinyvec", @@ -5142,7 +5315,7 @@ dependencies = [ "moka", "once_cell", "parking_lot", - "rand 0.9.2", + "rand 0.9.3", 
"resolv-conf", "smallvec", "thiserror 2.0.18", @@ -6689,7 +6862,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -7288,7 +7461,7 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.2", + "rand 0.9.3", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -7441,7 +7614,7 @@ dependencies = [ "heck 0.4.1", "itertools 0.11.0", "prost 0.12.6", - "prost-types", + "prost-types 0.12.6", ] [[package]] @@ -7455,7 +7628,7 @@ dependencies = [ "pbjson", "pbjson-build", "prost 0.12.6", - "prost-build", + "prost-build 0.12.6", "serde", ] @@ -7837,7 +8010,7 @@ checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bitflags 2.10.0", "num-traits", - "rand 0.9.2", + "rand 0.9.3", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.8", @@ -7879,7 +8052,26 @@ dependencies = [ "petgraph 0.6.5", "prettyplease", "prost 0.12.6", - "prost-types", + "prost-types 0.12.6", + "regex", + "syn 2.0.114", + "tempfile", +] + +[[package]] +name = "prost-build" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" +dependencies = [ + "heck 0.5.0", + "itertools 0.14.0", + "log", + "multimap", + "petgraph 0.8.3", + "prettyplease", + "prost 0.14.3", + "prost-types 0.14.3", "regex", "syn 2.0.114", "tempfile", @@ -7920,6 +8112,15 @@ dependencies = [ "prost 0.12.6", ] +[[package]] +name = "prost-types" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" +dependencies = [ + "prost 0.14.3", +] + [[package]] name = "psl" version = "2.1.184" @@ -8008,7 +8209,7 @@ dependencies = [ "bytes", "getrandom 0.3.4", "lru-slab", - 
"rand 0.9.2", + "rand 0.9.3", "ring", "rustc-hash 2.1.1", "rustls", @@ -8140,7 +8341,7 @@ dependencies = [ "rama-http-types", "rama-net", "rama-utils", - "rand 0.9.2", + "rand 0.9.3", "serde", "serde_html_form", "serde_json", @@ -8210,7 +8411,7 @@ dependencies = [ "rama-macros", "rama-net", "rama-utils", - "rand 0.9.2", + "rand 0.9.3", "serde", "sha1", ] @@ -8238,7 +8439,7 @@ dependencies = [ "rama-error", "rama-macros", "rama-utils", - "rand 0.9.2", + "rand 0.9.3", "serde", "serde_json", "sync_wrapper", @@ -8312,7 +8513,7 @@ dependencies = [ "rama-http-types", "rama-net", "rama-utils", - "rand 0.9.2", + "rand 0.9.3", "tokio", ] @@ -8392,9 +8593,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +checksum = "7ec095654a25171c2124e9e3393a930bddbffdc939556c914957a4c3e0a87166" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.5", @@ -8450,7 +8651,7 @@ dependencies = [ [[package]] name = "ratatui" version = "0.29.0" -source = "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#9b2ad1298408c45918ee9f8241a6f95498cdbed2" +source = "git+https://github.com/nornagon/ratatui?rev=9b2ad1298408c45918ee9f8241a6f95498cdbed2#9b2ad1298408c45918ee9f8241a6f95498cdbed2" dependencies = [ "bitflags 2.10.0", "cassowary", @@ -8709,7 +8910,7 @@ dependencies = [ "pastey", "pin-project-lite", "process-wrap", - "rand 0.9.2", + "rand 0.9.3", "reqwest", "rmcp-macros", "schemars 1.2.1", @@ -8916,9 +9117,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ "aws-lc-rs", "ring", @@ -9293,7 +9494,7 @@ version = "0.46.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26ab054c34b87f96c3e4701bea1888317cde30cc7e4a6136d2c48454ab96661c" dependencies = [ - "rand 0.9.2", + "rand 0.9.3", "sentry-types", "serde", "serde_json", @@ -9341,7 +9542,7 @@ checksum = "eecbd63e9d15a26a40675ed180d376fcb434635d2e33de1c24003f61e3e2230d" dependencies = [ "debugid", "hex", - "rand 0.9.2", + "rand 0.9.3", "serde", "serde_json", "thiserror 2.0.18", @@ -10828,8 +11029,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a" dependencies = [ "async-trait", + "axum", "base64 0.22.1", "bytes", + "h2", "http 1.4.0", "http-body", "http-body-util", @@ -10839,6 +11042,7 @@ dependencies = [ "percent-encoding", "pin-project", "rustls-native-certs", + "socket2 0.6.2", "sync_wrapper", "tokio", "tokio-rustls", @@ -10849,6 +11053,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic-build" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27aac809edf60b741e2d7db6367214d078856b8a5bff0087e94ff330fb97b6fc" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "tonic-prost" version = "0.14.3" @@ -10860,6 +11076,22 @@ dependencies = [ "tonic", ] +[[package]] +name = "tonic-prost-build" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4556786613791cfef4ed134aa670b61a85cfcacf71543ef33e8d801abae988f" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.14.3", + "prost-types 0.14.3", + "quote", + "syn 2.0.114", + "tempfile", + "tonic-build", +] + [[package]] name = "tower" version = "0.5.3" @@ -11126,7 +11358,7 @@ dependencies = [ "http 1.4.0", "httparse", "log", - "rand 0.9.2", + "rand 0.9.3", "rustls", "rustls-pki-types", "sha1", @@ -11813,7 +12045,7 @@ version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index 32ae50bfb7..c8f6b9e5a7 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -13,6 +13,7 @@ members = [ "arg0", "feedback", "features", + "install-context", "codex-backend-openapi-models", "code-mode", "cloud-requirements", @@ -27,6 +28,7 @@ members = [ "shell-escalation", "skills", "core", + "core-plugins", "core-skills", "hooks", "instructions", @@ -86,6 +88,8 @@ members = [ "codex-api", "state", "terminal-detection", + "test-binary-support", + "thread-store", "codex-experimental-api-macros", "plugin", ] @@ -125,6 +129,7 @@ codex-code-mode = { path = "code-mode" } codex-config = { path = "config" } codex-connectors = { path = "connectors" } codex-core = { path = "core" } +codex-core-plugins = { path = "core-plugins" } codex-core-skills = { path = "core-skills" } codex-exec = { path = "exec" } codex-exec-server = { path = "exec-server" } @@ -132,6 +137,7 @@ codex-execpolicy = { path = "execpolicy" } codex-experimental-api-macros = { path = "codex-experimental-api-macros" } codex-features = { path = "features" } codex-feedback = { path = "feedback" } +codex-install-context = { path = "install-context" } codex-file-search = { path = "file-search" } codex-git-utils = { path = "git-utils" } codex-hooks = { path = "hooks" } @@ -163,6 +169,8 @@ codex-skills = { path = "skills" } codex-state = { path = "state" } codex-stdio-to-uds = { path = "stdio-to-uds" } codex-terminal-detection = { path = "terminal-detection" } +codex-test-binary-support = { path = "test-binary-support" } +codex-thread-store = { path = "thread-store" } codex-tools = { path = "tools" } codex-tui = { path = "tui" } codex-utils-absolute-path = { path = "utils/absolute-path" } @@ -216,13 +224,16 @@ color-eyre = "0.6.3" 
constant_time_eq = "0.3.1" crossbeam-channel = "0.5.15" crossterm = "0.28.1" +crypto_box = { version = "0.9.1", features = ["seal"] } csv = "1.3.1" ctor = "0.6.3" +deno_core_icudata = "0.77.0" derive_more = "2" diffy = "0.4.2" dirs = "6" dotenvy = "0.15.7" dunce = "1.0.4" +ed25519-dalek = { version = "2.2.0", features = ["pkcs8"] } encoding_rs = "0.8.35" env-flags = "0.1.1" env_logger = "0.11.9" @@ -337,6 +348,8 @@ tracing-appender = "0.2.3" tracing-opentelemetry = "0.32.0" tracing-subscriber = "0.3.22" tracing-test = "0.2.5" +tonic = { version = "0.14.3", default-features = false, features = ["channel", "codegen"] } +tonic-prost = "0.14.3" tree-sitter = "0.25.10" tree-sitter-bash = "0.25" ts-rs = "11" @@ -427,8 +440,8 @@ opt-level = 0 [patch.crates-io] # Uncomment to debug local changes. # ratatui = { path = "../../ratatui" } -crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" } -ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" } +crossterm = { git = "https://github.com/nornagon/crossterm", rev = "87db8bfa6dc99427fd3b071681b07fc31c6ce995" } +ratatui = { git = "https://github.com/nornagon/ratatui", rev = "9b2ad1298408c45918ee9f8241a6f95498cdbed2" } tokio-tungstenite = { git = "https://github.com/openai-oss-forks/tokio-tungstenite", rev = "132f5b39c862e3a970f731d709608b3e6276d5f6" } tungstenite = { git = "https://github.com/openai-oss-forks/tungstenite-rs", rev = "9200079d3b54a1ff51072e24d81fd354f085156f" } diff --git a/codex-rs/README.md b/codex-rs/README.md index 6307668f39..2ad7158f98 100644 --- a/codex-rs/README.md +++ b/codex-rs/README.md @@ -1,6 +1,6 @@ # Codex CLI (Rust Implementation) -We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install. +We provide Codex CLI as a standalone executable to ensure a zero-dependency install. 
## Installing Codex diff --git a/codex-rs/analytics/Cargo.toml b/codex-rs/analytics/Cargo.toml index 0f36373145..f706814d41 100644 --- a/codex-rs/analytics/Cargo.toml +++ b/codex-rs/analytics/Cargo.toml @@ -29,4 +29,5 @@ tokio = { workspace = true, features = [ tracing = { workspace = true, features = ["log"] } [dev-dependencies] +codex-utils-absolute-path = { workspace = true } pretty_assertions = { workspace = true } diff --git a/codex-rs/analytics/src/analytics_client_tests.rs b/codex-rs/analytics/src/analytics_client_tests.rs index e5aca39b8e..4b13b2819e 100644 --- a/codex-rs/analytics/src/analytics_client_tests.rs +++ b/codex-rs/analytics/src/analytics_client_tests.rs @@ -4,18 +4,21 @@ use crate::events::CodexAppMentionedEventRequest; use crate::events::CodexAppServerClientMetadata; use crate::events::CodexAppUsedEventRequest; use crate::events::CodexCompactionEventRequest; +use crate::events::CodexHookRunEventRequest; use crate::events::CodexPluginEventRequest; use crate::events::CodexPluginUsedEventRequest; use crate::events::CodexRuntimeMetadata; -use crate::events::ThreadInitializationMode; +use crate::events::CodexTurnEventRequest; use crate::events::ThreadInitializedEvent; use crate::events::ThreadInitializedEventParams; use crate::events::TrackEventRequest; use crate::events::codex_app_metadata; +use crate::events::codex_hook_run_metadata; use crate::events::codex_plugin_metadata; use crate::events::codex_plugin_used_metadata; use crate::events::subagent_thread_started_event_request; use crate::facts::AnalyticsFact; +use crate::facts::AnalyticsJsonRpcError; use crate::facts::AppInvocation; use crate::facts::AppMentionedInput; use crate::facts::AppUsedInput; @@ -27,6 +30,9 @@ use crate::facts::CompactionStatus; use crate::facts::CompactionStrategy; use crate::facts::CompactionTrigger; use crate::facts::CustomAnalyticsFact; +use crate::facts::HookRunFact; +use crate::facts::HookRunInput; +use crate::facts::InputError; use crate::facts::InvocationType; use 
crate::facts::PluginState; use crate::facts::PluginStateChangedInput; @@ -34,30 +40,60 @@ use crate::facts::PluginUsedInput; use crate::facts::SkillInvocation; use crate::facts::SkillInvokedInput; use crate::facts::SubAgentThreadStartedInput; +use crate::facts::ThreadInitializationMode; use crate::facts::TrackEventsContext; +use crate::facts::TurnResolvedConfigFact; +use crate::facts::TurnStatus; +use crate::facts::TurnSteerRequestError; +use crate::facts::TurnTokenUsageFact; use crate::reducer::AnalyticsReducer; use crate::reducer::normalize_path_for_skill_id; use crate::reducer::skill_id_for_local_skill; use codex_app_server_protocol::ApprovalsReviewer as AppServerApprovalsReviewer; use codex_app_server_protocol::AskForApproval as AppServerAskForApproval; use codex_app_server_protocol::ClientInfo; +use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ClientResponse; +use codex_app_server_protocol::CodexErrorInfo; use codex_app_server_protocol::InitializeCapabilities; use codex_app_server_protocol::InitializeParams; +use codex_app_server_protocol::JSONRPCErrorError; +use codex_app_server_protocol::NonSteerableTurnKind; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SandboxPolicy as AppServerSandboxPolicy; +use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::SessionSource as AppServerSessionSource; use codex_app_server_protocol::Thread; use codex_app_server_protocol::ThreadResumeResponse; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::ThreadStatus as AppServerThreadStatus; +use codex_app_server_protocol::Turn; +use codex_app_server_protocol::TurnCompletedNotification; +use codex_app_server_protocol::TurnError as AppServerTurnError; +use codex_app_server_protocol::TurnStartParams; +use codex_app_server_protocol::TurnStartedNotification; +use codex_app_server_protocol::TurnStatus as AppServerTurnStatus; +use codex_app_server_protocol::TurnSteerParams; 
+use codex_app_server_protocol::TurnSteerResponse; +use codex_app_server_protocol::UserInput; use codex_login::default_client::DEFAULT_ORIGINATOR; use codex_login::default_client::originator; use codex_plugin::AppConnectorId; use codex_plugin::PluginCapabilitySummary; use codex_plugin::PluginId; use codex_plugin::PluginTelemetryMetadata; +use codex_protocol::config_types::ApprovalsReviewer; +use codex_protocol::config_types::ModeKind; +use codex_protocol::protocol::AskForApproval; +use codex_protocol::protocol::HookEventName; +use codex_protocol::protocol::HookRunStatus; +use codex_protocol::protocol::HookSource; +use codex_protocol::protocol::SandboxPolicy; +use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; +use codex_protocol::protocol::TokenUsage; +use codex_utils_absolute_path::test_support::PathBufExt; +use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use serde_json::json; use std::collections::HashSet; @@ -85,7 +121,7 @@ fn sample_thread_with_source( updated_at: 2, status: AppServerThreadStatus::Idle, path: None, - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), cli_version: "0.0.0".to_string(), source, agent_nickname: None, @@ -104,7 +140,7 @@ fn sample_thread_start_response(thread_id: &str, ephemeral: bool, model: &str) - model: model.to_string(), model_provider: "openai".to_string(), service_tier: None, - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), instruction_sources: Vec::new(), approval_policy: AppServerAskForApproval::OnFailure, approvals_reviewer: AppServerApprovalsReviewer::User, @@ -155,7 +191,7 @@ fn sample_thread_resume_response_with_source( model: model.to_string(), model_provider: "openai".to_string(), service_tier: None, - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), instruction_sources: Vec::new(), approval_policy: AppServerAskForApproval::OnFailure, approvals_reviewer: AppServerApprovalsReviewer::User, 
@@ -165,6 +201,339 @@ fn sample_thread_resume_response_with_source( } } +fn sample_turn_start_request(thread_id: &str, request_id: i64) -> ClientRequest { + ClientRequest::TurnStart { + request_id: RequestId::Integer(request_id), + params: TurnStartParams { + thread_id: thread_id.to_string(), + input: vec![ + UserInput::Text { + text: "hello".to_string(), + text_elements: vec![], + }, + UserInput::Image { + url: "https://example.com/a.png".to_string(), + }, + ], + ..Default::default() + }, + } +} + +fn sample_turn_start_response(turn_id: &str, request_id: i64) -> ClientResponse { + ClientResponse::TurnStart { + request_id: RequestId::Integer(request_id), + response: codex_app_server_protocol::TurnStartResponse { + turn: Turn { + id: turn_id.to_string(), + items: vec![], + status: AppServerTurnStatus::InProgress, + error: None, + started_at: None, + completed_at: None, + duration_ms: None, + }, + }, + } +} + +fn sample_turn_started_notification(thread_id: &str, turn_id: &str) -> ServerNotification { + ServerNotification::TurnStarted(TurnStartedNotification { + thread_id: thread_id.to_string(), + turn: Turn { + id: turn_id.to_string(), + items: vec![], + status: AppServerTurnStatus::InProgress, + error: None, + started_at: Some(455), + completed_at: None, + duration_ms: None, + }, + }) +} + +fn sample_turn_token_usage_fact(thread_id: &str, turn_id: &str) -> TurnTokenUsageFact { + TurnTokenUsageFact { + thread_id: thread_id.to_string(), + turn_id: turn_id.to_string(), + token_usage: TokenUsage { + total_tokens: 321, + input_tokens: 123, + cached_input_tokens: 45, + output_tokens: 140, + reasoning_output_tokens: 13, + }, + } +} + +fn sample_turn_completed_notification( + thread_id: &str, + turn_id: &str, + status: AppServerTurnStatus, + codex_error_info: Option, +) -> ServerNotification { + ServerNotification::TurnCompleted(TurnCompletedNotification { + thread_id: thread_id.to_string(), + turn: Turn { + id: turn_id.to_string(), + items: vec![], + status, + error: 
codex_error_info.map(|codex_error_info| AppServerTurnError { + message: "turn failed".to_string(), + codex_error_info: Some(codex_error_info), + additional_details: None, + }), + started_at: None, + completed_at: Some(456), + duration_ms: Some(1234), + }, + }) +} + +fn sample_turn_resolved_config(turn_id: &str) -> TurnResolvedConfigFact { + TurnResolvedConfigFact { + turn_id: turn_id.to_string(), + thread_id: "thread-2".to_string(), + num_input_images: 1, + submission_type: None, + ephemeral: false, + session_source: SessionSource::Exec, + model: "gpt-5".to_string(), + model_provider: "openai".to_string(), + sandbox_policy: SandboxPolicy::new_read_only_policy(), + reasoning_effort: None, + reasoning_summary: None, + service_tier: None, + approval_policy: AskForApproval::OnRequest, + approvals_reviewer: ApprovalsReviewer::GuardianSubagent, + sandbox_network_access: true, + collaboration_mode: ModeKind::Plan, + personality: None, + is_first_turn: true, + } +} + +fn sample_turn_steer_request( + thread_id: &str, + expected_turn_id: &str, + request_id: i64, +) -> ClientRequest { + ClientRequest::TurnSteer { + request_id: RequestId::Integer(request_id), + params: TurnSteerParams { + thread_id: thread_id.to_string(), + expected_turn_id: expected_turn_id.to_string(), + input: vec![ + UserInput::Text { + text: "more".to_string(), + text_elements: vec![], + }, + UserInput::LocalImage { + path: "/tmp/a.png".into(), + }, + ], + responsesapi_client_metadata: None, + }, + } +} + +fn sample_turn_steer_response(turn_id: &str, request_id: i64) -> ClientResponse { + ClientResponse::TurnSteer { + request_id: RequestId::Integer(request_id), + response: TurnSteerResponse { + turn_id: turn_id.to_string(), + }, + } +} + +fn no_active_turn_steer_error() -> JSONRPCErrorError { + JSONRPCErrorError { + code: -32600, + message: "no active turn to steer".to_string(), + data: None, + } +} + +fn no_active_turn_steer_error_type() -> AnalyticsJsonRpcError { + 
AnalyticsJsonRpcError::TurnSteer(TurnSteerRequestError::NoActiveTurn) +} + +fn non_steerable_review_error() -> JSONRPCErrorError { + JSONRPCErrorError { + code: -32600, + message: "cannot steer a review turn".to_string(), + data: Some( + serde_json::to_value(AppServerTurnError { + message: "cannot steer a review turn".to_string(), + codex_error_info: Some(CodexErrorInfo::ActiveTurnNotSteerable { + turn_kind: NonSteerableTurnKind::Review, + }), + additional_details: None, + }) + .expect("serialize turn error"), + ), + } +} + +fn non_steerable_review_error_type() -> AnalyticsJsonRpcError { + AnalyticsJsonRpcError::TurnSteer(TurnSteerRequestError::NonSteerableReview) +} + +fn input_too_large_steer_error() -> JSONRPCErrorError { + JSONRPCErrorError { + code: -32602, + message: "Input exceeds the maximum length of 1048576 characters.".to_string(), + data: Some(json!({ + "input_error_code": "input_too_large", + "actual_chars": 1048577, + "max_chars": 1048576, + })), + } +} + +fn input_too_large_error_type() -> AnalyticsJsonRpcError { + AnalyticsJsonRpcError::Input(InputError::TooLarge) +} + +async fn ingest_rejected_turn_steer( + reducer: &mut AnalyticsReducer, + out: &mut Vec, + error: JSONRPCErrorError, + error_type: Option, +) -> serde_json::Value { + ingest_turn_prerequisites( + reducer, out, /*include_initialize*/ true, /*include_resolved_config*/ false, + /*include_started*/ false, /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(4), + request: Box::new(sample_turn_steer_request( + "thread-2", "turn-2", /*request_id*/ 4, + )), + }, + out, + ) + .await; + reducer + .ingest( + AnalyticsFact::ErrorResponse { + connection_id: 7, + request_id: RequestId::Integer(4), + error, + error_type, + }, + out, + ) + .await; + + assert_eq!(out.len(), 1); + serde_json::to_value(&out[0]).expect("serialize turn steer event") +} + +async fn ingest_initialize(reducer: &mut AnalyticsReducer, 
out: &mut Vec) { + reducer + .ingest( + AnalyticsFact::Initialize { + connection_id: 7, + params: InitializeParams { + client_info: ClientInfo { + name: "codex-tui".to_string(), + title: None, + version: "1.0.0".to_string(), + }, + capabilities: None, + }, + product_client_id: "codex-tui".to_string(), + runtime: sample_runtime_metadata(), + rpc_transport: AppServerRpcTransport::Stdio, + }, + out, + ) + .await; +} + +async fn ingest_turn_prerequisites( + reducer: &mut AnalyticsReducer, + out: &mut Vec, + include_initialize: bool, + include_resolved_config: bool, + include_started: bool, + include_token_usage: bool, +) { + if include_initialize { + ingest_initialize(reducer, out).await; + reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_thread_start_response( + "thread-2", /*ephemeral*/ false, "gpt-5", + )), + }, + out, + ) + .await; + out.clear(); + } + + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(3), + request: Box::new(sample_turn_start_request("thread-2", /*request_id*/ 3)), + }, + out, + ) + .await; + reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_turn_start_response("turn-2", /*request_id*/ 3)), + }, + out, + ) + .await; + + if include_resolved_config { + reducer + .ingest( + AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new( + sample_turn_resolved_config("turn-2"), + ))), + out, + ) + .await; + } + + if include_started { + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_started_notification( + "thread-2", "turn-2", + ))), + out, + ) + .await; + } + + if include_token_usage { + reducer + .ingest( + AnalyticsFact::Custom(CustomAnalyticsFact::TurnTokenUsage(Box::new( + sample_turn_token_usage_fact("thread-2", "turn-2"), + ))), + out, + ) + .await; + } +} + fn expected_absolute_path(path: &PathBuf) -> String { std::fs::canonicalize(path) .unwrap_or_else(|_| 
path.to_path_buf()) @@ -565,10 +934,6 @@ async fn initialize_caches_client_and_thread_lifecycle_publishes_once_initialize payload[0]["event_params"]["runtime"]["runtime_arch"], "x86_64" ); - assert_eq!(payload[0]["event_params"]["initialization_mode"], "resumed"); - assert_eq!(payload[0]["event_params"]["thread_source"], "user"); - assert_eq!(payload[0]["event_params"]["subagent_source"], json!(null)); - assert_eq!(payload[0]["event_params"]["parent_thread_id"], json!(null)); } #[tokio::test] @@ -924,6 +1289,109 @@ fn plugin_management_event_serializes_expected_shape() { ); } +#[test] +fn hook_run_event_serializes_expected_shape() { + let tracking = TrackEventsContext { + model_slug: "gpt-5".to_string(), + thread_id: "thread-3".to_string(), + turn_id: "turn-3".to_string(), + }; + let event = TrackEventRequest::HookRun(CodexHookRunEventRequest { + event_type: "codex_hook_run", + event_params: codex_hook_run_metadata( + &tracking, + HookRunFact { + event_name: HookEventName::PreToolUse, + hook_source: HookSource::User, + status: HookRunStatus::Completed, + }, + ), + }); + + let payload = serde_json::to_value(&event).expect("serialize hook run event"); + + assert_eq!( + payload, + json!({ + "event_type": "codex_hook_run", + "event_params": { + "thread_id": "thread-3", + "turn_id": "turn-3", + "model_slug": "gpt-5", + "hook_name": "PreToolUse", + "hook_source": "user", + "status": "completed" + } + }) + ); +} + +#[test] +fn hook_run_metadata_maps_sources_and_statuses() { + let tracking = TrackEventsContext { + model_slug: "gpt-5".to_string(), + thread_id: "thread-1".to_string(), + turn_id: "turn-1".to_string(), + }; + + let system = serde_json::to_value(codex_hook_run_metadata( + &tracking, + HookRunFact { + event_name: HookEventName::SessionStart, + hook_source: HookSource::System, + status: HookRunStatus::Completed, + }, + )) + .expect("serialize system hook"); + let project = serde_json::to_value(codex_hook_run_metadata( + &tracking, + HookRunFact { + event_name: 
HookEventName::Stop, + hook_source: HookSource::Project, + status: HookRunStatus::Blocked, + }, + )) + .expect("serialize project hook"); + let unknown = serde_json::to_value(codex_hook_run_metadata( + &tracking, + HookRunFact { + event_name: HookEventName::UserPromptSubmit, + hook_source: HookSource::Unknown, + status: HookRunStatus::Failed, + }, + )) + .expect("serialize unknown hook"); + + assert_eq!(system["hook_source"], "system"); + assert_eq!(system["status"], "completed"); + assert_eq!(project["hook_source"], "project"); + assert_eq!(project["status"], "blocked"); + assert_eq!(unknown["hook_source"], "unknown"); + assert_eq!(unknown["status"], "failed"); +} + +#[test] +fn hook_run_metadata_maps_stopped_status() { + let tracking = TrackEventsContext { + model_slug: "gpt-5".to_string(), + thread_id: "thread-1".to_string(), + turn_id: "turn-1".to_string(), + }; + + let stopped = serde_json::to_value(codex_hook_run_metadata( + &tracking, + HookRunFact { + event_name: HookEventName::Stop, + hook_source: HookSource::User, + status: HookRunStatus::Stopped, + }, + )) + .expect("serialize stopped hook"); + + assert_eq!(stopped["hook_source"], "user"); + assert_eq!(stopped["status"], "stopped"); +} + #[test] fn plugin_used_dedupe_is_keyed_by_turn_and_plugin() { let (sender, _receiver) = mpsc::channel(1); @@ -1001,6 +1469,37 @@ async fn reducer_ingests_skill_invoked_fact() { ); } +#[tokio::test] +async fn reducer_ingests_hook_run_fact() { + let mut reducer = AnalyticsReducer::default(); + let mut events = Vec::new(); + + reducer + .ingest( + AnalyticsFact::Custom(CustomAnalyticsFact::HookRun(HookRunInput { + tracking: TrackEventsContext { + model_slug: "gpt-5".to_string(), + thread_id: "thread-1".to_string(), + turn_id: "turn-1".to_string(), + }, + hook: HookRunFact { + event_name: HookEventName::PostToolUse, + hook_source: HookSource::Unknown, + status: HookRunStatus::Failed, + }, + })), + &mut events, + ) + .await; + + let payload = 
serde_json::to_value(&events).expect("serialize events"); + assert_eq!(payload.as_array().expect("events array").len(), 1); + assert_eq!(payload[0]["event_type"], "codex_hook_run"); + assert_eq!(payload[0]["event_params"]["hook_name"], "PostToolUse"); + assert_eq!(payload[0]["event_params"]["hook_source"], "unknown"); + assert_eq!(payload[0]["event_params"]["status"], "failed"); +} + #[tokio::test] async fn reducer_ingests_app_and_plugin_facts() { let mut reducer = AnalyticsReducer::default(); @@ -1089,6 +1588,675 @@ async fn reducer_ingests_plugin_state_changed_fact() { ); } +#[test] +fn turn_event_serializes_expected_shape() { + let event = TrackEventRequest::TurnEvent(Box::new(CodexTurnEventRequest { + event_type: "codex_turn_event", + event_params: crate::events::CodexTurnEventParams { + thread_id: "thread-2".to_string(), + turn_id: "turn-2".to_string(), + app_server_client: sample_app_server_client_metadata(), + runtime: sample_runtime_metadata(), + submission_type: None, + ephemeral: false, + thread_source: Some("user".to_string()), + initialization_mode: ThreadInitializationMode::New, + subagent_source: None, + parent_thread_id: None, + model: Some("gpt-5".to_string()), + model_provider: "openai".to_string(), + sandbox_policy: Some("read_only"), + reasoning_effort: Some("high".to_string()), + reasoning_summary: Some("detailed".to_string()), + service_tier: "flex".to_string(), + approval_policy: "on-request".to_string(), + approvals_reviewer: "guardian_subagent".to_string(), + sandbox_network_access: true, + collaboration_mode: Some("plan"), + personality: Some("pragmatic".to_string()), + num_input_images: 2, + is_first_turn: true, + status: Some(TurnStatus::Completed), + turn_error: None, + steer_count: Some(0), + total_tool_call_count: None, + shell_command_count: None, + file_change_count: None, + mcp_tool_call_count: None, + dynamic_tool_call_count: None, + subagent_tool_call_count: None, + web_search_count: None, + image_generation_count: None, + 
input_tokens: None, + cached_input_tokens: None, + output_tokens: None, + reasoning_output_tokens: None, + total_tokens: None, + duration_ms: Some(1234), + started_at: Some(455), + completed_at: Some(456), + }, + })); + + let payload = serde_json::to_value(&event).expect("serialize turn event"); + let expected = serde_json::from_str::( + r#"{ + "event_type": "codex_turn_event", + "event_params": { + "thread_id": "thread-2", + "turn_id": "turn-2", + "submission_type": null, + "app_server_client": { + "product_client_id": "codex_cli_rs", + "client_name": "codex-tui", + "client_version": "1.0.0", + "rpc_transport": "stdio", + "experimental_api_enabled": true + }, + "runtime": { + "codex_rs_version": "0.1.0", + "runtime_os": "macos", + "runtime_os_version": "15.3.1", + "runtime_arch": "aarch64" + }, + "ephemeral": false, + "thread_source": "user", + "initialization_mode": "new", + "subagent_source": null, + "parent_thread_id": null, + "model": "gpt-5", + "model_provider": "openai", + "sandbox_policy": "read_only", + "reasoning_effort": "high", + "reasoning_summary": "detailed", + "service_tier": "flex", + "approval_policy": "on-request", + "approvals_reviewer": "guardian_subagent", + "sandbox_network_access": true, + "collaboration_mode": "plan", + "personality": "pragmatic", + "num_input_images": 2, + "is_first_turn": true, + "status": "completed", + "turn_error": null, + "steer_count": 0, + "total_tool_call_count": null, + "shell_command_count": null, + "file_change_count": null, + "mcp_tool_call_count": null, + "dynamic_tool_call_count": null, + "subagent_tool_call_count": null, + "web_search_count": null, + "image_generation_count": null, + "input_tokens": null, + "cached_input_tokens": null, + "output_tokens": null, + "reasoning_output_tokens": null, + "total_tokens": null, + "duration_ms": 1234, + "started_at": 455, + "completed_at": 456 + } + }"#, + ) + .expect("parse expected turn event"); + + assert_eq!(payload, expected); +} + +#[tokio::test] +async fn 
accepted_turn_steer_emits_expected_event() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ false, + /*include_started*/ false, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(4), + request: Box::new(sample_turn_steer_request( + "thread-2", "turn-2", /*request_id*/ 4, + )), + }, + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_turn_steer_response("turn-2", /*request_id*/ 4)), + }, + &mut out, + ) + .await; + + assert_eq!(out.len(), 1); + let payload = serde_json::to_value(&out[0]).expect("serialize turn steer event"); + assert_eq!(payload["event_type"], json!("codex_turn_steer_event")); + assert_eq!(payload["event_params"]["thread_id"], json!("thread-2")); + assert_eq!(payload["event_params"]["expected_turn_id"], json!("turn-2")); + assert_eq!(payload["event_params"]["accepted_turn_id"], json!("turn-2")); + assert_eq!(payload["event_params"]["num_input_images"], json!(1)); + assert_eq!(payload["event_params"]["result"], json!("accepted")); + assert_eq!(payload["event_params"]["rejection_reason"], json!(null)); + assert!( + payload["event_params"]["created_at"] + .as_u64() + .expect("created_at") + > 0 + ); + assert_eq!( + payload["event_params"]["app_server_client"]["product_client_id"], + json!("codex-tui") + ); + assert_eq!( + payload["event_params"]["runtime"]["codex_rs_version"], + json!("0.1.0") + ); + assert_eq!(payload["event_params"]["thread_source"], json!("user")); + assert_eq!(payload["event_params"]["subagent_source"], json!(null)); + assert_eq!(payload["event_params"]["parent_thread_id"], json!(null)); + assert!(payload["event_params"].get("product_client_id").is_none()); +} + +#[tokio::test] +async fn 
rejected_turn_steer_uses_request_connection_metadata() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + let payload = ingest_rejected_turn_steer( + &mut reducer, + &mut out, + no_active_turn_steer_error(), + Some(no_active_turn_steer_error_type()), + ) + .await; + + assert_eq!(payload["event_type"], json!("codex_turn_steer_event")); + assert_eq!(payload["event_params"]["thread_id"], json!("thread-2")); + assert_eq!(payload["event_params"]["expected_turn_id"], json!("turn-2")); + assert_eq!(payload["event_params"]["accepted_turn_id"], json!(null)); + assert_eq!(payload["event_params"]["num_input_images"], json!(1)); + assert_eq!( + payload["event_params"]["app_server_client"]["product_client_id"], + json!("codex-tui") + ); + assert_eq!( + payload["event_params"]["runtime"]["codex_rs_version"], + json!("0.1.0") + ); + assert_eq!(payload["event_params"]["thread_source"], json!("user")); + assert_eq!(payload["event_params"]["subagent_source"], json!(null)); + assert_eq!(payload["event_params"]["parent_thread_id"], json!(null)); + assert_eq!(payload["event_params"]["result"], json!("rejected")); + assert_eq!( + payload["event_params"]["rejection_reason"], + json!("no_active_turn") + ); + assert!( + payload["event_params"]["created_at"] + .as_u64() + .expect("created_at") + > 0 + ); +} + +#[tokio::test] +async fn rejected_turn_steer_maps_active_turn_not_steerable_error_type() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + let payload = ingest_rejected_turn_steer( + &mut reducer, + &mut out, + non_steerable_review_error(), + Some(non_steerable_review_error_type()), + ) + .await; + + assert_eq!( + payload["event_params"]["rejection_reason"], + json!("non_steerable_review") + ); +} + +#[tokio::test] +async fn rejected_turn_steer_maps_input_too_large_error_type() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + let payload = ingest_rejected_turn_steer( + &mut reducer, + &mut 
out, + input_too_large_steer_error(), + Some(input_too_large_error_type()), + ) + .await; + + assert_eq!( + payload["event_params"]["rejection_reason"], + json!("input_too_large") + ); +} + +#[tokio::test] +async fn turn_steer_does_not_emit_without_pending_request() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + reducer + .ingest( + AnalyticsFact::ErrorResponse { + connection_id: 7, + request_id: RequestId::Integer(4), + error: no_active_turn_steer_error(), + error_type: Some(no_active_turn_steer_error_type()), + }, + &mut out, + ) + .await; + + assert!(out.is_empty()); +} + +#[tokio::test] +async fn turn_start_error_response_discards_pending_start_request() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_initialize(&mut reducer, &mut out).await; + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(3), + request: Box::new(sample_turn_start_request("thread-2", /*request_id*/ 3)), + }, + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::ErrorResponse { + connection_id: 7, + request_id: RequestId::Integer(3), + error: no_active_turn_steer_error(), + error_type: None, + }, + &mut out, + ) + .await; + + // A late/synthetic response for the same request id must not resurrect the + // failed turn/start request and attach request-scoped connection metadata. 
+ reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_turn_start_response("turn-2", /*request_id*/ 3)), + }, + &mut out, + ) + .await; + assert!(out.is_empty()); + + reducer + .ingest( + AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new( + sample_turn_resolved_config("turn-2"), + ))), + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + + assert!(out.is_empty()); +} + +#[tokio::test] +async fn turn_lifecycle_emits_turn_event() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ true, + /*include_started*/ true, + /*include_token_usage*/ true, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + + assert_eq!(out.len(), 1); + let payload = serde_json::to_value(&out[0]).expect("serialize turn event"); + assert_eq!(payload["event_type"], json!("codex_turn_event")); + assert_eq!(payload["event_params"]["thread_id"], json!("thread-2")); + assert_eq!(payload["event_params"]["turn_id"], json!("turn-2")); + assert_eq!( + payload["event_params"]["app_server_client"], + json!({ + "product_client_id": "codex-tui", + "client_name": "codex-tui", + "client_version": "1.0.0", + "rpc_transport": "stdio", + "experimental_api_enabled": null, + }) + ); + assert_eq!( + payload["event_params"]["runtime"], + json!({ + "codex_rs_version": "0.1.0", + "runtime_os": "macos", + "runtime_os_version": "15.3.1", + "runtime_arch": "aarch64", + }) + ); + assert!(payload["event_params"].get("product_client_id").is_none()); + 
assert_eq!(payload["event_params"]["ephemeral"], json!(false)); + assert_eq!(payload["event_params"]["num_input_images"], json!(1)); + assert_eq!(payload["event_params"]["status"], json!("completed")); + assert_eq!(payload["event_params"]["steer_count"], json!(0)); + assert_eq!(payload["event_params"]["started_at"], json!(455)); + assert_eq!(payload["event_params"]["completed_at"], json!(456)); + assert_eq!(payload["event_params"]["duration_ms"], json!(1234)); + assert_eq!(payload["event_params"]["input_tokens"], json!(123)); + assert_eq!(payload["event_params"]["cached_input_tokens"], json!(45)); + assert_eq!(payload["event_params"]["output_tokens"], json!(140)); + assert_eq!( + payload["event_params"]["reasoning_output_tokens"], + json!(13) + ); + assert_eq!(payload["event_params"]["total_tokens"], json!(321)); +} + +#[tokio::test] +async fn accepted_steers_increment_turn_steer_count() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ true, + /*include_started*/ true, + /*include_token_usage*/ false, + ) + .await; + + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(4), + request: Box::new(sample_turn_steer_request( + "thread-2", "turn-2", /*request_id*/ 4, + )), + }, + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_turn_steer_response("turn-2", /*request_id*/ 4)), + }, + &mut out, + ) + .await; + + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(5), + request: Box::new(sample_turn_steer_request( + "thread-2", "turn-2", /*request_id*/ 5, + )), + }, + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::ErrorResponse { + connection_id: 7, + request_id: RequestId::Integer(5), + error: no_active_turn_steer_error(), + error_type: 
Some(no_active_turn_steer_error_type()), + }, + &mut out, + ) + .await; + + reducer + .ingest( + AnalyticsFact::Request { + connection_id: 7, + request_id: RequestId::Integer(6), + request: Box::new(sample_turn_steer_request( + "thread-2", "turn-2", /*request_id*/ 6, + )), + }, + &mut out, + ) + .await; + reducer + .ingest( + AnalyticsFact::Response { + connection_id: 7, + response: Box::new(sample_turn_steer_response("turn-2", /*request_id*/ 6)), + }, + &mut out, + ) + .await; + + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + + let turn_event = out + .iter() + .find(|event| matches!(event, TrackEventRequest::TurnEvent(_))) + .expect("turn event should be emitted"); + let payload = serde_json::to_value(turn_event).expect("serialize turn event"); + assert_eq!(payload["event_params"]["steer_count"], json!(2)); +} + +#[tokio::test] +async fn turn_does_not_emit_without_required_prerequisites() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ false, + /*include_resolved_config*/ true, + /*include_started*/ false, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + assert!(out.is_empty()); + + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ false, + /*include_started*/ false, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + 
"turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + assert!(out.is_empty()); +} + +#[tokio::test] +async fn turn_lifecycle_emits_failed_turn_event() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ true, + /*include_started*/ true, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Failed, + Some(codex_app_server_protocol::CodexErrorInfo::BadRequest), + ))), + &mut out, + ) + .await; + + assert_eq!(out.len(), 1); + let payload = serde_json::to_value(&out[0]).expect("serialize turn event"); + assert_eq!(payload["event_params"]["status"], json!("failed")); + assert_eq!(payload["event_params"]["turn_error"], json!("badRequest")); +} + +#[tokio::test] +async fn turn_lifecycle_emits_interrupted_turn_event_without_error() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ true, + /*include_started*/ true, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Interrupted, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + + assert_eq!(out.len(), 1); + let payload = serde_json::to_value(&out[0]).expect("serialize turn event"); + assert_eq!(payload["event_params"]["status"], json!("interrupted")); + assert_eq!(payload["event_params"]["turn_error"], json!(null)); +} + +#[tokio::test] +async fn turn_completed_without_started_notification_emits_null_started_at() { + let mut reducer = AnalyticsReducer::default(); + let mut out = Vec::new(); + + 
ingest_turn_prerequisites( + &mut reducer, + &mut out, + /*include_initialize*/ true, + /*include_resolved_config*/ true, + /*include_started*/ false, + /*include_token_usage*/ false, + ) + .await; + reducer + .ingest( + AnalyticsFact::Notification(Box::new(sample_turn_completed_notification( + "thread-2", + "turn-2", + AppServerTurnStatus::Completed, + /*codex_error_info*/ None, + ))), + &mut out, + ) + .await; + + let payload = serde_json::to_value(&out[0]).expect("serialize turn event"); + assert_eq!(payload["event_params"]["started_at"], json!(null)); + assert_eq!(payload["event_params"]["duration_ms"], json!(1234)); + assert_eq!(payload["event_params"]["input_tokens"], json!(null)); + assert_eq!(payload["event_params"]["cached_input_tokens"], json!(null)); + assert_eq!(payload["event_params"]["output_tokens"], json!(null)); + assert_eq!( + payload["event_params"]["reasoning_output_tokens"], + json!(null) + ); + assert_eq!(payload["event_params"]["total_tokens"], json!(null)); +} + fn sample_plugin_metadata() -> PluginTelemetryMetadata { PluginTelemetryMetadata { plugin_id: PluginId::parse("sample@test").expect("valid plugin id"), diff --git a/codex-rs/analytics/src/client.rs b/codex-rs/analytics/src/client.rs index 0d96ee6061..1a4b5defe9 100644 --- a/codex-rs/analytics/src/client.rs +++ b/codex-rs/analytics/src/client.rs @@ -4,19 +4,28 @@ use crate::events::TrackEventRequest; use crate::events::TrackEventsRequest; use crate::events::current_runtime_metadata; use crate::facts::AnalyticsFact; +use crate::facts::AnalyticsJsonRpcError; use crate::facts::AppInvocation; use crate::facts::AppMentionedInput; use crate::facts::AppUsedInput; use crate::facts::CustomAnalyticsFact; +use crate::facts::HookRunFact; +use crate::facts::HookRunInput; use crate::facts::PluginState; use crate::facts::PluginStateChangedInput; use crate::facts::SkillInvocation; use crate::facts::SkillInvokedInput; use crate::facts::SubAgentThreadStartedInput; use crate::facts::TrackEventsContext; 
+use crate::facts::TurnResolvedConfigFact; +use crate::facts::TurnTokenUsageFact; use crate::reducer::AnalyticsReducer; +use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ClientResponse; use codex_app_server_protocol::InitializeParams; +use codex_app_server_protocol::JSONRPCErrorError; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ServerNotification; use codex_login::AuthManager; use codex_login::default_client::create_client; use codex_plugin::PluginTelemetryMetadata; @@ -167,6 +176,14 @@ impl AnalyticsEventsClient { ))); } + pub fn track_request(&self, connection_id: u64, request_id: RequestId, request: ClientRequest) { + self.record_fact(AnalyticsFact::Request { + connection_id, + request_id, + request: Box::new(request), + }); + } + pub fn track_app_used(&self, tracking: TrackEventsContext, app: AppInvocation) { if !self.queue.should_enqueue_app_used(&tracking, &app) { return; @@ -176,6 +193,12 @@ impl AnalyticsEventsClient { ))); } + pub fn track_hook_run(&self, tracking: TrackEventsContext, hook: HookRunFact) { + self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::HookRun( + HookRunInput { tracking, hook }, + ))); + } + pub fn track_plugin_used(&self, tracking: TrackEventsContext, plugin: PluginTelemetryMetadata) { if !self.queue.should_enqueue_plugin_used(&tracking, &plugin) { return; @@ -191,6 +214,18 @@ impl AnalyticsEventsClient { ))); } + pub fn track_turn_resolved_config(&self, fact: TurnResolvedConfigFact) { + self.record_fact(AnalyticsFact::Custom( + CustomAnalyticsFact::TurnResolvedConfig(Box::new(fact)), + )); + } + + pub fn track_turn_token_usage(&self, fact: TurnTokenUsageFact) { + self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::TurnTokenUsage( + Box::new(fact), + ))); + } + pub fn track_plugin_installed(&self, plugin: PluginTelemetryMetadata) { self.record_fact(AnalyticsFact::Custom( CustomAnalyticsFact::PluginStateChanged(PluginStateChangedInput { @@ -240,6 +275,25 @@ 
impl AnalyticsEventsClient { response: Box::new(response), }); } + + pub fn track_error_response( + &self, + connection_id: u64, + request_id: RequestId, + error: JSONRPCErrorError, + error_type: Option, + ) { + self.record_fact(AnalyticsFact::ErrorResponse { + connection_id, + request_id, + error, + error_type, + }); + } + + pub fn track_notification(&self, notification: ServerNotification) { + self.record_fact(AnalyticsFact::Notification(Box::new(notification))); + } } async fn send_track_events( diff --git a/codex-rs/analytics/src/events.rs b/codex-rs/analytics/src/events.rs index 618dd8ffeb..d3aa5263da 100644 --- a/codex-rs/analytics/src/events.rs +++ b/codex-rs/analytics/src/events.rs @@ -1,15 +1,24 @@ use crate::facts::AppInvocation; use crate::facts::CodexCompactionEvent; +use crate::facts::HookRunFact; use crate::facts::InvocationType; use crate::facts::PluginState; use crate::facts::SubAgentThreadStartedInput; +use crate::facts::ThreadInitializationMode; use crate::facts::TrackEventsContext; +use crate::facts::TurnStatus; +use crate::facts::TurnSteerRejectionReason; +use crate::facts::TurnSteerResult; +use crate::facts::TurnSubmissionType; +use codex_app_server_protocol::CodexErrorInfo; use codex_login::default_client::originator; use codex_plugin::PluginTelemetryMetadata; use codex_protocol::approvals::NetworkApprovalProtocol; use codex_protocol::models::PermissionProfile; use codex_protocol::models::SandboxPermissions; -use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::HookEventName; +use codex_protocol::protocol::HookRunStatus; +use codex_protocol::protocol::HookSource; use codex_protocol::protocol::SubAgentSource; use serde::Serialize; @@ -21,14 +30,6 @@ pub enum AppServerRpcTransport { InProcess, } -#[derive(Clone, Copy, Debug, Serialize)] -#[serde(rename_all = "snake_case")] -pub(crate) enum ThreadInitializationMode { - New, - Forked, - Resumed, -} - #[derive(Serialize)] pub(crate) struct TrackEventsRequest { pub(crate) 
events: Vec, @@ -42,7 +43,10 @@ pub(crate) enum TrackEventRequest { GuardianReview(Box), AppMentioned(CodexAppMentionedEventRequest), AppUsed(CodexAppUsedEventRequest), + HookRun(CodexHookRunEventRequest), Compaction(Box), + TurnEvent(Box), + TurnSteer(CodexTurnSteerEventRequest), PluginUsed(CodexPluginUsedEventRequest), PluginInstalled(CodexPluginEventRequest), PluginUninstalled(CodexPluginEventRequest), @@ -301,6 +305,22 @@ pub(crate) struct CodexAppUsedEventRequest { pub(crate) event_params: CodexAppMetadata, } +#[derive(Serialize)] +pub(crate) struct CodexHookRunMetadata { + pub(crate) thread_id: Option, + pub(crate) turn_id: Option, + pub(crate) model_slug: Option, + pub(crate) hook_name: Option, + pub(crate) hook_source: Option<&'static str>, + pub(crate) status: Option, +} + +#[derive(Serialize)] +pub(crate) struct CodexHookRunEventRequest { + pub(crate) event_type: &'static str, + pub(crate) event_params: CodexHookRunMetadata, +} + #[derive(Serialize)] pub(crate) struct CodexCompactionEventParams { pub(crate) thread_id: String, @@ -330,6 +350,84 @@ pub(crate) struct CodexCompactionEventRequest { pub(crate) event_params: CodexCompactionEventParams, } +#[derive(Serialize)] +pub(crate) struct CodexTurnEventParams { + pub(crate) thread_id: String, + pub(crate) turn_id: String, + // TODO(rhan-oai): Populate once queued/default submission type is plumbed from + // the turn/start callsites instead of always being reported as None. 
+ pub(crate) submission_type: Option, + pub(crate) app_server_client: CodexAppServerClientMetadata, + pub(crate) runtime: CodexRuntimeMetadata, + pub(crate) ephemeral: bool, + pub(crate) thread_source: Option, + pub(crate) initialization_mode: ThreadInitializationMode, + pub(crate) subagent_source: Option, + pub(crate) parent_thread_id: Option, + pub(crate) model: Option, + pub(crate) model_provider: String, + pub(crate) sandbox_policy: Option<&'static str>, + pub(crate) reasoning_effort: Option, + pub(crate) reasoning_summary: Option, + pub(crate) service_tier: String, + pub(crate) approval_policy: String, + pub(crate) approvals_reviewer: String, + pub(crate) sandbox_network_access: bool, + pub(crate) collaboration_mode: Option<&'static str>, + pub(crate) personality: Option, + pub(crate) num_input_images: usize, + pub(crate) is_first_turn: bool, + pub(crate) status: Option, + pub(crate) turn_error: Option, + pub(crate) steer_count: Option, + // TODO(rhan-oai): Populate these once tool-call accounting is emitted from + // core; the schema is reserved but these fields are currently always None. 
+ pub(crate) total_tool_call_count: Option, + pub(crate) shell_command_count: Option, + pub(crate) file_change_count: Option, + pub(crate) mcp_tool_call_count: Option, + pub(crate) dynamic_tool_call_count: Option, + pub(crate) subagent_tool_call_count: Option, + pub(crate) web_search_count: Option, + pub(crate) image_generation_count: Option, + pub(crate) input_tokens: Option, + pub(crate) cached_input_tokens: Option, + pub(crate) output_tokens: Option, + pub(crate) reasoning_output_tokens: Option, + pub(crate) total_tokens: Option, + pub(crate) duration_ms: Option, + pub(crate) started_at: Option, + pub(crate) completed_at: Option, +} + +#[derive(Serialize)] +pub(crate) struct CodexTurnEventRequest { + pub(crate) event_type: &'static str, + pub(crate) event_params: CodexTurnEventParams, +} + +#[derive(Serialize)] +pub(crate) struct CodexTurnSteerEventParams { + pub(crate) thread_id: String, + pub(crate) expected_turn_id: Option, + pub(crate) accepted_turn_id: Option, + pub(crate) app_server_client: CodexAppServerClientMetadata, + pub(crate) runtime: CodexRuntimeMetadata, + pub(crate) thread_source: Option, + pub(crate) subagent_source: Option, + pub(crate) parent_thread_id: Option, + pub(crate) num_input_images: usize, + pub(crate) result: TurnSteerResult, + pub(crate) rejection_reason: Option, + pub(crate) created_at: u64, +} + +#[derive(Serialize)] +pub(crate) struct CodexTurnSteerEventRequest { + pub(crate) event_type: &'static str, + pub(crate) event_params: CodexTurnSteerEventParams, +} + #[derive(Serialize)] pub(crate) struct CodexPluginMetadata { pub(crate) plugin_id: Option, @@ -452,11 +550,40 @@ pub(crate) fn codex_plugin_used_metadata( } } -pub(crate) fn thread_source_name(thread_source: &SessionSource) -> Option<&'static str> { - match thread_source { - SessionSource::Cli | SessionSource::VSCode | SessionSource::Exec => Some("user"), - SessionSource::SubAgent(_) => Some("subagent"), - SessionSource::Mcp | SessionSource::Custom(_) | 
SessionSource::Unknown => None, +pub(crate) fn codex_hook_run_metadata( + tracking: &TrackEventsContext, + hook: HookRunFact, +) -> CodexHookRunMetadata { + CodexHookRunMetadata { + thread_id: Some(tracking.thread_id.clone()), + turn_id: Some(tracking.turn_id.clone()), + model_slug: Some(tracking.model_slug.clone()), + hook_name: Some(analytics_hook_event_name(hook.event_name).to_owned()), + hook_source: Some(analytics_hook_source(hook.hook_source)), + status: Some(analytics_hook_status(hook.status)), + } +} + +fn analytics_hook_event_name(event_name: HookEventName) -> &'static str { + match event_name { + HookEventName::PreToolUse => "PreToolUse", + HookEventName::PostToolUse => "PostToolUse", + HookEventName::SessionStart => "SessionStart", + HookEventName::UserPromptSubmit => "UserPromptSubmit", + HookEventName::Stop => "Stop", + } +} + +fn analytics_hook_source(source: HookSource) -> &'static str { + match source { + HookSource::System => "system", + HookSource::User => "user", + HookSource::Project => "project", + HookSource::Mdm => "mdm", + HookSource::SessionFlags => "session_flags", + HookSource::LegacyManagedConfigFile => "legacy_managed_config_file", + HookSource::LegacyManagedConfigMdm => "legacy_managed_config_mdm", + HookSource::Unknown => "unknown", } } @@ -517,3 +644,11 @@ pub(crate) fn subagent_parent_thread_id(subagent_source: &SubAgentSource) -> Opt _ => None, } } + +fn analytics_hook_status(status: HookRunStatus) -> HookRunStatus { + match status { + // Running is unexpected here and normalized defensively. 
+ HookRunStatus::Running => HookRunStatus::Failed, + other => other, + } +} diff --git a/codex-rs/analytics/src/facts.rs b/codex-rs/analytics/src/facts.rs index 931ae01013..1d371acb1c 100644 --- a/codex-rs/analytics/src/facts.rs +++ b/codex-rs/analytics/src/facts.rs @@ -4,11 +4,25 @@ use crate::events::GuardianReviewEventParams; use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ClientResponse; use codex_app_server_protocol::InitializeParams; +use codex_app_server_protocol::JSONRPCErrorError; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ServerNotification; use codex_plugin::PluginTelemetryMetadata; +use codex_protocol::config_types::ApprovalsReviewer; +use codex_protocol::config_types::ModeKind; +use codex_protocol::config_types::Personality; +use codex_protocol::config_types::ReasoningSummary; +use codex_protocol::config_types::ServiceTier; +use codex_protocol::openai_models::ReasoningEffort; +use codex_protocol::protocol::AskForApproval; +use codex_protocol::protocol::HookEventName; +use codex_protocol::protocol::HookRunStatus; +use codex_protocol::protocol::HookSource; +use codex_protocol::protocol::SandboxPolicy; +use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SkillScope; use codex_protocol::protocol::SubAgentSource; +use codex_protocol::protocol::TokenUsage; use serde::Serialize; use std::path::PathBuf; @@ -31,6 +45,126 @@ pub fn build_track_events_context( } } +#[derive(Clone, Copy, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum TurnSubmissionType { + Default, + Queued, +} + +#[derive(Clone)] +pub struct TurnResolvedConfigFact { + pub turn_id: String, + pub thread_id: String, + pub num_input_images: usize, + pub submission_type: Option, + pub ephemeral: bool, + pub session_source: SessionSource, + pub model: String, + pub model_provider: String, + pub sandbox_policy: SandboxPolicy, + pub reasoning_effort: Option, + pub reasoning_summary: Option, + pub 
service_tier: Option, + pub approval_policy: AskForApproval, + pub approvals_reviewer: ApprovalsReviewer, + pub sandbox_network_access: bool, + pub collaboration_mode: ModeKind, + pub personality: Option, + pub is_first_turn: bool, +} + +#[derive(Clone, Copy, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum ThreadInitializationMode { + New, + Forked, + Resumed, +} + +#[derive(Clone)] +pub struct TurnTokenUsageFact { + pub turn_id: String, + pub thread_id: String, + pub token_usage: TokenUsage, +} + +#[derive(Clone, Copy, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum TurnStatus { + Completed, + Failed, + Interrupted, +} + +#[derive(Clone, Copy, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum TurnSteerResult { + Accepted, + Rejected, +} + +#[derive(Clone, Copy, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum TurnSteerRejectionReason { + NoActiveTurn, + ExpectedTurnMismatch, + NonSteerableReview, + NonSteerableCompact, + EmptyInput, + InputTooLarge, +} + +#[derive(Clone)] +pub struct CodexTurnSteerEvent { + pub expected_turn_id: Option, + pub accepted_turn_id: Option, + pub num_input_images: usize, + pub result: TurnSteerResult, + pub rejection_reason: Option, + pub created_at: u64, +} + +#[derive(Clone, Copy, Debug)] +pub enum AnalyticsJsonRpcError { + TurnSteer(TurnSteerRequestError), + Input(InputError), +} + +#[derive(Clone, Copy, Debug)] +pub enum TurnSteerRequestError { + NoActiveTurn, + ExpectedTurnMismatch, + NonSteerableReview, + NonSteerableCompact, +} + +#[derive(Clone, Copy, Debug)] +pub enum InputError { + Empty, + TooLarge, +} + +impl From for TurnSteerRejectionReason { + fn from(error: TurnSteerRequestError) -> Self { + match error { + TurnSteerRequestError::NoActiveTurn => Self::NoActiveTurn, + TurnSteerRequestError::ExpectedTurnMismatch => Self::ExpectedTurnMismatch, + TurnSteerRequestError::NonSteerableReview => Self::NonSteerableReview, + 
TurnSteerRequestError::NonSteerableCompact => Self::NonSteerableCompact, + } + } +} + +impl From for TurnSteerRejectionReason { + fn from(error: InputError) -> Self { + match error { + InputError::Empty => Self::EmptyInput, + InputError::TooLarge => Self::InputTooLarge, + } + } +} + #[derive(Clone, Debug)] pub struct SkillInvocation { pub skill_name: String, @@ -146,6 +280,12 @@ pub(crate) enum AnalyticsFact { connection_id: u64, response: Box, }, + ErrorResponse { + connection_id: u64, + request_id: RequestId, + error: JSONRPCErrorError, + error_type: Option, + }, Notification(Box), // Facts that do not naturally exist on the app-server protocol surface, or // would require non-trivial protocol reshaping on this branch. @@ -156,9 +296,12 @@ pub(crate) enum CustomAnalyticsFact { SubAgentThreadStarted(SubAgentThreadStartedInput), Compaction(Box), GuardianReview(Box), + TurnResolvedConfig(Box), + TurnTokenUsage(Box), SkillInvoked(SkillInvokedInput), AppMentioned(AppMentionedInput), AppUsed(AppUsedInput), + HookRun(HookRunInput), PluginUsed(PluginUsedInput), PluginStateChanged(PluginStateChangedInput), } @@ -178,6 +321,17 @@ pub(crate) struct AppUsedInput { pub app: AppInvocation, } +pub(crate) struct HookRunInput { + pub tracking: TrackEventsContext, + pub hook: HookRunFact, +} + +pub struct HookRunFact { + pub event_name: HookEventName, + pub hook_source: HookSource, + pub status: HookRunStatus, +} + pub(crate) struct PluginUsedInput { pub tracking: TrackEventsContext, pub plugin: PluginTelemetryMetadata, diff --git a/codex-rs/analytics/src/lib.rs b/codex-rs/analytics/src/lib.rs index 9b4cc1e9bc..03f485a1c1 100644 --- a/codex-rs/analytics/src/lib.rs +++ b/codex-rs/analytics/src/lib.rs @@ -3,6 +3,9 @@ mod events; mod facts; mod reducer; +use std::time::SystemTime; +use std::time::UNIX_EPOCH; + pub use client::AnalyticsEventsClient; pub use events::AppServerRpcTransport; pub use events::GuardianApprovalRequestSource; @@ -16,19 +19,37 @@ pub use 
events::GuardianReviewSessionKind; pub use events::GuardianReviewTerminalStatus; pub use events::GuardianReviewUserAuthorization; pub use events::GuardianReviewedAction; +pub use facts::AnalyticsJsonRpcError; pub use facts::AppInvocation; pub use facts::CodexCompactionEvent; +pub use facts::CodexTurnSteerEvent; pub use facts::CompactionImplementation; pub use facts::CompactionPhase; pub use facts::CompactionReason; pub use facts::CompactionStatus; pub use facts::CompactionStrategy; pub use facts::CompactionTrigger; +pub use facts::HookRunFact; +pub use facts::InputError; pub use facts::InvocationType; pub use facts::SkillInvocation; pub use facts::SubAgentThreadStartedInput; +pub use facts::ThreadInitializationMode; pub use facts::TrackEventsContext; +pub use facts::TurnResolvedConfigFact; +pub use facts::TurnStatus; +pub use facts::TurnSteerRejectionReason; +pub use facts::TurnSteerRequestError; +pub use facts::TurnSteerResult; +pub use facts::TurnTokenUsageFact; pub use facts::build_track_events_context; #[cfg(test)] mod analytics_client_tests; + +pub fn now_unix_seconds() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs() +} diff --git a/codex-rs/analytics/src/reducer.rs b/codex-rs/analytics/src/reducer.rs index 0ed2899762..a6ce3fc831 100644 --- a/codex-rs/analytics/src/reducer.rs +++ b/codex-rs/analytics/src/reducer.rs @@ -3,50 +3,76 @@ use crate::events::CodexAppMentionedEventRequest; use crate::events::CodexAppServerClientMetadata; use crate::events::CodexAppUsedEventRequest; use crate::events::CodexCompactionEventRequest; +use crate::events::CodexHookRunEventRequest; use crate::events::CodexPluginEventRequest; use crate::events::CodexPluginUsedEventRequest; use crate::events::CodexRuntimeMetadata; +use crate::events::CodexTurnEventParams; +use crate::events::CodexTurnEventRequest; +use crate::events::CodexTurnSteerEventParams; +use crate::events::CodexTurnSteerEventRequest; use 
crate::events::GuardianReviewEventParams; use crate::events::GuardianReviewEventPayload; use crate::events::GuardianReviewEventRequest; use crate::events::SkillInvocationEventParams; use crate::events::SkillInvocationEventRequest; -use crate::events::ThreadInitializationMode; use crate::events::ThreadInitializedEvent; use crate::events::ThreadInitializedEventParams; use crate::events::TrackEventRequest; use crate::events::codex_app_metadata; use crate::events::codex_compaction_event_params; +use crate::events::codex_hook_run_metadata; use crate::events::codex_plugin_metadata; use crate::events::codex_plugin_used_metadata; use crate::events::plugin_state_event_type; use crate::events::subagent_parent_thread_id; use crate::events::subagent_source_name; use crate::events::subagent_thread_started_event_request; -use crate::events::thread_source_name; use crate::facts::AnalyticsFact; +use crate::facts::AnalyticsJsonRpcError; use crate::facts::AppMentionedInput; use crate::facts::AppUsedInput; use crate::facts::CodexCompactionEvent; use crate::facts::CustomAnalyticsFact; +use crate::facts::HookRunInput; use crate::facts::PluginState; use crate::facts::PluginStateChangedInput; use crate::facts::PluginUsedInput; use crate::facts::SkillInvokedInput; use crate::facts::SubAgentThreadStartedInput; +use crate::facts::ThreadInitializationMode; +use crate::facts::TurnResolvedConfigFact; +use crate::facts::TurnStatus; +use crate::facts::TurnSteerRejectionReason; +use crate::facts::TurnSteerResult; +use crate::facts::TurnTokenUsageFact; +use crate::now_unix_seconds; +use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ClientResponse; +use codex_app_server_protocol::CodexErrorInfo; use codex_app_server_protocol::InitializeParams; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ServerNotification; +use codex_app_server_protocol::TurnSteerResponse; +use codex_app_server_protocol::UserInput; use codex_git_utils::collect_git_info; use 
codex_git_utils::get_git_repo_root; use codex_login::default_client::originator; +use codex_protocol::config_types::ModeKind; +use codex_protocol::config_types::Personality; +use codex_protocol::config_types::ReasoningSummary; +use codex_protocol::protocol::SandboxPolicy; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SkillScope; +use codex_protocol::protocol::TokenUsage; use sha1::Digest; use std::collections::HashMap; use std::path::Path; #[derive(Default)] pub(crate) struct AnalyticsReducer { + requests: HashMap<(u64, RequestId), RequestState>, + turns: HashMap, connections: HashMap, thread_connections: HashMap, thread_metadata: HashMap, @@ -60,12 +86,16 @@ struct ConnectionState { #[derive(Clone)] struct ThreadMetadataState { thread_source: Option<&'static str>, + initialization_mode: ThreadInitializationMode, subagent_source: Option, parent_thread_id: Option, } impl ThreadMetadataState { - fn from_session_source(session_source: &SessionSource) -> Self { + fn from_thread_metadata( + session_source: &SessionSource, + initialization_mode: ThreadInitializationMode, + ) -> Self { let (subagent_source, parent_thread_id) = match session_source { SessionSource::SubAgent(subagent_source) => ( Some(subagent_source_name(subagent_source)), @@ -79,13 +109,50 @@ impl ThreadMetadataState { | SessionSource::Unknown => (None, None), }; Self { - thread_source: thread_source_name(session_source), + thread_source: session_source.thread_source_name(), + initialization_mode, subagent_source, parent_thread_id, } } } +enum RequestState { + TurnStart(PendingTurnStartState), + TurnSteer(PendingTurnSteerState), +} + +struct PendingTurnStartState { + thread_id: String, + num_input_images: usize, +} + +struct PendingTurnSteerState { + thread_id: String, + expected_turn_id: String, + num_input_images: usize, + created_at: u64, +} + +#[derive(Clone)] +struct CompletedTurnState { + status: Option, + turn_error: Option, + completed_at: u64, + duration_ms: Option, 
+} + +struct TurnState { + connection_id: Option, + thread_id: Option, + num_input_images: Option, + resolved_config: Option, + started_at: Option, + token_usage: Option, + completed: Option, + steer_count: usize, +} + impl AnalyticsReducer { pub(crate) async fn ingest(&mut self, input: AnalyticsFact, out: &mut Vec) { match input { @@ -105,17 +172,29 @@ impl AnalyticsReducer { ); } AnalyticsFact::Request { - connection_id: _connection_id, - request_id: _request_id, - request: _request, - } => {} + connection_id, + request_id, + request, + } => { + self.ingest_request(connection_id, request_id, *request); + } AnalyticsFact::Response { connection_id, response, } => { self.ingest_response(connection_id, *response, out); } - AnalyticsFact::Notification(_notification) => {} + AnalyticsFact::ErrorResponse { + connection_id, + request_id, + error: _, + error_type, + } => { + self.ingest_error_response(connection_id, request_id, error_type, out); + } + AnalyticsFact::Notification(notification) => { + self.ingest_notification(*notification, out); + } AnalyticsFact::Custom(input) => match input { CustomAnalyticsFact::SubAgentThreadStarted(input) => { self.ingest_subagent_thread_started(input, out); @@ -126,6 +205,12 @@ impl AnalyticsReducer { CustomAnalyticsFact::GuardianReview(input) => { self.ingest_guardian_review(*input, out); } + CustomAnalyticsFact::TurnResolvedConfig(input) => { + self.ingest_turn_resolved_config(*input, out); + } + CustomAnalyticsFact::TurnTokenUsage(input) => { + self.ingest_turn_token_usage(*input, out); + } CustomAnalyticsFact::SkillInvoked(input) => { self.ingest_skill_invoked(input, out).await; } @@ -135,6 +220,9 @@ impl AnalyticsReducer { CustomAnalyticsFact::AppUsed(input) => { self.ingest_app_used(input, out); } + CustomAnalyticsFact::HookRun(input) => { + self.ingest_hook_run(input, out); + } CustomAnalyticsFact::PluginUsed(input) => { self.ingest_plugin_used(input, out); } @@ -216,6 +304,82 @@ impl AnalyticsReducer { ))); } + fn 
ingest_request( + &mut self, + connection_id: u64, + request_id: RequestId, + request: ClientRequest, + ) { + match request { + ClientRequest::TurnStart { params, .. } => { + self.requests.insert( + (connection_id, request_id), + RequestState::TurnStart(PendingTurnStartState { + thread_id: params.thread_id, + num_input_images: num_input_images(¶ms.input), + }), + ); + } + ClientRequest::TurnSteer { params, .. } => { + self.requests.insert( + (connection_id, request_id), + RequestState::TurnSteer(PendingTurnSteerState { + thread_id: params.thread_id, + expected_turn_id: params.expected_turn_id, + num_input_images: num_input_images(¶ms.input), + created_at: now_unix_seconds(), + }), + ); + } + _ => {} + } + } + + fn ingest_turn_resolved_config( + &mut self, + input: TurnResolvedConfigFact, + out: &mut Vec, + ) { + let turn_id = input.turn_id.clone(); + let thread_id = input.thread_id.clone(); + let num_input_images = input.num_input_images; + let turn_state = self.turns.entry(turn_id.clone()).or_insert(TurnState { + connection_id: None, + thread_id: None, + num_input_images: None, + resolved_config: None, + started_at: None, + token_usage: None, + completed: None, + steer_count: 0, + }); + turn_state.thread_id = Some(thread_id); + turn_state.num_input_images = Some(num_input_images); + turn_state.resolved_config = Some(input); + self.maybe_emit_turn_event(&turn_id, out); + } + + fn ingest_turn_token_usage( + &mut self, + input: TurnTokenUsageFact, + out: &mut Vec, + ) { + let turn_id = input.turn_id.clone(); + let turn_state = self.turns.entry(turn_id.clone()).or_insert(TurnState { + connection_id: None, + thread_id: None, + num_input_images: None, + resolved_config: None, + started_at: None, + token_usage: None, + completed: None, + steer_count: 0, + }); + turn_state.thread_id = Some(input.thread_id); + turn_state.token_usage = Some(input.token_usage); + self.maybe_emit_turn_event(&turn_id, out); + } + async fn ingest_skill_invoked( &mut self, input: 
SkillInvokedInput, @@ -284,6 +448,14 @@ impl AnalyticsReducer { })); } + fn ingest_hook_run(&mut self, input: HookRunInput, out: &mut Vec) { + let HookRunInput { tracking, hook } = input; + out.push(TrackEventRequest::HookRun(CodexHookRunEventRequest { + event_type: "codex_hook_run", + event_params: codex_hook_run_metadata(&tracking, hook), + })); + } + fn ingest_plugin_used(&mut self, input: PluginUsedInput, out: &mut Vec) { let PluginUsedInput { tracking, plugin } = input; out.push(TrackEventRequest::PluginUsed(CodexPluginUsedEventRequest { @@ -316,30 +488,193 @@ impl AnalyticsReducer { response: ClientResponse, out: &mut Vec, ) { - let (thread, model, initialization_mode) = match response { - ClientResponse::ThreadStart { response, .. } => ( - response.thread, - response.model, - ThreadInitializationMode::New, - ), - ClientResponse::ThreadResume { response, .. } => ( - response.thread, - response.model, - ThreadInitializationMode::Resumed, - ), - ClientResponse::ThreadFork { response, .. } => ( - response.thread, - response.model, - ThreadInitializationMode::Forked, - ), - _ => return, + match response { + ClientResponse::ThreadStart { response, .. } => { + self.emit_thread_initialized( + connection_id, + response.thread, + response.model, + ThreadInitializationMode::New, + out, + ); + } + ClientResponse::ThreadResume { response, .. } => { + self.emit_thread_initialized( + connection_id, + response.thread, + response.model, + ThreadInitializationMode::Resumed, + out, + ); + } + ClientResponse::ThreadFork { response, .. 
} => { + self.emit_thread_initialized( + connection_id, + response.thread, + response.model, + ThreadInitializationMode::Forked, + out, + ); + } + ClientResponse::TurnStart { + request_id, + response, + } => { + let turn_id = response.turn.id; + let Some(RequestState::TurnStart(pending_request)) = + self.requests.remove(&(connection_id, request_id)) + else { + return; + }; + let turn_state = self.turns.entry(turn_id.clone()).or_insert(TurnState { + connection_id: None, + thread_id: None, + num_input_images: None, + resolved_config: None, + started_at: None, + token_usage: None, + completed: None, + steer_count: 0, + }); + turn_state.connection_id = Some(connection_id); + turn_state.thread_id = Some(pending_request.thread_id); + turn_state.num_input_images = Some(pending_request.num_input_images); + self.maybe_emit_turn_event(&turn_id, out); + } + ClientResponse::TurnSteer { + request_id, + response, + } => { + self.ingest_turn_steer_response(connection_id, request_id, response, out); + } + _ => {} + } + } + + fn ingest_error_response( + &mut self, + connection_id: u64, + request_id: RequestId, + error_type: Option, + out: &mut Vec, + ) { + let Some(request) = self.requests.remove(&(connection_id, request_id)) else { + return; }; + self.ingest_request_error_response(connection_id, request, error_type, out); + } + + fn ingest_request_error_response( + &mut self, + connection_id: u64, + request: RequestState, + error_type: Option, + out: &mut Vec, + ) { + match request { + RequestState::TurnStart(_) => {} + RequestState::TurnSteer(pending_request) => { + self.ingest_turn_steer_error_response( + connection_id, + pending_request, + error_type, + out, + ); + } + } + } + + fn ingest_turn_steer_error_response( + &mut self, + connection_id: u64, + pending_request: PendingTurnSteerState, + error_type: Option, + out: &mut Vec, + ) { + self.emit_turn_steer_event( + connection_id, + pending_request, + /*accepted_turn_id*/ None, + TurnSteerResult::Rejected, + 
rejection_reason_from_error_type(error_type), + out, + ); + } + + fn ingest_notification( + &mut self, + notification: ServerNotification, + out: &mut Vec, + ) { + match notification { + ServerNotification::TurnStarted(notification) => { + let turn_state = self.turns.entry(notification.turn.id).or_insert(TurnState { + connection_id: None, + thread_id: None, + num_input_images: None, + resolved_config: None, + started_at: None, + token_usage: None, + completed: None, + steer_count: 0, + }); + turn_state.started_at = notification + .turn + .started_at + .and_then(|started_at| u64::try_from(started_at).ok()); + } + ServerNotification::TurnCompleted(notification) => { + let turn_state = + self.turns + .entry(notification.turn.id.clone()) + .or_insert(TurnState { + connection_id: None, + thread_id: None, + num_input_images: None, + resolved_config: None, + started_at: None, + token_usage: None, + completed: None, + steer_count: 0, + }); + turn_state.completed = Some(CompletedTurnState { + status: analytics_turn_status(notification.turn.status), + turn_error: notification + .turn + .error + .and_then(|error| error.codex_error_info), + completed_at: notification + .turn + .completed_at + .and_then(|completed_at| u64::try_from(completed_at).ok()) + .unwrap_or_default(), + duration_ms: notification + .turn + .duration_ms + .and_then(|duration_ms| u64::try_from(duration_ms).ok()), + }); + let turn_id = notification.turn.id; + self.maybe_emit_turn_event(&turn_id, out); + } + _ => {} + } + } + + fn emit_thread_initialized( + &mut self, + connection_id: u64, + thread: codex_app_server_protocol::Thread, + model: String, + initialization_mode: ThreadInitializationMode, + out: &mut Vec, + ) { let thread_source: SessionSource = thread.source.into(); let thread_id = thread.id; let Some(connection_state) = self.connections.get(&connection_id) else { return; }; - let thread_metadata = ThreadMetadataState::from_session_source(&thread_source); + let thread_metadata = + 
ThreadMetadataState::from_thread_metadata(&thread_source, initialization_mode); self.thread_connections .insert(thread_id.clone(), connection_id); self.thread_metadata @@ -403,6 +738,275 @@ impl AnalyticsReducer { }, ))); } + + fn ingest_turn_steer_response( + &mut self, + connection_id: u64, + request_id: RequestId, + response: TurnSteerResponse, + out: &mut Vec, + ) { + let Some(RequestState::TurnSteer(pending_request)) = + self.requests.remove(&(connection_id, request_id)) + else { + return; + }; + if let Some(turn_state) = self.turns.get_mut(&response.turn_id) { + turn_state.steer_count += 1; + } + self.emit_turn_steer_event( + connection_id, + pending_request, + Some(response.turn_id), + TurnSteerResult::Accepted, + /*rejection_reason*/ None, + out, + ); + } + + fn emit_turn_steer_event( + &mut self, + connection_id: u64, + pending_request: PendingTurnSteerState, + accepted_turn_id: Option, + result: TurnSteerResult, + rejection_reason: Option, + out: &mut Vec, + ) { + let Some(connection_state) = self.connections.get(&connection_id) else { + return; + }; + let Some(thread_metadata) = self.thread_metadata.get(&pending_request.thread_id) else { + tracing::warn!( + thread_id = %pending_request.thread_id, + "dropping turn steer analytics event: missing thread lifecycle metadata" + ); + return; + }; + out.push(TrackEventRequest::TurnSteer(CodexTurnSteerEventRequest { + event_type: "codex_turn_steer_event", + event_params: CodexTurnSteerEventParams { + thread_id: pending_request.thread_id, + expected_turn_id: Some(pending_request.expected_turn_id), + accepted_turn_id, + app_server_client: connection_state.app_server_client.clone(), + runtime: connection_state.runtime.clone(), + thread_source: thread_metadata.thread_source.map(str::to_string), + subagent_source: thread_metadata.subagent_source.clone(), + parent_thread_id: thread_metadata.parent_thread_id.clone(), + num_input_images: pending_request.num_input_images, + result, + rejection_reason, + created_at: 
pending_request.created_at, + }, + })); + } + + fn maybe_emit_turn_event(&mut self, turn_id: &str, out: &mut Vec) { + let Some(turn_state) = self.turns.get(turn_id) else { + return; + }; + if turn_state.thread_id.is_none() + || turn_state.num_input_images.is_none() + || turn_state.resolved_config.is_none() + || turn_state.completed.is_none() + { + return; + } + let connection_metadata = turn_state + .connection_id + .and_then(|connection_id| self.connections.get(&connection_id)) + .map(|connection_state| { + ( + connection_state.app_server_client.clone(), + connection_state.runtime.clone(), + ) + }); + let Some((app_server_client, runtime)) = connection_metadata else { + if let Some(connection_id) = turn_state.connection_id { + tracing::warn!( + turn_id, + connection_id, + "dropping turn analytics event: missing connection metadata" + ); + } + return; + }; + let Some(thread_id) = turn_state.thread_id.as_ref() else { + return; + }; + let Some(thread_metadata) = self.thread_metadata.get(thread_id) else { + tracing::warn!( + thread_id, + turn_id, + "dropping turn analytics event: missing thread lifecycle metadata" + ); + return; + }; + out.push(TrackEventRequest::TurnEvent(Box::new( + CodexTurnEventRequest { + event_type: "codex_turn_event", + event_params: codex_turn_event_params( + app_server_client, + runtime, + turn_id.to_string(), + turn_state, + thread_metadata, + ), + }, + ))); + self.turns.remove(turn_id); + } +} + +fn codex_turn_event_params( + app_server_client: CodexAppServerClientMetadata, + runtime: CodexRuntimeMetadata, + turn_id: String, + turn_state: &TurnState, + thread_metadata: &ThreadMetadataState, +) -> CodexTurnEventParams { + let (Some(thread_id), Some(num_input_images), Some(resolved_config), Some(completed)) = ( + turn_state.thread_id.clone(), + turn_state.num_input_images, + turn_state.resolved_config.clone(), + turn_state.completed.clone(), + ) else { + unreachable!("turn event params require a fully populated turn state"); + }; + let 
started_at = turn_state.started_at; + let TurnResolvedConfigFact { + turn_id: _resolved_turn_id, + thread_id: _resolved_thread_id, + num_input_images: _resolved_num_input_images, + submission_type, + ephemeral, + session_source: _session_source, + model, + model_provider, + sandbox_policy, + reasoning_effort, + reasoning_summary, + service_tier, + approval_policy, + approvals_reviewer, + sandbox_network_access, + collaboration_mode, + personality, + is_first_turn, + } = resolved_config; + let token_usage = turn_state.token_usage.clone(); + CodexTurnEventParams { + thread_id, + turn_id, + app_server_client, + runtime, + submission_type, + ephemeral, + thread_source: thread_metadata.thread_source.map(str::to_string), + initialization_mode: thread_metadata.initialization_mode, + subagent_source: thread_metadata.subagent_source.clone(), + parent_thread_id: thread_metadata.parent_thread_id.clone(), + model: Some(model), + model_provider, + sandbox_policy: Some(sandbox_policy_mode(&sandbox_policy)), + reasoning_effort: reasoning_effort.map(|value| value.to_string()), + reasoning_summary: reasoning_summary_mode(reasoning_summary), + service_tier: service_tier + .map(|value| value.to_string()) + .unwrap_or_else(|| "default".to_string()), + approval_policy: approval_policy.to_string(), + approvals_reviewer: approvals_reviewer.to_string(), + sandbox_network_access, + collaboration_mode: Some(collaboration_mode_mode(collaboration_mode)), + personality: personality_mode(personality), + num_input_images, + is_first_turn, + status: completed.status, + turn_error: completed.turn_error, + steer_count: Some(turn_state.steer_count), + total_tool_call_count: None, + shell_command_count: None, + file_change_count: None, + mcp_tool_call_count: None, + dynamic_tool_call_count: None, + subagent_tool_call_count: None, + web_search_count: None, + image_generation_count: None, + input_tokens: token_usage + .as_ref() + .map(|token_usage| token_usage.input_tokens), + cached_input_tokens: 
token_usage + .as_ref() + .map(|token_usage| token_usage.cached_input_tokens), + output_tokens: token_usage + .as_ref() + .map(|token_usage| token_usage.output_tokens), + reasoning_output_tokens: token_usage + .as_ref() + .map(|token_usage| token_usage.reasoning_output_tokens), + total_tokens: token_usage + .as_ref() + .map(|token_usage| token_usage.total_tokens), + duration_ms: completed.duration_ms, + started_at, + completed_at: Some(completed.completed_at), + } +} + +fn sandbox_policy_mode(sandbox_policy: &SandboxPolicy) -> &'static str { + match sandbox_policy { + SandboxPolicy::DangerFullAccess => "full_access", + SandboxPolicy::ReadOnly { .. } => "read_only", + SandboxPolicy::WorkspaceWrite { .. } => "workspace_write", + SandboxPolicy::ExternalSandbox { .. } => "external_sandbox", + } +} + +fn collaboration_mode_mode(mode: ModeKind) -> &'static str { + match mode { + ModeKind::Plan => "plan", + ModeKind::Default | ModeKind::PairProgramming | ModeKind::Execute => "default", + } +} + +fn reasoning_summary_mode(summary: Option) -> Option { + match summary { + Some(ReasoningSummary::None) | None => None, + Some(summary) => Some(summary.to_string()), + } +} + +fn personality_mode(personality: Option) -> Option { + match personality { + Some(Personality::None) | None => None, + Some(personality) => Some(personality.to_string()), + } +} + +fn analytics_turn_status(status: codex_app_server_protocol::TurnStatus) -> Option { + match status { + codex_app_server_protocol::TurnStatus::Completed => Some(TurnStatus::Completed), + codex_app_server_protocol::TurnStatus::Failed => Some(TurnStatus::Failed), + codex_app_server_protocol::TurnStatus::Interrupted => Some(TurnStatus::Interrupted), + codex_app_server_protocol::TurnStatus::InProgress => None, + } +} + +fn num_input_images(input: &[UserInput]) -> usize { + input + .iter() + .filter(|item| matches!(item, UserInput::Image { .. } | UserInput::LocalImage { .. 
})) + .count() +} + +fn rejection_reason_from_error_type( + error_type: Option, +) -> Option { + match error_type? { + AnalyticsJsonRpcError::TurnSteer(error) => Some(error.into()), + AnalyticsJsonRpcError::Input(error) => Some(error.into()), + } } pub(crate) fn skill_id_for_local_skill( diff --git a/codex-rs/app-server-client/src/lib.rs b/codex-rs/app-server-client/src/lib.rs index 4eadb0924d..71a8784e0a 100644 --- a/codex-rs/app-server-client/src/lib.rs +++ b/codex-rs/app-server-client/src/lib.rs @@ -28,6 +28,7 @@ use std::time::Duration; pub use codex_app_server::in_process::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY; pub use codex_app_server::in_process::InProcessServerEvent; use codex_app_server::in_process::InProcessStartArgs; +use codex_app_server::in_process::LogDbLayer; use codex_app_server_protocol::ClientInfo; use codex_app_server_protocol::ClientNotification; use codex_app_server_protocol::ClientRequest; @@ -44,6 +45,7 @@ use codex_core::config::Config; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; pub use codex_exec_server::EnvironmentManager; +pub use codex_exec_server::ExecServerRuntimePaths; use codex_feedback::CodexFeedback; use codex_protocol::protocol::SessionSource; use serde::de::DeserializeOwned; @@ -63,9 +65,9 @@ pub use crate::remote::RemoteAppServerConnectArgs; /// while legacy startup/config paths are migrated to RPCs. 
pub mod legacy_core { pub use codex_core::Cursor; - pub use codex_core::DEFAULT_PROJECT_DOC_FILENAME; + pub use codex_core::DEFAULT_AGENTS_MD_FILENAME; pub use codex_core::INTERACTIVE_SESSION_SOURCES; - pub use codex_core::LOCAL_PROJECT_DOC_FILENAME; + pub use codex_core::LOCAL_AGENTS_MD_FILENAME; pub use codex_core::McpManager; pub use codex_core::PLUGIN_TEXT_MENTION_SIGIL; pub use codex_core::RolloutRecorder; @@ -75,7 +77,6 @@ pub mod legacy_core { pub use codex_core::ThreadsPage; pub use codex_core::append_message_history_entry; pub use codex_core::check_execpolicy_for_warnings; - pub use codex_core::discover_project_doc_paths; pub use codex_core::find_thread_meta_by_name_str; pub use codex_core::find_thread_name_by_id; pub use codex_core::find_thread_names_by_ids; @@ -353,6 +354,8 @@ pub struct InProcessClientStartArgs { pub cloud_requirements: CloudRequirementsLoader, /// Feedback sink used by app-server/core telemetry and logs. pub feedback: CodexFeedback, + /// SQLite tracing layer used to flush recently emitted logs before feedback upload. + pub log_db: Option, /// Environment manager used by core execution and filesystem operations. pub environment_manager: Arc, /// Startup warnings emitted after initialize succeeds. 
@@ -404,6 +407,7 @@ impl InProcessClientStartArgs { loader_overrides: self.loader_overrides, cloud_requirements: self.cloud_requirements, feedback: self.feedback, + log_db: self.log_db, environment_manager: self.environment_manager, config_warnings: self.config_warnings, session_source: self.session_source, @@ -966,6 +970,7 @@ mod tests { match ConfigBuilder::default().build().await { Ok(config) => config, Err(_) => Config::load_default_with_cli_overrides(Vec::new()) + .await .expect("default config should load"), } } @@ -981,6 +986,7 @@ mod tests { loader_overrides: LoaderOverrides::default(), cloud_requirements: CloudRequirementsLoader::default(), feedback: CodexFeedback::new(), + log_db: None, environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), config_warnings: Vec::new(), session_source, @@ -1993,6 +1999,7 @@ mod tests { loader_overrides: LoaderOverrides::default(), cloud_requirements: CloudRequirementsLoader::default(), feedback: CodexFeedback::new(), + log_db: None, environment_manager: environment_manager.clone(), config_warnings: Vec::new(), session_source: SessionSource::Exec, diff --git a/codex-rs/app-server-protocol/Cargo.toml b/codex-rs/app-server-protocol/Cargo.toml index d9ed5e8730..0cb50d8549 100644 --- a/codex-rs/app-server-protocol/Cargo.toml +++ b/codex-rs/app-server-protocol/Cargo.toml @@ -15,7 +15,6 @@ workspace = true anyhow = { workspace = true } clap = { workspace = true, features = ["derive"] } codex-experimental-api-macros = { workspace = true } -codex-git-utils = { workspace = true } codex-protocol = { workspace = true } codex-shell-command = { workspace = true } codex-utils-absolute-path = { workspace = true } diff --git a/codex-rs/app-server-protocol/schema/json/ClientRequest.json b/codex-rs/app-server-protocol/schema/json/ClientRequest.json index 76f7abdfd2..113d82c94c 100644 --- a/codex-rs/app-server-protocol/schema/json/ClientRequest.json +++ b/codex-rs/app-server-protocol/schema/json/ClientRequest.json 
@@ -605,6 +605,16 @@ "description": { "type": "string" }, + "details": { + "anyOf": [ + { + "$ref": "#/definitions/MigrationDetails" + }, + { + "type": "null" + } + ] + }, "itemType": { "$ref": "#/definitions/ExternalAgentConfigMigrationItemType" } @@ -620,6 +630,7 @@ "AGENTS_MD", "CONFIG", "SKILLS", + "PLUGINS", "MCP_SERVER_CONFIG" ], "type": "string" @@ -1233,6 +1244,32 @@ } ] }, + "MarketplaceAddParams": { + "properties": { + "refName": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": "string" + }, + "sparsePaths": { + "items": { + "type": "string" + }, + "type": [ + "array", + "null" + ] + } + }, + "required": [ + "source" + ], + "type": "object" + }, "McpResourceReadParams": { "properties": { "server": { @@ -1333,6 +1370,20 @@ } ] }, + "MigrationDetails": { + "properties": { + "plugins": { + "items": { + "$ref": "#/definitions/PluginsMigration" + }, + "type": "array" + } + }, + "required": [ + "plugins" + ], + "type": "object" + }, "ModeKind": { "description": "Initial collaboration mode to use when the TUI starts.", "enum": [ @@ -1452,6 +1503,24 @@ ], "type": "object" }, + "PluginsMigration": { + "properties": { + "marketplaceName": { + "type": "string" + }, + "pluginNames": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "marketplaceName", + "pluginNames" + ], + "type": "object" + }, "ReadOnlyAccess": { "oneOf": [ { @@ -1499,6 +1568,13 @@ } ] }, + "RealtimeOutputModality": { + "enum": [ + "text", + "audio" + ], + "type": "string" + }, "RealtimeVoice": { "enum": [ "alloy", @@ -2715,6 +2791,23 @@ ], "type": "object" }, + "ThreadInjectItemsParams": { + "properties": { + "items": { + "description": "Raw Responses API items to append to the thread's model-visible history.", + "items": true, + "type": "array" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "items", + "threadId" + ], + "type": "object" + }, "ThreadListParams": { "properties": { "archived": { @@ -2809,6 +2902,13 @@ }, "type": 
"object" }, + "ThreadMemoryMode": { + "enum": [ + "enabled", + "disabled" + ], + "type": "string" + }, "ThreadMetadataGitInfoUpdateParams": { "properties": { "branch": { @@ -3952,6 +4052,31 @@ "title": "Thread/readRequest", "type": "object" }, + { + "description": "Append raw Responses API items to the thread history without starting a user turn.", + "properties": { + "id": { + "$ref": "#/definitions/RequestId" + }, + "method": { + "enum": [ + "thread/inject_items" + ], + "title": "Thread/injectItemsRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/ThreadInjectItemsParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Thread/injectItemsRequest", + "type": "object" + }, { "properties": { "id": { @@ -3976,6 +4101,30 @@ "title": "Skills/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": "#/definitions/RequestId" + }, + "method": { + "enum": [ + "marketplace/add" + ], + "title": "Marketplace/addRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/MarketplaceAddParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Marketplace/addRequest", + "type": "object" + }, { "properties": { "id": { diff --git a/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json b/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json index 617fa1f3cb..e5287e1c65 100644 --- a/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json +++ b/codex-rs/app-server-protocol/schema/json/CommandExecutionRequestApprovalParams.json @@ -75,7 +75,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -326,11 +326,15 @@ ] }, "cwd": { - "description": "The command's working directory.", - "type": [ - "string", - "null" - ] + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } + ], + 
"description": "The command's working directory." }, "itemId": { "type": "string" diff --git a/codex-rs/app-server-protocol/schema/json/ServerNotification.json b/codex-rs/app-server-protocol/schema/json/ServerNotification.json index c3ab83766a..d02a28e370 100644 --- a/codex-rs/app-server-protocol/schema/json/ServerNotification.json +++ b/codex-rs/app-server-protocol/schema/json/ServerNotification.json @@ -608,7 +608,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1180,7 +1180,7 @@ "type": "string" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "source": { "$ref": "#/definitions/GuardianCommandSource" @@ -1211,7 +1211,7 @@ "type": "array" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "program": { "type": "string" @@ -1240,11 +1240,11 @@ { "properties": { "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "files": { "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, @@ -1517,8 +1517,16 @@ "scope": { "$ref": "#/definitions/HookScope" }, + "source": { + "allOf": [ + { + "$ref": "#/definitions/HookSource" + } + ], + "default": "unknown" + }, "sourcePath": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "startedAt": { "format": "int64", @@ -1555,6 +1563,19 @@ ], "type": "string" }, + "HookSource": { + "enum": [ + "system", + "user", + "project", + "mdm", + "sessionFlags", + "legacyManagedConfigFile", + "legacyManagedConfigMdm", + "unknown" + ], + "type": "string" + }, "HookStartedNotification": { "properties": { "run": { @@ -2461,8 +2482,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." 
}, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -2769,8 +2794,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -2883,6 +2912,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -3099,7 +3134,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -3132,9 +3167,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -3384,13 +3423,35 @@ ], "type": "object" }, - "ThreadRealtimeTranscriptUpdatedNotification": { + "ThreadRealtimeTranscriptDeltaNotification": { "description": "EXPERIMENTAL - flat transcript delta emitted whenever realtime transcript text changes.", + "properties": { + "delta": { + "description": "Live transcript delta from the realtime event.", + "type": "string" + }, + "role": { + "type": "string" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "delta", + "role", + "threadId" + ], + "type": "object" + }, + "ThreadRealtimeTranscriptDoneNotification": { + "description": "EXPERIMENTAL - final transcript text emitted when realtime completes a transcript part.", "properties": { "role": { "type": "string" }, "text": { + "description": "Final complete text for the transcript part.", "type": "string" }, "threadId": { @@ -4949,20 +5010,40 @@ "properties": { "method": { "enum": [ - "thread/realtime/transcriptUpdated" + "thread/realtime/transcript/delta" ], - "title": "Thread/realtime/transcriptUpdatedNotificationMethod", + "title": 
"Thread/realtime/transcript/deltaNotificationMethod", "type": "string" }, "params": { - "$ref": "#/definitions/ThreadRealtimeTranscriptUpdatedNotification" + "$ref": "#/definitions/ThreadRealtimeTranscriptDeltaNotification" } }, "required": [ "method", "params" ], - "title": "Thread/realtime/transcriptUpdatedNotification", + "title": "Thread/realtime/transcript/deltaNotification", + "type": "object" + }, + { + "properties": { + "method": { + "enum": [ + "thread/realtime/transcript/done" + ], + "title": "Thread/realtime/transcript/doneNotificationMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/ThreadRealtimeTranscriptDoneNotification" + } + }, + "required": [ + "method", + "params" + ], + "title": "Thread/realtime/transcript/doneNotification", "type": "object" }, { diff --git a/codex-rs/app-server-protocol/schema/json/ServerRequest.json b/codex-rs/app-server-protocol/schema/json/ServerRequest.json index 7c11a4c02b..b31e69f203 100644 --- a/codex-rs/app-server-protocol/schema/json/ServerRequest.json +++ b/codex-rs/app-server-protocol/schema/json/ServerRequest.json @@ -141,7 +141,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -346,11 +346,15 @@ ] }, "cwd": { - "description": "The command's working directory.", - "type": [ - "string", - "null" - ] + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } + ], + "description": "The command's working directory." 
}, "itemId": { "type": "string" diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json index 15b31cc0ef..e87fa17928 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json @@ -578,6 +578,31 @@ "title": "Thread/readRequest", "type": "object" }, + { + "description": "Append raw Responses API items to the thread history without starting a user turn.", + "properties": { + "id": { + "$ref": "#/definitions/v2/RequestId" + }, + "method": { + "enum": [ + "thread/inject_items" + ], + "title": "Thread/injectItemsRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/v2/ThreadInjectItemsParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Thread/injectItemsRequest", + "type": "object" + }, { "properties": { "id": { @@ -602,6 +627,30 @@ "title": "Skills/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": "#/definitions/v2/RequestId" + }, + "method": { + "enum": [ + "marketplace/add" + ], + "title": "Marketplace/addRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/v2/MarketplaceAddParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Marketplace/addRequest", + "type": "object" + }, { "properties": { "id": { @@ -1792,11 +1841,15 @@ ] }, "cwd": { - "description": "The command's working directory.", - "type": [ - "string", - "null" - ] + "anyOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + }, + { + "type": "null" + } + ], + "description": "The command's working directory." 
}, "itemId": { "type": "string" @@ -4330,20 +4383,40 @@ "properties": { "method": { "enum": [ - "thread/realtime/transcriptUpdated" + "thread/realtime/transcript/delta" ], - "title": "Thread/realtime/transcriptUpdatedNotificationMethod", + "title": "Thread/realtime/transcript/deltaNotificationMethod", "type": "string" }, "params": { - "$ref": "#/definitions/v2/ThreadRealtimeTranscriptUpdatedNotification" + "$ref": "#/definitions/v2/ThreadRealtimeTranscriptDeltaNotification" } }, "required": [ "method", "params" ], - "title": "Thread/realtime/transcriptUpdatedNotification", + "title": "Thread/realtime/transcript/deltaNotification", + "type": "object" + }, + { + "properties": { + "method": { + "enum": [ + "thread/realtime/transcript/done" + ], + "title": "Thread/realtime/transcript/doneNotificationMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/v2/ThreadRealtimeTranscriptDoneNotification" + } + }, + "required": [ + "method", + "params" + ], + "title": "Thread/realtime/transcript/doneNotification", "type": "object" }, { @@ -5881,7 +5954,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": { "enum": [ @@ -7436,6 +7509,16 @@ "description": { "type": "string" }, + "details": { + "anyOf": [ + { + "$ref": "#/definitions/v2/MigrationDetails" + }, + { + "type": "null" + } + ] + }, "itemType": { "$ref": "#/definitions/v2/ExternalAgentConfigMigrationItemType" } @@ -7451,6 +7534,7 @@ "AGENTS_MD", "CONFIG", "SKILLS", + "PLUGINS", "MCP_SERVER_CONFIG" ], "type": "string" @@ -7688,11 +7772,15 @@ "type": "integer" }, "isDirectory": { - "description": "Whether the path currently resolves to a directory.", + "description": "Whether the path resolves to a directory.", "type": "boolean" }, "isFile": { - "description": "Whether the path currently resolves to a regular file.", + "description": "Whether the path resolves to a regular file.", + "type": "boolean" + }, + "isSymlink": { + "description": "Whether 
the path itself is a symbolic link.", "type": "boolean" }, "modifiedAtMs": { @@ -7705,6 +7793,7 @@ "createdAtMs", "isDirectory", "isFile", + "isSymlink", "modifiedAtMs" ], "title": "FsGetMetadataResponse", @@ -8169,7 +8258,7 @@ "type": "string" }, "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "source": { "$ref": "#/definitions/v2/GuardianCommandSource" @@ -8200,7 +8289,7 @@ "type": "array" }, "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "program": { "type": "string" @@ -8229,11 +8318,11 @@ { "properties": { "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "files": { "items": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": "array" }, @@ -8508,8 +8597,16 @@ "scope": { "$ref": "#/definitions/v2/HookScope" }, + "source": { + "allOf": [ + { + "$ref": "#/definitions/v2/HookSource" + } + ], + "default": "unknown" + }, "sourcePath": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "startedAt": { "format": "int64", @@ -8546,6 +8643,19 @@ ], "type": "string" }, + "HookSource": { + "enum": [ + "system", + "user", + "project", + "mdm", + "sessionFlags", + "legacyManagedConfigFile", + "legacyManagedConfigMdm", + "unknown" + ], + "type": "string" + }, "HookStartedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -9030,6 +9140,55 @@ "title": "LogoutAccountResponse", "type": "object" }, + "MarketplaceAddParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "refName": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": "string" + }, + "sparsePaths": { + "items": { + "type": "string" + }, + "type": [ + "array", + "null" + ] + } + }, + "required": [ + "source" + ], + "title": "MarketplaceAddParams", + "type": "object" + }, + "MarketplaceAddResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "alreadyAdded": { + "type": 
"boolean" + }, + "installedRoot": { + "$ref": "#/definitions/v2/AbsolutePathBuf" + }, + "marketplaceName": { + "type": "string" + } + }, + "required": [ + "alreadyAdded", + "installedRoot", + "marketplaceName" + ], + "title": "MarketplaceAddResponse", + "type": "object" + }, "MarketplaceInterface": { "properties": { "displayName": { @@ -9424,6 +9583,20 @@ } ] }, + "MigrationDetails": { + "properties": { + "plugins": { + "items": { + "$ref": "#/definitions/v2/PluginsMigration" + }, + "type": "array" + } + }, + "required": [ + "plugins" + ], + "type": "object" + }, "ModeKind": { "description": "Initial collaboration mode to use when the TUI starts.", "enum": [ @@ -9706,12 +9879,6 @@ "null" ] }, - "dangerFullAccessDenylistOnly": { - "type": [ - "boolean", - "null" - ] - }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", @@ -10352,6 +10519,24 @@ "title": "PluginUninstallResponse", "type": "object" }, + "PluginsMigration": { + "properties": { + "marketplaceName": { + "type": "string" + }, + "pluginNames": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "marketplaceName", + "pluginNames" + ], + "type": "object" + }, "ProfileV2": { "additionalProperties": true, "properties": { @@ -10615,6 +10800,13 @@ ], "type": "string" }, + "RealtimeOutputModality": { + "enum": [ + "text", + "audio" + ], + "type": "string" + }, "RealtimeVoice": { "enum": [ "alloy", @@ -12034,15 +12226,23 @@ ] }, "iconLarge": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "iconSmall": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "shortDescription": { @@ -12086,7 +12286,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "scope": { "$ref": "#/definitions/v2/SkillScope" @@ -12139,7 +12339,7 @@ "type": "string" }, "path": { - "type": "string" + 
"$ref": "#/definitions/v2/AbsolutePathBuf" }, "shortDescription": { "type": [ @@ -12523,8 +12723,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -12804,13 +13008,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": "array" }, @@ -12862,6 +13066,30 @@ "ThreadId": { "type": "string" }, + "ThreadInjectItemsParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "items": { + "description": "Raw Responses API items to append to the thread's model-visible history.", + "items": true, + "type": "array" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "items", + "threadId" + ], + "title": "ThreadInjectItemsParams", + "type": "object" + }, + "ThreadInjectItemsResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ThreadInjectItemsResponse", + "type": "object" + }, "ThreadItem": { "oneOf": [ { @@ -13044,8 +13272,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -13158,6 +13390,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -13374,7 +13612,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": { "enum": [ @@ -13407,9 +13645,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -13647,6 +13889,13 @@ "title": "ThreadLoadedListResponse", "type": "object" }, + "ThreadMemoryMode": { + "enum": [ + "enabled", + "disabled" + ], + "type": "string" + }, "ThreadMetadataGitInfoUpdateParams": { "properties": { "branch": { @@ -13954,14 +14203,38 @@ "title": "ThreadRealtimeStartedNotification", "type": "object" }, - "ThreadRealtimeTranscriptUpdatedNotification": { + "ThreadRealtimeTranscriptDeltaNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "description": "EXPERIMENTAL - flat transcript delta emitted whenever realtime transcript text changes.", + "properties": { + "delta": { + "description": "Live transcript delta from the realtime event.", + "type": "string" + }, + "role": { + "type": "string" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "delta", + "role", + "threadId" + ], + "title": "ThreadRealtimeTranscriptDeltaNotification", + "type": "object" + }, + "ThreadRealtimeTranscriptDoneNotification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "EXPERIMENTAL - final transcript text emitted when realtime completes a transcript part.", "properties": { "role": { "type": "string" }, "text": { + "description": "Final complete text for the transcript part.", "type": "string" }, "threadId": { @@ -13973,7 +14246,7 @@ "text", "threadId" ], - "title": "ThreadRealtimeTranscriptUpdatedNotification", + "title": 
"ThreadRealtimeTranscriptDoneNotification", "type": "object" }, "ThreadResumeParams": { @@ -14101,13 +14374,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": "array" }, @@ -14400,13 +14673,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/v2/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json index dd053d77d2..8254ad0127 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json @@ -1160,6 +1160,31 @@ "title": "Thread/readRequest", "type": "object" }, + { + "description": "Append raw Responses API items to the thread history without starting a user turn.", + "properties": { + "id": { + "$ref": "#/definitions/RequestId" + }, + "method": { + "enum": [ + "thread/inject_items" + ], + "title": "Thread/injectItemsRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/ThreadInjectItemsParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Thread/injectItemsRequest", + "type": "object" + }, { "properties": { "id": { @@ -1184,6 +1209,30 @@ "title": "Skills/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": 
"#/definitions/RequestId" + }, + "method": { + "enum": [ + "marketplace/add" + ], + "title": "Marketplace/addRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/MarketplaceAddParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "Marketplace/addRequest", + "type": "object" + }, { "properties": { "id": { @@ -2522,7 +2571,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -4077,6 +4126,16 @@ "description": { "type": "string" }, + "details": { + "anyOf": [ + { + "$ref": "#/definitions/MigrationDetails" + }, + { + "type": "null" + } + ] + }, "itemType": { "$ref": "#/definitions/ExternalAgentConfigMigrationItemType" } @@ -4092,6 +4151,7 @@ "AGENTS_MD", "CONFIG", "SKILLS", + "PLUGINS", "MCP_SERVER_CONFIG" ], "type": "string" @@ -4329,11 +4389,15 @@ "type": "integer" }, "isDirectory": { - "description": "Whether the path currently resolves to a directory.", + "description": "Whether the path resolves to a directory.", "type": "boolean" }, "isFile": { - "description": "Whether the path currently resolves to a regular file.", + "description": "Whether the path resolves to a regular file.", + "type": "boolean" + }, + "isSymlink": { + "description": "Whether the path itself is a symbolic link.", "type": "boolean" }, "modifiedAtMs": { @@ -4346,6 +4410,7 @@ "createdAtMs", "isDirectory", "isFile", + "isSymlink", "modifiedAtMs" ], "title": "FsGetMetadataResponse", @@ -4921,7 +4986,7 @@ "type": "string" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "source": { "$ref": "#/definitions/GuardianCommandSource" @@ -4952,7 +5017,7 @@ "type": "array" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "program": { "type": "string" @@ -4981,11 +5046,11 @@ { "properties": { "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "files": { "items": { - "type": "string" + "$ref": 
"#/definitions/AbsolutePathBuf" }, "type": "array" }, @@ -5260,8 +5325,16 @@ "scope": { "$ref": "#/definitions/HookScope" }, + "source": { + "allOf": [ + { + "$ref": "#/definitions/HookSource" + } + ], + "default": "unknown" + }, "sourcePath": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "startedAt": { "format": "int64", @@ -5298,6 +5371,19 @@ ], "type": "string" }, + "HookSource": { + "enum": [ + "system", + "user", + "project", + "mdm", + "sessionFlags", + "legacyManagedConfigFile", + "legacyManagedConfigMdm", + "unknown" + ], + "type": "string" + }, "HookStartedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -5826,6 +5912,55 @@ "title": "LogoutAccountResponse", "type": "object" }, + "MarketplaceAddParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "refName": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": "string" + }, + "sparsePaths": { + "items": { + "type": "string" + }, + "type": [ + "array", + "null" + ] + } + }, + "required": [ + "source" + ], + "title": "MarketplaceAddParams", + "type": "object" + }, + "MarketplaceAddResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "alreadyAdded": { + "type": "boolean" + }, + "installedRoot": { + "$ref": "#/definitions/AbsolutePathBuf" + }, + "marketplaceName": { + "type": "string" + } + }, + "required": [ + "alreadyAdded", + "installedRoot", + "marketplaceName" + ], + "title": "MarketplaceAddResponse", + "type": "object" + }, "MarketplaceInterface": { "properties": { "displayName": { @@ -6220,6 +6355,20 @@ } ] }, + "MigrationDetails": { + "properties": { + "plugins": { + "items": { + "$ref": "#/definitions/PluginsMigration" + }, + "type": "array" + } + }, + "required": [ + "plugins" + ], + "type": "object" + }, "ModeKind": { "description": "Initial collaboration mode to use when the TUI starts.", "enum": [ @@ -6502,12 +6651,6 @@ "null" ] }, - 
"dangerFullAccessDenylistOnly": { - "type": [ - "boolean", - "null" - ] - }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", @@ -7148,6 +7291,24 @@ "title": "PluginUninstallResponse", "type": "object" }, + "PluginsMigration": { + "properties": { + "marketplaceName": { + "type": "string" + }, + "pluginNames": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "marketplaceName", + "pluginNames" + ], + "type": "object" + }, "ProfileV2": { "additionalProperties": true, "properties": { @@ -7411,6 +7572,13 @@ ], "type": "string" }, + "RealtimeOutputModality": { + "enum": [ + "text", + "audio" + ], + "type": "string" + }, "RealtimeVoice": { "enum": [ "alloy", @@ -9580,20 +9748,40 @@ "properties": { "method": { "enum": [ - "thread/realtime/transcriptUpdated" + "thread/realtime/transcript/delta" ], - "title": "Thread/realtime/transcriptUpdatedNotificationMethod", + "title": "Thread/realtime/transcript/deltaNotificationMethod", "type": "string" }, "params": { - "$ref": "#/definitions/ThreadRealtimeTranscriptUpdatedNotification" + "$ref": "#/definitions/ThreadRealtimeTranscriptDeltaNotification" } }, "required": [ "method", "params" ], - "title": "Thread/realtime/transcriptUpdatedNotification", + "title": "Thread/realtime/transcript/deltaNotification", + "type": "object" + }, + { + "properties": { + "method": { + "enum": [ + "thread/realtime/transcript/done" + ], + "title": "Thread/realtime/transcript/doneNotificationMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/ThreadRealtimeTranscriptDoneNotification" + } + }, + "required": [ + "method", + "params" + ], + "title": "Thread/realtime/transcript/doneNotification", "type": "object" }, { @@ -9882,15 +10070,23 @@ ] }, "iconLarge": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "iconSmall": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + 
}, + { + "type": "null" + } ] }, "shortDescription": { @@ -9934,7 +10130,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "scope": { "$ref": "#/definitions/SkillScope" @@ -9987,7 +10183,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "shortDescription": { "type": [ @@ -10371,8 +10567,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -10652,13 +10852,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, @@ -10710,6 +10910,30 @@ "ThreadId": { "type": "string" }, + "ThreadInjectItemsParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "items": { + "description": "Raw Responses API items to append to the thread's model-visible history.", + "items": true, + "type": "array" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "items", + "threadId" + ], + "title": "ThreadInjectItemsParams", + "type": "object" + }, + "ThreadInjectItemsResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ThreadInjectItemsResponse", + "type": "object" + }, "ThreadItem": { "oneOf": [ { @@ -10892,8 +11116,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The 
command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -11006,6 +11234,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -11222,7 +11456,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -11255,9 +11489,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -11495,6 +11733,13 @@ "title": "ThreadLoadedListResponse", "type": "object" }, + "ThreadMemoryMode": { + "enum": [ + "enabled", + "disabled" + ], + "type": "string" + }, "ThreadMetadataGitInfoUpdateParams": { "properties": { "branch": { @@ -11802,14 +12047,38 @@ "title": "ThreadRealtimeStartedNotification", "type": "object" }, - "ThreadRealtimeTranscriptUpdatedNotification": { + "ThreadRealtimeTranscriptDeltaNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "description": "EXPERIMENTAL - flat transcript delta emitted whenever realtime transcript text changes.", + "properties": { + "delta": { + "description": "Live transcript delta from the realtime event.", + "type": "string" + }, + "role": { + "type": "string" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "delta", + "role", + "threadId" + ], + "title": "ThreadRealtimeTranscriptDeltaNotification", + "type": "object" + }, + "ThreadRealtimeTranscriptDoneNotification": { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "EXPERIMENTAL - final transcript text emitted when realtime completes a transcript part.", "properties": { "role": { "type": "string" }, "text": { + "description": "Final complete text for the transcript part.", "type": "string" }, "threadId": { @@ -11821,7 +12090,7 @@ "text", "threadId" ], - "title": "ThreadRealtimeTranscriptUpdatedNotification", + "title": 
"ThreadRealtimeTranscriptDoneNotification", "type": "object" }, "ThreadResumeParams": { @@ -11949,13 +12218,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, @@ -12248,13 +12517,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json index ae6eb1dc7d..614575a955 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json @@ -151,12 +151,6 @@ "null" ] }, - "dangerFullAccessDenylistOnly": { - "type": [ - "boolean", - "null" - ] - }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", diff --git a/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigDetectResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigDetectResponse.json index a73e515c2b..ad8f0f9bdd 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigDetectResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigDetectResponse.json @@ -13,6 +13,16 @@ "description": { "type": "string" }, + "details": { + "anyOf": [ + { + "$ref": "#/definitions/MigrationDetails" + }, + { + "type": "null" + } + ] + }, "itemType": 
{ "$ref": "#/definitions/ExternalAgentConfigMigrationItemType" } @@ -28,9 +38,42 @@ "AGENTS_MD", "CONFIG", "SKILLS", + "PLUGINS", "MCP_SERVER_CONFIG" ], "type": "string" + }, + "MigrationDetails": { + "properties": { + "plugins": { + "items": { + "$ref": "#/definitions/PluginsMigration" + }, + "type": "array" + } + }, + "required": [ + "plugins" + ], + "type": "object" + }, + "PluginsMigration": { + "properties": { + "marketplaceName": { + "type": "string" + }, + "pluginNames": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "marketplaceName", + "pluginNames" + ], + "type": "object" } }, "properties": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigImportParams.json b/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigImportParams.json index 85af249590..4b7ac826cc 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigImportParams.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ExternalAgentConfigImportParams.json @@ -13,6 +13,16 @@ "description": { "type": "string" }, + "details": { + "anyOf": [ + { + "$ref": "#/definitions/MigrationDetails" + }, + { + "type": "null" + } + ] + }, "itemType": { "$ref": "#/definitions/ExternalAgentConfigMigrationItemType" } @@ -28,9 +38,42 @@ "AGENTS_MD", "CONFIG", "SKILLS", + "PLUGINS", "MCP_SERVER_CONFIG" ], "type": "string" + }, + "MigrationDetails": { + "properties": { + "plugins": { + "items": { + "$ref": "#/definitions/PluginsMigration" + }, + "type": "array" + } + }, + "required": [ + "plugins" + ], + "type": "object" + }, + "PluginsMigration": { + "properties": { + "marketplaceName": { + "type": "string" + }, + "pluginNames": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "marketplaceName", + "pluginNames" + ], + "type": "object" } }, "properties": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/FsGetMetadataResponse.json 
b/codex-rs/app-server-protocol/schema/json/v2/FsGetMetadataResponse.json index 95eeb63924..82481f579e 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/FsGetMetadataResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/FsGetMetadataResponse.json @@ -8,11 +8,15 @@ "type": "integer" }, "isDirectory": { - "description": "Whether the path currently resolves to a directory.", + "description": "Whether the path resolves to a directory.", "type": "boolean" }, "isFile": { - "description": "Whether the path currently resolves to a regular file.", + "description": "Whether the path resolves to a regular file.", + "type": "boolean" + }, + "isSymlink": { + "description": "Whether the path itself is a symbolic link.", "type": "boolean" }, "modifiedAtMs": { @@ -25,6 +29,7 @@ "createdAtMs", "isDirectory", "isFile", + "isSymlink", "modifiedAtMs" ], "title": "FsGetMetadataResponse", diff --git a/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json index bce797086c..4f444961a9 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/HookCompletedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "HookEventName": { "enum": [ "preToolUse", @@ -102,8 +106,16 @@ "scope": { "$ref": "#/definitions/HookScope" }, + "source": { + "allOf": [ + { + "$ref": "#/definitions/HookSource" + } + ], + "default": "unknown" + }, "sourcePath": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "startedAt": { "format": "int64", @@ -139,6 +151,19 @@ "turn" ], "type": "string" + }, + "HookSource": { + "enum": [ + "system", + "user", + "project", + "mdm", + "sessionFlags", + "legacyManagedConfigFile", + "legacyManagedConfigMdm", + "unknown" + ], + "type": "string" } }, "properties": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json index 72f32d0d9d..6d439c95ca 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/HookStartedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "HookEventName": { "enum": [ "preToolUse", @@ -102,8 +106,16 @@ "scope": { "$ref": "#/definitions/HookScope" }, + "source": { + "allOf": [ + { + "$ref": "#/definitions/HookSource" + } + ], + "default": "unknown" + }, "sourcePath": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "startedAt": { "format": "int64", @@ -139,6 +151,19 @@ "turn" ], "type": "string" + }, + "HookSource": { + "enum": [ + "system", + "user", + "project", + "mdm", + "sessionFlags", + "legacyManagedConfigFile", + "legacyManagedConfigMdm", + "unknown" + ], + "type": "string" } }, "properties": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json index 2883670c88..c5a84c4272 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -78,7 +82,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -665,8 +669,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -779,6 +787,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -995,7 +1009,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1028,9 +1042,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewCompletedNotification.json index 590a7a5d65..2b223c8bb1 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewCompletedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AutoReviewDecisionSource": { "description": "[UNSTABLE] Source that produced a terminal guardian approval review decision.", "enum": [ @@ -54,7 +58,7 @@ "type": "string" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "source": { "$ref": "#/definitions/GuardianCommandSource" @@ -85,7 +89,7 @@ "type": "array" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "program": { "type": "string" @@ -114,11 +118,11 @@ { "properties": { "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "files": { "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewStartedNotification.json index fdb01f27e5..e505f13320 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemGuardianApprovalReviewStartedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "GuardianApprovalReview": { "description": "[UNSTABLE] Temporary guardian approval review payload used by `item/autoApprovalReview/*` notifications. 
This shape is expected to change soon.", "properties": { @@ -47,7 +51,7 @@ "type": "string" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "source": { "$ref": "#/definitions/GuardianCommandSource" @@ -78,7 +82,7 @@ "type": "array" }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "program": { "type": "string" @@ -107,11 +111,11 @@ { "properties": { "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "files": { "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json index c2e71ccba9..cb9201f0a6 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -78,7 +82,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -665,8 +669,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -779,6 +787,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -995,7 +1009,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1028,9 +1042,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddParams.json b/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddParams.json new file mode 100644 index 0000000000..704e5bbc2a --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddParams.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "refName": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": "string" + }, + "sparsePaths": { + "items": { + "type": "string" + }, + "type": [ + "array", + "null" + ] + } + }, + "required": [ + "source" + ], + "title": "MarketplaceAddParams", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddResponse.json b/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddResponse.json new file mode 100644 index 0000000000..d00db0d6be --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/MarketplaceAddResponse.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + } + }, + "properties": { + "alreadyAdded": { + "type": "boolean" + }, + "installedRoot": { + "$ref": "#/definitions/AbsolutePathBuf" + }, + "marketplaceName": { + "type": "string" + } + }, + "required": [ + "alreadyAdded", + "installedRoot", + "marketplaceName" + ], + "title": "MarketplaceAddResponse", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/PluginReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/PluginReadResponse.json index 1917935a2e..abe36390c9 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/PluginReadResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/PluginReadResponse.json @@ -293,15 +293,23 @@ ] }, "iconLarge": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "iconSmall": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "shortDescription": { @@ -335,7 +343,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "shortDescription": { "type": [ diff --git a/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json index a7fe2e8d60..05dfaa4b72 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using 
[AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -214,7 +218,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -808,8 +812,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -922,6 +930,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1138,7 +1152,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1171,9 +1185,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/SkillsListResponse.json b/codex-rs/app-server-protocol/schema/json/v2/SkillsListResponse.json index b4ec51ba78..6c72bfbb68 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/SkillsListResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/SkillsListResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "SkillDependencies": { "properties": { "tools": { @@ -51,15 +55,23 @@ ] }, "iconLarge": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "iconSmall": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "shortDescription": { @@ -103,7 +115,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "scope": { "$ref": "#/definitions/SkillScope" diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json index 5af02d344e..909c2af4d8 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json @@ -279,7 +279,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1036,8 +1036,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1322,8 +1326,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1436,6 +1444,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1652,7 +1666,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1685,9 +1699,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -2185,13 +2203,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsParams.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsParams.json new file mode 100644 index 0000000000..d117f3ae0e --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsParams.json @@ -0,0 +1,19 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "items": { + "description": "Raw Responses API items to append to the thread's model-visible history.", + "items": true, + "type": "array" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "items", + "threadId" + ], + "title": "ThreadInjectItemsParams", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsResponse.json new file mode 100644 index 0000000000..2ba62b2214 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadInjectItemsResponse.json @@ -0,0 +1,5 @@ +{ + 
"$schema": "http://json-schema.org/draft-07/schema#", + "title": "ThreadInjectItemsResponse", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json index 426f34ce35..8ebebd39f9 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json index c869a79749..40da3390ec 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." 
}, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json index 9569860c38..400c0b661a 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptUpdatedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDeltaNotification.json similarity index 70% rename from codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptUpdatedNotification.json rename to codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDeltaNotification.json index 2c6860fa31..22ad778eb2 100644 --- 
a/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptUpdatedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDeltaNotification.json @@ -2,10 +2,11 @@ "$schema": "http://json-schema.org/draft-07/schema#", "description": "EXPERIMENTAL - flat transcript delta emitted whenever realtime transcript text changes.", "properties": { - "role": { + "delta": { + "description": "Live transcript delta from the realtime event.", "type": "string" }, - "text": { + "role": { "type": "string" }, "threadId": { @@ -13,10 +14,10 @@ } }, "required": [ + "delta", "role", - "text", "threadId" ], - "title": "ThreadRealtimeTranscriptUpdatedNotification", + "title": "ThreadRealtimeTranscriptDeltaNotification", "type": "object" } \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDoneNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDoneNotification.json new file mode 100644 index 0000000000..2f4199fdb9 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadRealtimeTranscriptDoneNotification.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "EXPERIMENTAL - final transcript text emitted when realtime completes a transcript part.", + "properties": { + "role": { + "type": "string" + }, + "text": { + "description": "Final complete text for the transcript part.", + "type": "string" + }, + "threadId": { + "type": "string" + } + }, + "required": [ + "role", + "text", + "threadId" + ], + "title": "ThreadRealtimeTranscriptDoneNotification", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json index d888485d15..aef0cd737c 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json +++ 
b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json @@ -279,7 +279,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1036,8 +1036,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1322,8 +1326,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1436,6 +1444,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1652,7 +1666,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1685,9 +1699,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -2185,13 +2203,13 @@ "description": "Reviewer currently used for approval requests on this thread." 
}, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json index 502dd3961f..6a50d6d978 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json index 7a0e083093..a3d20555f1 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json @@ -279,7 +279,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1036,8 +1036,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1322,8 +1326,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1436,6 +1444,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1652,7 +1666,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1685,9 +1699,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { @@ -2185,13 +2203,13 @@ "description": "Reviewer currently used for approval requests on this thread." }, "cwd": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "instructionSources": { "default": [], "description": "Instruction source files currently loaded for this thread.", "items": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": "array" }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json index ff87af2069..c4b1e54490 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. 
If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." }, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json index daf821c374..e55c9651fa 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute 
and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "AgentPath": { "type": "string" }, @@ -217,7 +221,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -794,8 +798,12 @@ "type": "integer" }, "cwd": { - "description": "Working directory captured for the thread.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Working directory captured for the thread." }, "ephemeral": { "description": "Whether the thread is ephemeral and should not be materialized on disk.", @@ -1080,8 +1088,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -1194,6 +1206,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1410,7 +1428,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1443,9 +1461,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json index 82c2b3c76c..2848940085 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -214,7 +218,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -808,8 +812,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -922,6 +930,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1138,7 +1152,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1171,9 +1185,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json index ebb2065cb8..c3f31e5322 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -214,7 +218,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -808,8 +812,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -922,6 +930,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1138,7 +1152,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1171,9 +1185,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json index 8b7c2bc410..7e8ca749c0 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json @@ -1,6 +1,10 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "AbsolutePathBuf": { + "description": "A path that is guaranteed to be absolute and normalized (though it is not guaranteed to be canonicalized or exist on the filesystem).\n\nIMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set using [AbsolutePathBufGuard::new]. If no base path is set, the deserialization will fail unless the path being deserialized is already absolute.", + "type": "string" + }, "ByteRange": { "properties": { "end": { @@ -214,7 +218,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -808,8 +812,12 @@ "type": "array" }, "cwd": { - "description": "The command's working directory.", - "type": "string" + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "The command's working directory." 
}, "durationMs": { "description": "The duration of the command execution in milliseconds.", @@ -922,6 +930,12 @@ "id": { "type": "string" }, + "mcpAppResourceUri": { + "type": [ + "string", + "null" + ] + }, "result": { "anyOf": [ { @@ -1138,7 +1152,7 @@ "type": "string" }, "path": { - "type": "string" + "$ref": "#/definitions/AbsolutePathBuf" }, "type": { "enum": [ @@ -1171,9 +1185,13 @@ ] }, "savedPath": { - "type": [ - "string", - "null" + "anyOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + }, + { + "type": "null" + } ] }, "status": { diff --git a/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts b/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts index 1bbc9b7ac9..9d9a823408 100644 --- a/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts +++ b/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts @@ -33,6 +33,7 @@ import type { FsWriteFileParams } from "./v2/FsWriteFileParams"; import type { GetAccountParams } from "./v2/GetAccountParams"; import type { ListMcpServerStatusParams } from "./v2/ListMcpServerStatusParams"; import type { LoginAccountParams } from "./v2/LoginAccountParams"; +import type { MarketplaceAddParams } from "./v2/MarketplaceAddParams"; import type { McpResourceReadParams } from "./v2/McpResourceReadParams"; import type { McpServerOauthLoginParams } from "./v2/McpServerOauthLoginParams"; import type { McpServerToolCallParams } from "./v2/McpServerToolCallParams"; @@ -47,6 +48,7 @@ import type { SkillsListParams } from "./v2/SkillsListParams"; import type { ThreadArchiveParams } from "./v2/ThreadArchiveParams"; import type { ThreadCompactStartParams } from "./v2/ThreadCompactStartParams"; import type { ThreadForkParams } from "./v2/ThreadForkParams"; +import type { ThreadInjectItemsParams } from "./v2/ThreadInjectItemsParams"; import type { ThreadListParams } from "./v2/ThreadListParams"; import type { ThreadLoadedListParams } from "./v2/ThreadLoadedListParams"; import type { 
ThreadMetadataUpdateParams } from "./v2/ThreadMetadataUpdateParams"; @@ -66,4 +68,4 @@ import type { WindowsSandboxSetupStartParams } from "./v2/WindowsSandboxSetupSta /** * Request from the client to the server. */ -export type ClientRequest ={ "method": "initialize", id: RequestId, params: InitializeParams, } | { "method": "thread/start", id: RequestId, params: ThreadStartParams, } | { "method": "thread/resume", id: RequestId, params: ThreadResumeParams, } | { "method": "thread/fork", id: RequestId, params: ThreadForkParams, } | { "method": "thread/archive", id: RequestId, params: ThreadArchiveParams, } | { "method": "thread/unsubscribe", id: RequestId, params: ThreadUnsubscribeParams, } | { "method": "thread/name/set", id: RequestId, params: ThreadSetNameParams, } | { "method": "thread/metadata/update", id: RequestId, params: ThreadMetadataUpdateParams, } | { "method": "thread/unarchive", id: RequestId, params: ThreadUnarchiveParams, } | { "method": "thread/compact/start", id: RequestId, params: ThreadCompactStartParams, } | { "method": "thread/shellCommand", id: RequestId, params: ThreadShellCommandParams, } | { "method": "thread/rollback", id: RequestId, params: ThreadRollbackParams, } | { "method": "thread/list", id: RequestId, params: ThreadListParams, } | { "method": "thread/loaded/list", id: RequestId, params: ThreadLoadedListParams, } | { "method": "thread/read", id: RequestId, params: ThreadReadParams, } | { "method": "skills/list", id: RequestId, params: SkillsListParams, } | { "method": "plugin/list", id: RequestId, params: PluginListParams, } | { "method": "plugin/read", id: RequestId, params: PluginReadParams, } | { "method": "app/list", id: RequestId, params: AppsListParams, } | { "method": "fs/readFile", id: RequestId, params: FsReadFileParams, } | { "method": "fs/writeFile", id: RequestId, params: FsWriteFileParams, } | { "method": "fs/createDirectory", id: RequestId, params: FsCreateDirectoryParams, } | { "method": "fs/getMetadata", id: 
RequestId, params: FsGetMetadataParams, } | { "method": "fs/readDirectory", id: RequestId, params: FsReadDirectoryParams, } | { "method": "fs/remove", id: RequestId, params: FsRemoveParams, } | { "method": "fs/copy", id: RequestId, params: FsCopyParams, } | { "method": "fs/watch", id: RequestId, params: FsWatchParams, } | { "method": "fs/unwatch", id: RequestId, params: FsUnwatchParams, } | { "method": "skills/config/write", id: RequestId, params: SkillsConfigWriteParams, } | { "method": "plugin/install", id: RequestId, params: PluginInstallParams, } | { "method": "plugin/uninstall", id: RequestId, params: PluginUninstallParams, } | { "method": "turn/start", id: RequestId, params: TurnStartParams, } | { "method": "turn/steer", id: RequestId, params: TurnSteerParams, } | { "method": "turn/interrupt", id: RequestId, params: TurnInterruptParams, } | { "method": "review/start", id: RequestId, params: ReviewStartParams, } | { "method": "model/list", id: RequestId, params: ModelListParams, } | { "method": "experimentalFeature/list", id: RequestId, params: ExperimentalFeatureListParams, } | { "method": "experimentalFeature/enablement/set", id: RequestId, params: ExperimentalFeatureEnablementSetParams, } | { "method": "mcpServer/oauth/login", id: RequestId, params: McpServerOauthLoginParams, } | { "method": "config/mcpServer/reload", id: RequestId, params: undefined, } | { "method": "mcpServerStatus/list", id: RequestId, params: ListMcpServerStatusParams, } | { "method": "mcpServer/resource/read", id: RequestId, params: McpResourceReadParams, } | { "method": "mcpServer/tool/call", id: RequestId, params: McpServerToolCallParams, } | { "method": "windowsSandbox/setupStart", id: RequestId, params: WindowsSandboxSetupStartParams, } | { "method": "account/login/start", id: RequestId, params: LoginAccountParams, } | { "method": "account/login/cancel", id: RequestId, params: CancelLoginAccountParams, } | { "method": "account/logout", id: RequestId, params: undefined, } | { 
"method": "account/rateLimits/read", id: RequestId, params: undefined, } | { "method": "feedback/upload", id: RequestId, params: FeedbackUploadParams, } | { "method": "command/exec", id: RequestId, params: CommandExecParams, } | { "method": "command/exec/write", id: RequestId, params: CommandExecWriteParams, } | { "method": "command/exec/terminate", id: RequestId, params: CommandExecTerminateParams, } | { "method": "command/exec/resize", id: RequestId, params: CommandExecResizeParams, } | { "method": "config/read", id: RequestId, params: ConfigReadParams, } | { "method": "externalAgentConfig/detect", id: RequestId, params: ExternalAgentConfigDetectParams, } | { "method": "externalAgentConfig/import", id: RequestId, params: ExternalAgentConfigImportParams, } | { "method": "config/value/write", id: RequestId, params: ConfigValueWriteParams, } | { "method": "config/batchWrite", id: RequestId, params: ConfigBatchWriteParams, } | { "method": "configRequirements/read", id: RequestId, params: undefined, } | { "method": "account/read", id: RequestId, params: GetAccountParams, } | { "method": "getConversationSummary", id: RequestId, params: GetConversationSummaryParams, } | { "method": "gitDiffToRemote", id: RequestId, params: GitDiffToRemoteParams, } | { "method": "getAuthStatus", id: RequestId, params: GetAuthStatusParams, } | { "method": "fuzzyFileSearch", id: RequestId, params: FuzzyFileSearchParams, }; +export type ClientRequest ={ "method": "initialize", id: RequestId, params: InitializeParams, } | { "method": "thread/start", id: RequestId, params: ThreadStartParams, } | { "method": "thread/resume", id: RequestId, params: ThreadResumeParams, } | { "method": "thread/fork", id: RequestId, params: ThreadForkParams, } | { "method": "thread/archive", id: RequestId, params: ThreadArchiveParams, } | { "method": "thread/unsubscribe", id: RequestId, params: ThreadUnsubscribeParams, } | { "method": "thread/name/set", id: RequestId, params: ThreadSetNameParams, } | { "method": 
"thread/metadata/update", id: RequestId, params: ThreadMetadataUpdateParams, } | { "method": "thread/unarchive", id: RequestId, params: ThreadUnarchiveParams, } | { "method": "thread/compact/start", id: RequestId, params: ThreadCompactStartParams, } | { "method": "thread/shellCommand", id: RequestId, params: ThreadShellCommandParams, } | { "method": "thread/rollback", id: RequestId, params: ThreadRollbackParams, } | { "method": "thread/list", id: RequestId, params: ThreadListParams, } | { "method": "thread/loaded/list", id: RequestId, params: ThreadLoadedListParams, } | { "method": "thread/read", id: RequestId, params: ThreadReadParams, } | { "method": "thread/inject_items", id: RequestId, params: ThreadInjectItemsParams, } | { "method": "skills/list", id: RequestId, params: SkillsListParams, } | { "method": "marketplace/add", id: RequestId, params: MarketplaceAddParams, } | { "method": "plugin/list", id: RequestId, params: PluginListParams, } | { "method": "plugin/read", id: RequestId, params: PluginReadParams, } | { "method": "app/list", id: RequestId, params: AppsListParams, } | { "method": "fs/readFile", id: RequestId, params: FsReadFileParams, } | { "method": "fs/writeFile", id: RequestId, params: FsWriteFileParams, } | { "method": "fs/createDirectory", id: RequestId, params: FsCreateDirectoryParams, } | { "method": "fs/getMetadata", id: RequestId, params: FsGetMetadataParams, } | { "method": "fs/readDirectory", id: RequestId, params: FsReadDirectoryParams, } | { "method": "fs/remove", id: RequestId, params: FsRemoveParams, } | { "method": "fs/copy", id: RequestId, params: FsCopyParams, } | { "method": "fs/watch", id: RequestId, params: FsWatchParams, } | { "method": "fs/unwatch", id: RequestId, params: FsUnwatchParams, } | { "method": "skills/config/write", id: RequestId, params: SkillsConfigWriteParams, } | { "method": "plugin/install", id: RequestId, params: PluginInstallParams, } | { "method": "plugin/uninstall", id: RequestId, params: 
PluginUninstallParams, } | { "method": "turn/start", id: RequestId, params: TurnStartParams, } | { "method": "turn/steer", id: RequestId, params: TurnSteerParams, } | { "method": "turn/interrupt", id: RequestId, params: TurnInterruptParams, } | { "method": "review/start", id: RequestId, params: ReviewStartParams, } | { "method": "model/list", id: RequestId, params: ModelListParams, } | { "method": "experimentalFeature/list", id: RequestId, params: ExperimentalFeatureListParams, } | { "method": "experimentalFeature/enablement/set", id: RequestId, params: ExperimentalFeatureEnablementSetParams, } | { "method": "mcpServer/oauth/login", id: RequestId, params: McpServerOauthLoginParams, } | { "method": "config/mcpServer/reload", id: RequestId, params: undefined, } | { "method": "mcpServerStatus/list", id: RequestId, params: ListMcpServerStatusParams, } | { "method": "mcpServer/resource/read", id: RequestId, params: McpResourceReadParams, } | { "method": "mcpServer/tool/call", id: RequestId, params: McpServerToolCallParams, } | { "method": "windowsSandbox/setupStart", id: RequestId, params: WindowsSandboxSetupStartParams, } | { "method": "account/login/start", id: RequestId, params: LoginAccountParams, } | { "method": "account/login/cancel", id: RequestId, params: CancelLoginAccountParams, } | { "method": "account/logout", id: RequestId, params: undefined, } | { "method": "account/rateLimits/read", id: RequestId, params: undefined, } | { "method": "feedback/upload", id: RequestId, params: FeedbackUploadParams, } | { "method": "command/exec", id: RequestId, params: CommandExecParams, } | { "method": "command/exec/write", id: RequestId, params: CommandExecWriteParams, } | { "method": "command/exec/terminate", id: RequestId, params: CommandExecTerminateParams, } | { "method": "command/exec/resize", id: RequestId, params: CommandExecResizeParams, } | { "method": "config/read", id: RequestId, params: ConfigReadParams, } | { "method": "externalAgentConfig/detect", id: 
RequestId, params: ExternalAgentConfigDetectParams, } | { "method": "externalAgentConfig/import", id: RequestId, params: ExternalAgentConfigImportParams, } | { "method": "config/value/write", id: RequestId, params: ConfigValueWriteParams, } | { "method": "config/batchWrite", id: RequestId, params: ConfigBatchWriteParams, } | { "method": "configRequirements/read", id: RequestId, params: undefined, } | { "method": "account/read", id: RequestId, params: GetAccountParams, } | { "method": "getConversationSummary", id: RequestId, params: GetConversationSummaryParams, } | { "method": "gitDiffToRemote", id: RequestId, params: GitDiffToRemoteParams, } | { "method": "getAuthStatus", id: RequestId, params: GetAuthStatusParams, } | { "method": "fuzzyFileSearch", id: RequestId, params: FuzzyFileSearchParams, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/RealtimeOutputModality.ts b/codex-rs/app-server-protocol/schema/typescript/RealtimeOutputModality.ts new file mode 100644 index 0000000000..78e00e7143 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/RealtimeOutputModality.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type RealtimeOutputModality = "text" | "audio"; diff --git a/codex-rs/app-server-protocol/schema/typescript/ServerNotification.ts b/codex-rs/app-server-protocol/schema/typescript/ServerNotification.ts index a985914134..1db7027feb 100644 --- a/codex-rs/app-server-protocol/schema/typescript/ServerNotification.ts +++ b/codex-rs/app-server-protocol/schema/typescript/ServerNotification.ts @@ -43,7 +43,8 @@ import type { ThreadRealtimeItemAddedNotification } from "./v2/ThreadRealtimeIte import type { ThreadRealtimeOutputAudioDeltaNotification } from "./v2/ThreadRealtimeOutputAudioDeltaNotification"; import type { ThreadRealtimeSdpNotification } from "./v2/ThreadRealtimeSdpNotification"; import type { ThreadRealtimeStartedNotification } from "./v2/ThreadRealtimeStartedNotification"; -import type { ThreadRealtimeTranscriptUpdatedNotification } from "./v2/ThreadRealtimeTranscriptUpdatedNotification"; +import type { ThreadRealtimeTranscriptDeltaNotification } from "./v2/ThreadRealtimeTranscriptDeltaNotification"; +import type { ThreadRealtimeTranscriptDoneNotification } from "./v2/ThreadRealtimeTranscriptDoneNotification"; import type { ThreadStartedNotification } from "./v2/ThreadStartedNotification"; import type { ThreadStatusChangedNotification } from "./v2/ThreadStatusChangedNotification"; import type { ThreadTokenUsageUpdatedNotification } from "./v2/ThreadTokenUsageUpdatedNotification"; @@ -58,4 +59,4 @@ import type { WindowsWorldWritableWarningNotification } from "./v2/WindowsWorldW /** * Notification sent from the server to the client. 
*/ -export type ServerNotification = { "method": "error", "params": ErrorNotification } | { "method": "thread/started", "params": ThreadStartedNotification } | { "method": "thread/status/changed", "params": ThreadStatusChangedNotification } | { "method": "thread/archived", "params": ThreadArchivedNotification } | { "method": "thread/unarchived", "params": ThreadUnarchivedNotification } | { "method": "thread/closed", "params": ThreadClosedNotification } | { "method": "skills/changed", "params": SkillsChangedNotification } | { "method": "thread/name/updated", "params": ThreadNameUpdatedNotification } | { "method": "thread/tokenUsage/updated", "params": ThreadTokenUsageUpdatedNotification } | { "method": "turn/started", "params": TurnStartedNotification } | { "method": "hook/started", "params": HookStartedNotification } | { "method": "turn/completed", "params": TurnCompletedNotification } | { "method": "hook/completed", "params": HookCompletedNotification } | { "method": "turn/diff/updated", "params": TurnDiffUpdatedNotification } | { "method": "turn/plan/updated", "params": TurnPlanUpdatedNotification } | { "method": "item/started", "params": ItemStartedNotification } | { "method": "item/autoApprovalReview/started", "params": ItemGuardianApprovalReviewStartedNotification } | { "method": "item/autoApprovalReview/completed", "params": ItemGuardianApprovalReviewCompletedNotification } | { "method": "item/completed", "params": ItemCompletedNotification } | { "method": "rawResponseItem/completed", "params": RawResponseItemCompletedNotification } | { "method": "item/agentMessage/delta", "params": AgentMessageDeltaNotification } | { "method": "item/plan/delta", "params": PlanDeltaNotification } | { "method": "command/exec/outputDelta", "params": CommandExecOutputDeltaNotification } | { "method": "item/commandExecution/outputDelta", "params": CommandExecutionOutputDeltaNotification } | { "method": "item/commandExecution/terminalInteraction", "params": 
TerminalInteractionNotification } | { "method": "item/fileChange/outputDelta", "params": FileChangeOutputDeltaNotification } | { "method": "serverRequest/resolved", "params": ServerRequestResolvedNotification } | { "method": "item/mcpToolCall/progress", "params": McpToolCallProgressNotification } | { "method": "mcpServer/oauthLogin/completed", "params": McpServerOauthLoginCompletedNotification } | { "method": "mcpServer/startupStatus/updated", "params": McpServerStatusUpdatedNotification } | { "method": "account/updated", "params": AccountUpdatedNotification } | { "method": "account/rateLimits/updated", "params": AccountRateLimitsUpdatedNotification } | { "method": "app/list/updated", "params": AppListUpdatedNotification } | { "method": "fs/changed", "params": FsChangedNotification } | { "method": "item/reasoning/summaryTextDelta", "params": ReasoningSummaryTextDeltaNotification } | { "method": "item/reasoning/summaryPartAdded", "params": ReasoningSummaryPartAddedNotification } | { "method": "item/reasoning/textDelta", "params": ReasoningTextDeltaNotification } | { "method": "thread/compacted", "params": ContextCompactedNotification } | { "method": "model/rerouted", "params": ModelReroutedNotification } | { "method": "deprecationNotice", "params": DeprecationNoticeNotification } | { "method": "configWarning", "params": ConfigWarningNotification } | { "method": "fuzzyFileSearch/sessionUpdated", "params": FuzzyFileSearchSessionUpdatedNotification } | { "method": "fuzzyFileSearch/sessionCompleted", "params": FuzzyFileSearchSessionCompletedNotification } | { "method": "thread/realtime/started", "params": ThreadRealtimeStartedNotification } | { "method": "thread/realtime/itemAdded", "params": ThreadRealtimeItemAddedNotification } | { "method": "thread/realtime/transcriptUpdated", "params": ThreadRealtimeTranscriptUpdatedNotification } | { "method": "thread/realtime/outputAudio/delta", "params": ThreadRealtimeOutputAudioDeltaNotification } | { "method": 
"thread/realtime/sdp", "params": ThreadRealtimeSdpNotification } | { "method": "thread/realtime/error", "params": ThreadRealtimeErrorNotification } | { "method": "thread/realtime/closed", "params": ThreadRealtimeClosedNotification } | { "method": "windows/worldWritableWarning", "params": WindowsWorldWritableWarningNotification } | { "method": "windowsSandbox/setupCompleted", "params": WindowsSandboxSetupCompletedNotification } | { "method": "account/login/completed", "params": AccountLoginCompletedNotification }; +export type ServerNotification = { "method": "error", "params": ErrorNotification } | { "method": "thread/started", "params": ThreadStartedNotification } | { "method": "thread/status/changed", "params": ThreadStatusChangedNotification } | { "method": "thread/archived", "params": ThreadArchivedNotification } | { "method": "thread/unarchived", "params": ThreadUnarchivedNotification } | { "method": "thread/closed", "params": ThreadClosedNotification } | { "method": "skills/changed", "params": SkillsChangedNotification } | { "method": "thread/name/updated", "params": ThreadNameUpdatedNotification } | { "method": "thread/tokenUsage/updated", "params": ThreadTokenUsageUpdatedNotification } | { "method": "turn/started", "params": TurnStartedNotification } | { "method": "hook/started", "params": HookStartedNotification } | { "method": "turn/completed", "params": TurnCompletedNotification } | { "method": "hook/completed", "params": HookCompletedNotification } | { "method": "turn/diff/updated", "params": TurnDiffUpdatedNotification } | { "method": "turn/plan/updated", "params": TurnPlanUpdatedNotification } | { "method": "item/started", "params": ItemStartedNotification } | { "method": "item/autoApprovalReview/started", "params": ItemGuardianApprovalReviewStartedNotification } | { "method": "item/autoApprovalReview/completed", "params": ItemGuardianApprovalReviewCompletedNotification } | { "method": "item/completed", "params": ItemCompletedNotification } | { 
"method": "rawResponseItem/completed", "params": RawResponseItemCompletedNotification } | { "method": "item/agentMessage/delta", "params": AgentMessageDeltaNotification } | { "method": "item/plan/delta", "params": PlanDeltaNotification } | { "method": "command/exec/outputDelta", "params": CommandExecOutputDeltaNotification } | { "method": "item/commandExecution/outputDelta", "params": CommandExecutionOutputDeltaNotification } | { "method": "item/commandExecution/terminalInteraction", "params": TerminalInteractionNotification } | { "method": "item/fileChange/outputDelta", "params": FileChangeOutputDeltaNotification } | { "method": "serverRequest/resolved", "params": ServerRequestResolvedNotification } | { "method": "item/mcpToolCall/progress", "params": McpToolCallProgressNotification } | { "method": "mcpServer/oauthLogin/completed", "params": McpServerOauthLoginCompletedNotification } | { "method": "mcpServer/startupStatus/updated", "params": McpServerStatusUpdatedNotification } | { "method": "account/updated", "params": AccountUpdatedNotification } | { "method": "account/rateLimits/updated", "params": AccountRateLimitsUpdatedNotification } | { "method": "app/list/updated", "params": AppListUpdatedNotification } | { "method": "fs/changed", "params": FsChangedNotification } | { "method": "item/reasoning/summaryTextDelta", "params": ReasoningSummaryTextDeltaNotification } | { "method": "item/reasoning/summaryPartAdded", "params": ReasoningSummaryPartAddedNotification } | { "method": "item/reasoning/textDelta", "params": ReasoningTextDeltaNotification } | { "method": "thread/compacted", "params": ContextCompactedNotification } | { "method": "model/rerouted", "params": ModelReroutedNotification } | { "method": "deprecationNotice", "params": DeprecationNoticeNotification } | { "method": "configWarning", "params": ConfigWarningNotification } | { "method": "fuzzyFileSearch/sessionUpdated", "params": FuzzyFileSearchSessionUpdatedNotification } | { "method": 
"fuzzyFileSearch/sessionCompleted", "params": FuzzyFileSearchSessionCompletedNotification } | { "method": "thread/realtime/started", "params": ThreadRealtimeStartedNotification } | { "method": "thread/realtime/itemAdded", "params": ThreadRealtimeItemAddedNotification } | { "method": "thread/realtime/transcript/delta", "params": ThreadRealtimeTranscriptDeltaNotification } | { "method": "thread/realtime/transcript/done", "params": ThreadRealtimeTranscriptDoneNotification } | { "method": "thread/realtime/outputAudio/delta", "params": ThreadRealtimeOutputAudioDeltaNotification } | { "method": "thread/realtime/sdp", "params": ThreadRealtimeSdpNotification } | { "method": "thread/realtime/error", "params": ThreadRealtimeErrorNotification } | { "method": "thread/realtime/closed", "params": ThreadRealtimeClosedNotification } | { "method": "windows/worldWritableWarning", "params": WindowsWorldWritableWarningNotification } | { "method": "windowsSandbox/setupCompleted", "params": WindowsSandboxSetupCompletedNotification } | { "method": "account/login/completed", "params": AccountLoginCompletedNotification }; diff --git a/codex-rs/app-server-protocol/schema/typescript/ThreadMemoryMode.ts b/codex-rs/app-server-protocol/schema/typescript/ThreadMemoryMode.ts new file mode 100644 index 0000000000..74a7e759e7 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/ThreadMemoryMode.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type ThreadMemoryMode = "enabled" | "disabled"; diff --git a/codex-rs/app-server-protocol/schema/typescript/index.ts b/codex-rs/app-server-protocol/schema/typescript/index.ts index 2a35207896..7bbb417fdc 100644 --- a/codex-rs/app-server-protocol/schema/typescript/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/index.ts @@ -49,6 +49,7 @@ export type { ParsedCommand } from "./ParsedCommand"; export type { Personality } from "./Personality"; export type { PlanType } from "./PlanType"; export type { RealtimeConversationVersion } from "./RealtimeConversationVersion"; +export type { RealtimeOutputModality } from "./RealtimeOutputModality"; export type { RealtimeVoice } from "./RealtimeVoice"; export type { RealtimeVoicesList } from "./RealtimeVoicesList"; export type { ReasoningEffort } from "./ReasoningEffort"; @@ -68,6 +69,7 @@ export type { SessionSource } from "./SessionSource"; export type { Settings } from "./Settings"; export type { SubAgentSource } from "./SubAgentSource"; export type { ThreadId } from "./ThreadId"; +export type { ThreadMemoryMode } from "./ThreadMemoryMode"; export type { Tool } from "./Tool"; export type { Verbosity } from "./Verbosity"; export type { WebSearchAction } from "./WebSearchAction"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/CommandAction.ts b/codex-rs/app-server-protocol/schema/typescript/v2/CommandAction.ts index ac1314c89b..a17fb06a0c 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/CommandAction.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/CommandAction.ts @@ -1,5 +1,6 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AbsolutePathBuf } from "../AbsolutePathBuf"; -export type CommandAction = { "type": "read", command: string, name: string, path: string, } | { "type": "listFiles", command: string, path: string | null, } | { "type": "search", command: string, query: string | null, path: string | null, } | { "type": "unknown", command: string, }; +export type CommandAction = { "type": "read", command: string, name: string, path: AbsolutePathBuf, } | { "type": "listFiles", command: string, path: string | null, } | { "type": "search", command: string, query: string | null, path: string | null, } | { "type": "unknown", command: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts index e1330e2591..59da1de945 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/CommandExecutionRequestApprovalParams.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { AdditionalPermissionProfile } from "./AdditionalPermissionProfile"; import type { CommandAction } from "./CommandAction"; import type { CommandExecutionApprovalDecision } from "./CommandExecutionApprovalDecision"; @@ -34,7 +35,7 @@ command?: string | null, /** * The command's working directory. */ -cwd?: string | null, +cwd?: AbsolutePathBuf | null, /** * Best-effort parsed command actions for friendly display. 
*/ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItem.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItem.ts index 45e5585a77..c9921ccbc6 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItem.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItem.ts @@ -2,9 +2,10 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. import type { ExternalAgentConfigMigrationItemType } from "./ExternalAgentConfigMigrationItemType"; +import type { MigrationDetails } from "./MigrationDetails"; export type ExternalAgentConfigMigrationItem = { itemType: ExternalAgentConfigMigrationItemType, description: string, /** * Null or empty means home-scoped migration; non-empty means repo-scoped migration. */ -cwd: string | null, }; +cwd: string | null, details: MigrationDetails | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItemType.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItemType.ts index c9bd160b1c..dedc124f04 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItemType.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ExternalAgentConfigMigrationItemType.ts @@ -2,4 +2,4 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
-export type ExternalAgentConfigMigrationItemType = "AGENTS_MD" | "CONFIG" | "SKILLS" | "MCP_SERVER_CONFIG"; +export type ExternalAgentConfigMigrationItemType = "AGENTS_MD" | "CONFIG" | "SKILLS" | "PLUGINS" | "MCP_SERVER_CONFIG"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/FsGetMetadataResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/FsGetMetadataResponse.ts index 14b4db7e3f..a1a127e192 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/FsGetMetadataResponse.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/FsGetMetadataResponse.ts @@ -7,13 +7,17 @@ */ export type FsGetMetadataResponse = { /** - * Whether the path currently resolves to a directory. + * Whether the path resolves to a directory. */ isDirectory: boolean, /** - * Whether the path currently resolves to a regular file. + * Whether the path resolves to a regular file. */ isFile: boolean, +/** + * Whether the path itself is a symbolic link. + */ +isSymlink: boolean, /** * File creation time in Unix milliseconds when available, otherwise `0`. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/GuardianApprovalReviewAction.ts b/codex-rs/app-server-protocol/schema/typescript/v2/GuardianApprovalReviewAction.ts index 101fe3f3ef..4bbfe24190 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/GuardianApprovalReviewAction.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/GuardianApprovalReviewAction.ts @@ -1,7 +1,8 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { GuardianCommandSource } from "./GuardianCommandSource"; import type { NetworkApprovalProtocol } from "./NetworkApprovalProtocol"; -export type GuardianApprovalReviewAction = { "type": "command", source: GuardianCommandSource, command: string, cwd: string, } | { "type": "execve", source: GuardianCommandSource, program: string, argv: Array, cwd: string, } | { "type": "applyPatch", cwd: string, files: Array, } | { "type": "networkAccess", target: string, host: string, protocol: NetworkApprovalProtocol, port: number, } | { "type": "mcpToolCall", server: string, toolName: string, connectorId: string | null, connectorName: string | null, toolTitle: string | null, }; +export type GuardianApprovalReviewAction = { "type": "command", source: GuardianCommandSource, command: string, cwd: AbsolutePathBuf, } | { "type": "execve", source: GuardianCommandSource, program: string, argv: Array, cwd: AbsolutePathBuf, } | { "type": "applyPatch", cwd: AbsolutePathBuf, files: Array, } | { "type": "networkAccess", target: string, host: string, protocol: NetworkApprovalProtocol, port: number, } | { "type": "mcpToolCall", server: string, toolName: string, connectorId: string | null, connectorName: string | null, toolTitle: string | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/HookRunSummary.ts b/codex-rs/app-server-protocol/schema/typescript/v2/HookRunSummary.ts index 68fb4e10af..75ab780b93 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/HookRunSummary.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/HookRunSummary.ts @@ -1,11 +1,13 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { HookEventName } from "./HookEventName"; import type { HookExecutionMode } from "./HookExecutionMode"; import type { HookHandlerType } from "./HookHandlerType"; import type { HookOutputEntry } from "./HookOutputEntry"; import type { HookRunStatus } from "./HookRunStatus"; import type { HookScope } from "./HookScope"; +import type { HookSource } from "./HookSource"; -export type HookRunSummary = { id: string, eventName: HookEventName, handlerType: HookHandlerType, executionMode: HookExecutionMode, scope: HookScope, sourcePath: string, displayOrder: bigint, status: HookRunStatus, statusMessage: string | null, startedAt: bigint, completedAt: bigint | null, durationMs: bigint | null, entries: Array, }; +export type HookRunSummary = { id: string, eventName: HookEventName, handlerType: HookHandlerType, executionMode: HookExecutionMode, scope: HookScope, sourcePath: AbsolutePathBuf, source: HookSource, displayOrder: bigint, status: HookRunStatus, statusMessage: string | null, startedAt: bigint, completedAt: bigint | null, durationMs: bigint | null, entries: Array, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/HookSource.ts b/codex-rs/app-server-protocol/schema/typescript/v2/HookSource.ts new file mode 100644 index 0000000000..7edf61f918 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/HookSource.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type HookSource = "system" | "user" | "project" | "mdm" | "sessionFlags" | "legacyManagedConfigFile" | "legacyManagedConfigMdm" | "unknown"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddParams.ts new file mode 100644 index 0000000000..23d1604812 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddParams.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type MarketplaceAddParams = { source: string, refName?: string | null, sparsePaths?: Array | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddResponse.ts new file mode 100644 index 0000000000..8657d44c3d --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/MarketplaceAddResponse.ts @@ -0,0 +1,6 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; + +export type MarketplaceAddResponse = { marketplaceName: string, installedRoot: AbsolutePathBuf, alreadyAdded: boolean, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/MigrationDetails.ts b/codex-rs/app-server-protocol/schema/typescript/v2/MigrationDetails.ts new file mode 100644 index 0000000000..9305335d9c --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/MigrationDetails.ts @@ -0,0 +1,6 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { PluginsMigration } from "./PluginsMigration"; + +export type MigrationDetails = { plugins: Array, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts b/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts index d1cd1ab298..04e07ef1de 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts @@ -29,4 +29,4 @@ unixSockets: { [key in string]?: NetworkUnixSocketPermission } | null, /** * Legacy compatibility view derived from `unix_sockets`. */ -allowUnixSockets: Array | null, allowLocalBinding: boolean | null, dangerFullAccessDenylistOnly: boolean | null, }; +allowUnixSockets: Array | null, allowLocalBinding: boolean | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/PluginsMigration.ts b/codex-rs/app-server-protocol/schema/typescript/v2/PluginsMigration.ts new file mode 100644 index 0000000000..0dce06d9ab --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/PluginsMigration.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type PluginsMigration = { marketplaceName: string, pluginNames: Array, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/SkillInterface.ts b/codex-rs/app-server-protocol/schema/typescript/v2/SkillInterface.ts index 86c37a0bd7..2361afcf0f 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/SkillInterface.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/SkillInterface.ts @@ -1,5 +1,6 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AbsolutePathBuf } from "../AbsolutePathBuf"; -export type SkillInterface = { displayName?: string, shortDescription?: string, iconSmall?: string, iconLarge?: string, brandColor?: string, defaultPrompt?: string, }; +export type SkillInterface = { displayName?: string, shortDescription?: string, iconSmall?: AbsolutePathBuf, iconLarge?: AbsolutePathBuf, brandColor?: string, defaultPrompt?: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/SkillMetadata.ts b/codex-rs/app-server-protocol/schema/typescript/v2/SkillMetadata.ts index b620fffbdb..e43484d1f4 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/SkillMetadata.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/SkillMetadata.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { SkillDependencies } from "./SkillDependencies"; import type { SkillInterface } from "./SkillInterface"; import type { SkillScope } from "./SkillScope"; @@ -9,4 +10,4 @@ export type SkillMetadata = { name: string, description: string, /** * Legacy short_description from SKILL.md. Prefer SKILL.json interface.short_description. */ -shortDescription?: string, interface?: SkillInterface, dependencies?: SkillDependencies, path: string, scope: SkillScope, enabled: boolean, }; +shortDescription?: string, interface?: SkillInterface, dependencies?: SkillDependencies, path: AbsolutePathBuf, scope: SkillScope, enabled: boolean, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/SkillSummary.ts b/codex-rs/app-server-protocol/schema/typescript/v2/SkillSummary.ts index ea37393536..05aa4031a8 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/SkillSummary.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/SkillSummary.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! 
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { SkillInterface } from "./SkillInterface"; -export type SkillSummary = { name: string, description: string, shortDescription: string | null, interface: SkillInterface | null, path: string, enabled: boolean, }; +export type SkillSummary = { name: string, description: string, shortDescription: string | null, interface: SkillInterface | null, path: AbsolutePathBuf, enabled: boolean, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/Thread.ts b/codex-rs/app-server-protocol/schema/typescript/v2/Thread.ts index 57ef3c1075..8c4c9394bf 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/Thread.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/Thread.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { GitInfo } from "./GitInfo"; import type { SessionSource } from "./SessionSource"; import type { ThreadStatus } from "./ThreadStatus"; @@ -42,7 +43,7 @@ path: string | null, /** * Working directory captured for the thread. */ -cwd: string, +cwd: AbsolutePathBuf, /** * Version of the CLI that created the thread. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkResponse.ts index df6c50227d..470e98c9b8 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkResponse.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkResponse.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { ReasoningEffort } from "../ReasoningEffort"; import type { ServiceTier } from "../ServiceTier"; import type { ApprovalsReviewer } from "./ApprovalsReviewer"; @@ -8,11 +9,11 @@ import type { AskForApproval } from "./AskForApproval"; import type { SandboxPolicy } from "./SandboxPolicy"; import type { Thread } from "./Thread"; -export type ThreadForkResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: string, +export type ThreadForkResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: AbsolutePathBuf, /** * Instruction source files currently loaded for this thread. */ -instructionSources: Array, approvalPolicy: AskForApproval, +instructionSources: Array, approvalPolicy: AskForApproval, /** * Reviewer currently used for approval requests on this thread. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsParams.ts new file mode 100644 index 0000000000..4a49224a39 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsParams.ts @@ -0,0 +1,10 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { JsonValue } from "../serde_json/JsonValue"; + +export type ThreadInjectItemsParams = { threadId: string, +/** + * Raw Responses API items to append to the thread's model-visible history. 
+ */ +items: Array, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsResponse.ts new file mode 100644 index 0000000000..60dcf0d0b3 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadInjectItemsResponse.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type ThreadInjectItemsResponse = Record; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadItem.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadItem.ts index 54d3eaaa8b..747a70c54d 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadItem.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadItem.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { MessagePhase } from "../MessagePhase"; import type { ReasoningEffort } from "../ReasoningEffort"; import type { JsonValue } from "../serde_json/JsonValue"; @@ -30,7 +31,7 @@ command: string, /** * The command's working directory. */ -cwd: string, +cwd: AbsolutePathBuf, /** * Identifier for the underlying PTY process (when available). */ @@ -52,7 +53,7 @@ exitCode: number | null, /** * The duration of the command execution in milliseconds. 
*/ -durationMs: number | null, } | { "type": "fileChange", id: string, changes: Array, status: PatchApplyStatus, } | { "type": "mcpToolCall", id: string, server: string, tool: string, status: McpToolCallStatus, arguments: JsonValue, result: McpToolCallResult | null, error: McpToolCallError | null, +durationMs: number | null, } | { "type": "fileChange", id: string, changes: Array, status: PatchApplyStatus, } | { "type": "mcpToolCall", id: string, server: string, tool: string, status: McpToolCallStatus, arguments: JsonValue, mcpAppResourceUri?: string, result: McpToolCallResult | null, error: McpToolCallError | null, /** * The duration of the MCP tool call in milliseconds. */ @@ -97,4 +98,4 @@ reasoningEffort: ReasoningEffort | null, /** * Last known status of the target agents, when available. */ -agentsStates: { [key in string]?: CollabAgentState }, } | { "type": "webSearch", id: string, query: string, action: WebSearchAction | null, } | { "type": "imageView", id: string, path: string, } | { "type": "imageGeneration", id: string, status: string, revisedPrompt: string | null, result: string, savedPath?: string, } | { "type": "enteredReviewMode", id: string, review: string, } | { "type": "exitedReviewMode", id: string, review: string, } | { "type": "contextCompaction", id: string, }; +agentsStates: { [key in string]?: CollabAgentState }, } | { "type": "webSearch", id: string, query: string, action: WebSearchAction | null, } | { "type": "imageView", id: string, path: AbsolutePathBuf, } | { "type": "imageGeneration", id: string, status: string, revisedPrompt: string | null, result: string, savedPath?: AbsolutePathBuf, } | { "type": "enteredReviewMode", id: string, review: string, } | { "type": "exitedReviewMode", id: string, review: string, } | { "type": "contextCompaction", id: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptUpdatedNotification.ts 
b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDeltaNotification.ts similarity index 60% rename from codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptUpdatedNotification.ts rename to codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDeltaNotification.ts index d2940029f2..805eeddd76 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptUpdatedNotification.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDeltaNotification.ts @@ -6,4 +6,8 @@ * EXPERIMENTAL - flat transcript delta emitted whenever realtime * transcript text changes. */ -export type ThreadRealtimeTranscriptUpdatedNotification = { threadId: string, role: string, text: string, }; +export type ThreadRealtimeTranscriptDeltaNotification = { threadId: string, role: string, +/** + * Live transcript delta from the realtime event. + */ +delta: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDoneNotification.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDoneNotification.ts new file mode 100644 index 0000000000..d4667ad039 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadRealtimeTranscriptDoneNotification.ts @@ -0,0 +1,13 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +/** + * EXPERIMENTAL - final transcript text emitted when realtime completes + * a transcript part. + */ +export type ThreadRealtimeTranscriptDoneNotification = { threadId: string, role: string, +/** + * Final complete text for the transcript part. 
+ */ +text: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeResponse.ts index 3234e8b4b3..177add8350 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeResponse.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeResponse.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { ReasoningEffort } from "../ReasoningEffort"; import type { ServiceTier } from "../ServiceTier"; import type { ApprovalsReviewer } from "./ApprovalsReviewer"; @@ -8,11 +9,11 @@ import type { AskForApproval } from "./AskForApproval"; import type { SandboxPolicy } from "./SandboxPolicy"; import type { Thread } from "./Thread"; -export type ThreadResumeResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: string, +export type ThreadResumeResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: AbsolutePathBuf, /** * Instruction source files currently loaded for this thread. */ -instructionSources: Array, approvalPolicy: AskForApproval, +instructionSources: Array, approvalPolicy: AskForApproval, /** * Reviewer currently used for approval requests on this thread. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartResponse.ts index e3355b9108..fd84a41ae8 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartResponse.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartResponse.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). 
Do not edit this file manually. +import type { AbsolutePathBuf } from "../AbsolutePathBuf"; import type { ReasoningEffort } from "../ReasoningEffort"; import type { ServiceTier } from "../ServiceTier"; import type { ApprovalsReviewer } from "./ApprovalsReviewer"; @@ -8,11 +9,11 @@ import type { AskForApproval } from "./AskForApproval"; import type { SandboxPolicy } from "./SandboxPolicy"; import type { Thread } from "./Thread"; -export type ThreadStartResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: string, +export type ThreadStartResponse = { thread: Thread, model: string, modelProvider: string, serviceTier: ServiceTier | null, cwd: AbsolutePathBuf, /** * Instruction source files currently loaded for this thread. */ -instructionSources: Array, approvalPolicy: AskForApproval, +instructionSources: Array, approvalPolicy: AskForApproval, /** * Reviewer currently used for approval requests on this thread. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts index f815fee3e9..69cec75e77 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts @@ -139,6 +139,7 @@ export type { HookPromptFragment } from "./HookPromptFragment"; export type { HookRunStatus } from "./HookRunStatus"; export type { HookRunSummary } from "./HookRunSummary"; export type { HookScope } from "./HookScope"; +export type { HookSource } from "./HookSource"; export type { HookStartedNotification } from "./HookStartedNotification"; export type { ItemCompletedNotification } from "./ItemCompletedNotification"; export type { ItemGuardianApprovalReviewCompletedNotification } from "./ItemGuardianApprovalReviewCompletedNotification"; @@ -149,6 +150,8 @@ export type { ListMcpServerStatusResponse } from "./ListMcpServerStatusResponse" export type { LoginAccountParams } from 
"./LoginAccountParams"; export type { LoginAccountResponse } from "./LoginAccountResponse"; export type { LogoutAccountResponse } from "./LogoutAccountResponse"; +export type { MarketplaceAddParams } from "./MarketplaceAddParams"; +export type { MarketplaceAddResponse } from "./MarketplaceAddResponse"; export type { MarketplaceInterface } from "./MarketplaceInterface"; export type { MarketplaceLoadErrorInfo } from "./MarketplaceLoadErrorInfo"; export type { McpAuthStatus } from "./McpAuthStatus"; @@ -196,6 +199,7 @@ export type { McpToolCallStatus } from "./McpToolCallStatus"; export type { MemoryCitation } from "./MemoryCitation"; export type { MemoryCitationEntry } from "./MemoryCitationEntry"; export type { MergeStrategy } from "./MergeStrategy"; +export type { MigrationDetails } from "./MigrationDetails"; export type { Model } from "./Model"; export type { ModelAvailabilityNux } from "./ModelAvailabilityNux"; export type { ModelListParams } from "./ModelListParams"; @@ -234,6 +238,7 @@ export type { PluginSource } from "./PluginSource"; export type { PluginSummary } from "./PluginSummary"; export type { PluginUninstallParams } from "./PluginUninstallParams"; export type { PluginUninstallResponse } from "./PluginUninstallResponse"; +export type { PluginsMigration } from "./PluginsMigration"; export type { ProfileV2 } from "./ProfileV2"; export type { RateLimitSnapshot } from "./RateLimitSnapshot"; export type { RateLimitWindow } from "./RateLimitWindow"; @@ -282,6 +287,8 @@ export type { ThreadCompactStartParams } from "./ThreadCompactStartParams"; export type { ThreadCompactStartResponse } from "./ThreadCompactStartResponse"; export type { ThreadForkParams } from "./ThreadForkParams"; export type { ThreadForkResponse } from "./ThreadForkResponse"; +export type { ThreadInjectItemsParams } from "./ThreadInjectItemsParams"; +export type { ThreadInjectItemsResponse } from "./ThreadInjectItemsResponse"; export type { ThreadItem } from "./ThreadItem"; export type { 
ThreadListParams } from "./ThreadListParams"; export type { ThreadListResponse } from "./ThreadListResponse"; @@ -301,7 +308,8 @@ export type { ThreadRealtimeOutputAudioDeltaNotification } from "./ThreadRealtim export type { ThreadRealtimeSdpNotification } from "./ThreadRealtimeSdpNotification"; export type { ThreadRealtimeStartTransport } from "./ThreadRealtimeStartTransport"; export type { ThreadRealtimeStartedNotification } from "./ThreadRealtimeStartedNotification"; -export type { ThreadRealtimeTranscriptUpdatedNotification } from "./ThreadRealtimeTranscriptUpdatedNotification"; +export type { ThreadRealtimeTranscriptDeltaNotification } from "./ThreadRealtimeTranscriptDeltaNotification"; +export type { ThreadRealtimeTranscriptDoneNotification } from "./ThreadRealtimeTranscriptDoneNotification"; export type { ThreadResumeParams } from "./ThreadResumeParams"; export type { ThreadResumeResponse } from "./ThreadResumeResponse"; export type { ThreadRollbackParams } from "./ThreadRollbackParams"; diff --git a/codex-rs/app-server-protocol/src/lib.rs b/codex-rs/app-server-protocol/src/lib.rs index d5c2f4b243..46f0c9ae41 100644 --- a/codex-rs/app-server-protocol/src/lib.rs +++ b/codex-rs/app-server-protocol/src/lib.rs @@ -4,7 +4,6 @@ mod jsonrpc_lite; mod protocol; mod schema_fixtures; -pub use codex_git_utils::GitSha; pub use experimental_api::*; pub use export::GenerateTsOptions; pub use export::generate_internal_json_schema; @@ -30,6 +29,7 @@ pub use protocol::v1::GetConversationSummaryParams; pub use protocol::v1::GetConversationSummaryResponse; pub use protocol::v1::GitDiffToRemoteParams; pub use protocol::v1::GitDiffToRemoteResponse; +pub use protocol::v1::GitSha; pub use protocol::v1::InitializeCapabilities; pub use protocol::v1::InitializeParams; pub use protocol::v1::InitializeResponse; diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index 3450e41534..7f555e0688 100644 --- 
a/codex-rs/app-server-protocol/src/protocol/common.rs +++ b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -284,6 +284,16 @@ client_request_definitions! { params: v2::ThreadMetadataUpdateParams, response: v2::ThreadMetadataUpdateResponse, }, + #[experimental("thread/memoryMode/set")] + ThreadMemoryModeSet => "thread/memoryMode/set" { + params: v2::ThreadMemoryModeSetParams, + response: v2::ThreadMemoryModeSetResponse, + }, + #[experimental("memory/reset")] + MemoryReset => "memory/reset" { + params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>, + response: v2::MemoryResetResponse, + }, ThreadUnarchive => "thread/unarchive" { params: v2::ThreadUnarchiveParams, response: v2::ThreadUnarchiveResponse, @@ -317,10 +327,19 @@ client_request_definitions! { params: v2::ThreadReadParams, response: v2::ThreadReadResponse, }, + /// Append raw Responses API items to the thread history without starting a user turn. + ThreadInjectItems => "thread/inject_items" { + params: v2::ThreadInjectItemsParams, + response: v2::ThreadInjectItemsResponse, + }, SkillsList => "skills/list" { params: v2::SkillsListParams, response: v2::SkillsListResponse, }, + MarketplaceAdd => "marketplace/add" { + params: v2::MarketplaceAddParams, + response: v2::MarketplaceAddResponse, + }, PluginList => "plugin/list" { params: v2::PluginListParams, response: v2::PluginListResponse, @@ -745,6 +764,7 @@ macro_rules! server_notification_definitions { Display, ExperimentalApi, )] + #[allow(clippy::large_enum_variant)] #[serde(tag = "method", content = "params", rename_all = "camelCase")] #[strum(serialize_all = "camelCase")] pub enum ServerNotification { @@ -1012,8 +1032,10 @@ server_notification_definitions! 
{ ThreadRealtimeStarted => "thread/realtime/started" (v2::ThreadRealtimeStartedNotification), #[experimental("thread/realtime/itemAdded")] ThreadRealtimeItemAdded => "thread/realtime/itemAdded" (v2::ThreadRealtimeItemAddedNotification), - #[experimental("thread/realtime/transcriptUpdated")] - ThreadRealtimeTranscriptUpdated => "thread/realtime/transcriptUpdated" (v2::ThreadRealtimeTranscriptUpdatedNotification), + #[experimental("thread/realtime/transcript/delta")] + ThreadRealtimeTranscriptDelta => "thread/realtime/transcript/delta" (v2::ThreadRealtimeTranscriptDeltaNotification), + #[experimental("thread/realtime/transcript/done")] + ThreadRealtimeTranscriptDone => "thread/realtime/transcript/done" (v2::ThreadRealtimeTranscriptDoneNotification), #[experimental("thread/realtime/outputAudio/delta")] ThreadRealtimeOutputAudioDelta => "thread/realtime/outputAudio/delta" (v2::ThreadRealtimeOutputAudioDeltaNotification), #[experimental("thread/realtime/sdp")] @@ -1046,22 +1068,23 @@ mod tests { use codex_protocol::account::PlanType; use codex_protocol::parse_command::ParsedCommand; use codex_protocol::protocol::RealtimeConversationVersion; + use codex_protocol::protocol::RealtimeOutputModality; + use codex_protocol::protocol::RealtimeVoice; use codex_utils_absolute_path::AbsolutePathBuf; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use serde_json::json; use std::path::PathBuf; fn absolute_path_string(path: &str) -> String { - let trimmed = path.trim_start_matches('/'); - if cfg!(windows) { - format!(r"C:\{}", trimmed.replace('/', "\\")) - } else { - format!("/{trimmed}") - } + let path = format!("/{}", path.trim_start_matches('/')); + test_path_buf(&path).display().to_string() } fn absolute_path(path: &str) -> AbsolutePathBuf { - AbsolutePathBuf::from_absolute_path(absolute_path_string(path)).expect("absolute path") + let path = format!("/{}", 
path.trim_start_matches('/')); + test_path_buf(&path).abs() } #[test] @@ -1392,7 +1415,7 @@ mod tests { updated_at: 2, status: v2::ThreadStatus::Idle, path: None, - cwd: PathBuf::from("/tmp"), + cwd: absolute_path("/tmp"), cli_version: "0.0.0".to_string(), source: v2::SessionSource::Exec, agent_nickname: None, @@ -1404,8 +1427,8 @@ mod tests { model: "gpt-5".to_string(), model_provider: "openai".to_string(), service_tier: None, - cwd: PathBuf::from("/tmp"), - instruction_sources: vec![PathBuf::from("/tmp/AGENTS.md")], + cwd: absolute_path("/tmp"), + instruction_sources: vec![absolute_path("/tmp/AGENTS.md")], approval_policy: v2::AskForApproval::OnFailure, approvals_reviewer: v2::ApprovalsReviewer::User, sandbox: v2::SandboxPolicy::DangerFullAccess, @@ -1432,7 +1455,7 @@ mod tests { "type": "idle" }, "path": null, - "cwd": "/tmp", + "cwd": absolute_path_string("tmp"), "cliVersion": "0.0.0", "source": "exec", "agentNickname": null, @@ -1444,8 +1467,8 @@ mod tests { "model": "gpt-5", "modelProvider": "openai", "serviceTier": null, - "cwd": "/tmp", - "instructionSources": ["/tmp/AGENTS.md"], + "cwd": absolute_path_string("tmp"), + "instructionSources": [absolute_path_string("tmp/AGENTS.md")], "approvalPolicy": "on-failure", "approvalsReviewer": "user", "sandbox": { @@ -1774,10 +1797,11 @@ mod tests { request_id: RequestId::Integer(9), params: v2::ThreadRealtimeStartParams { thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("You are on a call".to_string())), session_id: Some("sess_456".to_string()), transport: None, - voice: Some(codex_protocol::protocol::RealtimeVoice::Marin), + voice: Some(RealtimeVoice::Marin), }, }; assert_eq!( @@ -1786,6 +1810,7 @@ mod tests { "id": 9, "params": { "threadId": "thr_123", + "outputModality": "audio", "prompt": "You are on a call", "sessionId": "sess_456", "transport": null, @@ -1803,6 +1828,7 @@ mod tests { request_id: RequestId::Integer(9), params: v2::ThreadRealtimeStartParams { 
thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: None, session_id: None, transport: None, @@ -1815,6 +1841,7 @@ mod tests { "id": 9, "params": { "threadId": "thr_123", + "outputModality": "audio", "sessionId": null, "transport": null, "voice": null @@ -1827,6 +1854,7 @@ mod tests { request_id: RequestId::Integer(9), params: v2::ThreadRealtimeStartParams { thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(None), session_id: None, transport: None, @@ -1839,6 +1867,7 @@ mod tests { "id": 9, "params": { "threadId": "thr_123", + "outputModality": "audio", "prompt": null, "sessionId": null, "transport": null, @@ -1853,6 +1882,7 @@ mod tests { "id": 9, "params": { "threadId": "thr_123", + "outputModality": "audio", "sessionId": null, "transport": null, "voice": null @@ -1868,6 +1898,7 @@ mod tests { "id": 9, "params": { "threadId": "thr_123", + "outputModality": "audio", "prompt": null, "sessionId": null, "transport": null, @@ -1952,6 +1983,7 @@ mod tests { request_id: RequestId::Integer(1), params: v2::ThreadRealtimeStartParams { thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("You are on a call".to_string())), session_id: None, transport: None, diff --git a/codex-rs/app-server-protocol/src/protocol/item_builders.rs b/codex-rs/app-server-protocol/src/protocol/item_builders.rs index e6d9588f1c..f69c414b02 100644 --- a/codex-rs/app-server-protocol/src/protocol/item_builders.rs +++ b/codex-rs/app-server-protocol/src/protocol/item_builders.rs @@ -78,7 +78,7 @@ pub fn build_command_execution_approval_request_item( .parsed_cmd .iter() .cloned() - .map(CommandAction::from) + .map(|parsed| CommandAction::from_core_with_cwd(parsed, &payload.cwd)) .collect(), aggregated_output: None, exit_code: None, @@ -98,7 +98,7 @@ pub fn build_command_execution_begin_item(payload: &ExecCommandBeginEvent) -> Th .parsed_cmd .iter() .cloned() - 
.map(CommandAction::from) + .map(|parsed| CommandAction::from_core_with_cwd(parsed, &payload.cwd)) .collect(), aggregated_output: None, exit_code: None, @@ -125,7 +125,7 @@ pub fn build_command_execution_end_item(payload: &ExecCommandEndEvent) -> Thread .parsed_cmd .iter() .cloned() - .map(CommandAction::from) + .map(|parsed| CommandAction::from_core_with_cwd(parsed, &payload.cwd)) .collect(), aggregated_output, exit_code: Some(payload.exit_code), @@ -179,7 +179,10 @@ pub fn build_item_from_guardian_event( command: command.clone(), }] } else { - parsed_cmd.into_iter().map(CommandAction::from).collect() + parsed_cmd + .into_iter() + .map(|parsed| CommandAction::from_core_with_cwd(parsed, cwd)) + .collect() }; Some(ThreadItem::CommandExecution { id: id.clone(), diff --git a/codex-rs/app-server-protocol/src/protocol/thread_history.rs b/codex-rs/app-server-protocol/src/protocol/thread_history.rs index d2296c75c5..5c327187e9 100644 --- a/codex-rs/app-server-protocol/src/protocol/thread_history.rs +++ b/codex-rs/app-server-protocol/src/protocol/thread_history.rs @@ -123,6 +123,20 @@ impl ThreadHistoryBuilder { .or_else(|| self.turns.last().cloned()) } + /// Returns the index of the active turn snapshot within the finished turn list. + /// + /// When a turn is still open, this is the index it will occupy after + /// `finish`. When no turn is open, it is the index of the last finished turn. 
+ pub fn active_turn_position(&self) -> Option { + if self.current_turn.is_some() { + Some(self.turns.len()) + } else if self.turns.is_empty() { + None + } else { + Some(self.turns.len() - 1) + } + } + pub fn has_active_turn(&self) -> bool { self.current_turn.is_some() } @@ -502,6 +516,7 @@ impl ThreadHistoryBuilder { .arguments .clone() .unwrap_or(serde_json::Value::Null), + mcp_app_resource_uri: payload.mcp_app_resource_uri.clone(), result: None, error: None, duration_ms: None, @@ -518,11 +533,11 @@ impl ThreadHistoryBuilder { let duration_ms = i64::try_from(payload.duration.as_millis()).ok(); let (result, error) = match &payload.result { Ok(value) => ( - Some(McpToolCallResult { + Some(Box::new(McpToolCallResult { content: value.content.clone(), structured_content: value.structured_content.clone(), meta: value.meta.clone(), - }), + })), None, ), Err(message) => ( @@ -542,6 +557,7 @@ impl ThreadHistoryBuilder { .arguments .clone() .unwrap_or(serde_json::Value::Null), + mcp_app_resource_uri: payload.mcp_app_resource_uri.clone(), result, error, duration_ms, @@ -552,7 +568,7 @@ impl ThreadHistoryBuilder { fn handle_view_image_tool_call(&mut self, payload: &ViewImageToolCallEvent) { let item = ThreadItem::ImageView { id: payload.call_id.clone(), - path: payload.path.to_string_lossy().into_owned(), + path: payload.path.clone(), }; self.upsert_item_in_current_turn(item); } @@ -1193,6 +1209,8 @@ mod tests { use codex_protocol::protocol::TurnStartedEvent; use codex_protocol::protocol::UserMessageEvent; use codex_protocol::protocol::WebSearchEndEvent; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use std::path::PathBuf; use std::time::Duration; @@ -1397,7 +1415,7 @@ mod tests { status: "completed".into(), revised_prompt: Some("final prompt".into()), result: "Zm9v".into(), - saved_path: Some("/tmp/ig_123.png".into()), + saved_path: 
Some(test_path_buf("/tmp/ig_123.png").abs()), })), RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-image".into(), @@ -1431,7 +1449,7 @@ mod tests { status: "completed".into(), revised_prompt: Some("final prompt".into()), result: "Zm9v".into(), - saved_path: Some("/tmp/ig_123.png".into()), + saved_path: Some(test_path_buf("/tmp/ig_123.png").abs()), }, ], } @@ -1786,7 +1804,7 @@ mod tests { process_id: Some("pid-1".into()), turn_id: "turn-1".into(), command: vec!["echo".into(), "hello world".into()], - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), parsed_cmd: vec![ParsedCommand::Unknown { cmd: "echo hello world".into(), }], @@ -1807,6 +1825,7 @@ mod tests { tool: "lookup".into(), arguments: Some(serde_json::json!({"id":"123"})), }, + mcp_app_resource_uri: None, duration: Duration::from_millis(8), result: Err("boom".into()), }), @@ -1835,7 +1854,7 @@ mod tests { ThreadItem::CommandExecution { id: "exec-1".into(), command: "echo 'hello world'".into(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), process_id: Some("pid-1".into()), source: CommandExecutionSource::Agent, status: CommandExecutionStatus::Completed, @@ -1855,6 +1874,7 @@ mod tests { tool: "lookup".into(), status: McpToolCallStatus::Failed, arguments: serde_json::json!({"id":"123"}), + mcp_app_resource_uri: None, result: None, error: Some(McpToolCallError { message: "boom".into(), @@ -1880,6 +1900,7 @@ mod tests { tool: "lookup".into(), arguments: Some(serde_json::json!({"id":"123"})), }, + mcp_app_resource_uri: Some("ui://widget/lookup.html".into()), duration: Duration::from_millis(8), result: Ok(CallToolResult { content: vec![serde_json::json!({ @@ -1909,7 +1930,8 @@ mod tests { tool: "lookup".into(), status: McpToolCallStatus::Completed, arguments: serde_json::json!({"id":"123"}), - result: Some(McpToolCallResult { + mcp_app_resource_uri: Some("ui://widget/lookup.html".into()), + result: Some(Box::new(McpToolCallResult { content: 
vec![serde_json::json!({ "type": "text", "text": "result" @@ -1918,7 +1940,7 @@ mod tests { meta: Some(serde_json::json!({ "ui/resourceUri": "ui://widget/lookup.html" })), - }), + })), error: None, duration_ms: Some(8), } @@ -2005,7 +2027,7 @@ mod tests { process_id: Some("pid-2".into()), turn_id: "turn-1".into(), command: vec!["ls".into()], - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), parsed_cmd: vec![ParsedCommand::Unknown { cmd: "ls".into() }], source: ExecCommandSource::Agent, interaction_input: None, @@ -2047,7 +2069,7 @@ mod tests { ThreadItem::CommandExecution { id: "exec-declined".into(), command: "ls".into(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), process_id: Some("pid-2".into()), source: CommandExecutionSource::Agent, status: CommandExecutionStatus::Declined, @@ -2101,7 +2123,7 @@ mod tests { "type": "command", "source": "shell", "command": "rm -rf /tmp/guardian", - "cwd": "/tmp", + "cwd": test_path_buf("/tmp"), })) .expect("guardian action"), }), @@ -2120,7 +2142,7 @@ mod tests { "type": "command", "source": "shell", "command": "rm -rf /tmp/guardian", - "cwd": "/tmp", + "cwd": test_path_buf("/tmp"), })) .expect("guardian action"), }), @@ -2138,7 +2160,7 @@ mod tests { ThreadItem::CommandExecution { id: "guardian-exec".into(), command: "rm -rf /tmp/guardian".into(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), process_id: None, source: CommandExecutionSource::Agent, status: CommandExecutionStatus::Declined, @@ -2181,7 +2203,7 @@ mod tests { "source": "shell", "program": "/bin/rm", "argv": ["/usr/bin/rm", "-f", "/tmp/file.sqlite"], - "cwd": "/tmp", + "cwd": test_path_buf("/tmp"), })) .expect("guardian action"), }), @@ -2199,7 +2221,7 @@ mod tests { ThreadItem::CommandExecution { id: "guardian-execve".into(), command: "/bin/rm -f /tmp/file.sqlite".into(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), process_id: None, source: CommandExecutionSource::Agent, status: 
CommandExecutionStatus::InProgress, @@ -2251,7 +2273,7 @@ mod tests { process_id: Some("pid-42".into()), turn_id: "turn-a".into(), command: vec!["echo".into(), "done".into()], - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), parsed_cmd: vec![ParsedCommand::Unknown { cmd: "echo done".into(), }], @@ -2288,7 +2310,7 @@ mod tests { ThreadItem::CommandExecution { id: "exec-late".into(), command: "echo done".into(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), process_id: Some("pid-42".into()), source: CommandExecutionSource::Agent, status: CommandExecutionStatus::Completed, @@ -2340,7 +2362,7 @@ mod tests { process_id: Some("pid-42".into()), turn_id: "turn-missing".into(), command: vec!["echo".into(), "done".into()], - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), parsed_cmd: vec![ParsedCommand::Unknown { cmd: "echo done".into(), }], diff --git a/codex-rs/app-server-protocol/src/protocol/v1.rs b/codex-rs/app-server-protocol/src/protocol/v1.rs index 6aa2e9fa30..d642e7fab9 100644 --- a/codex-rs/app-server-protocol/src/protocol/v1.rs +++ b/codex-rs/app-server-protocol/src/protocol/v1.rs @@ -1,7 +1,6 @@ use std::collections::HashMap; use std::path::PathBuf; -use codex_git_utils::GitSha; use codex_protocol::ThreadId; use codex_protocol::config_types::ForcedLoginMethod; use codex_protocol::config_types::ReasoningSummary; @@ -11,6 +10,7 @@ use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::parse_command::ParsedCommand; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::FileChange; +pub use codex_protocol::protocol::GitSha; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::SandboxPolicy; use codex_protocol::protocol::SessionSource; diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs index 30adc152ea..5173549326 100644 --- a/codex-rs/app-server-protocol/src/protocol/v2.rs +++ 
b/codex-rs/app-server-protocol/src/protocol/v2.rs @@ -65,6 +65,7 @@ use codex_protocol::protocol::HookOutputEntryKind as CoreHookOutputEntryKind; use codex_protocol::protocol::HookRunStatus as CoreHookRunStatus; use codex_protocol::protocol::HookRunSummary as CoreHookRunSummary; use codex_protocol::protocol::HookScope as CoreHookScope; +use codex_protocol::protocol::HookSource as CoreHookSource; use codex_protocol::protocol::ModelRerouteReason as CoreModelRerouteReason; use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess; use codex_protocol::protocol::NonSteerableTurnKind as CoreNonSteerableTurnKind; @@ -74,12 +75,12 @@ use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow; use codex_protocol::protocol::ReadOnlyAccess as CoreReadOnlyAccess; use codex_protocol::protocol::RealtimeAudioFrame as CoreRealtimeAudioFrame; use codex_protocol::protocol::RealtimeConversationVersion; +use codex_protocol::protocol::RealtimeOutputModality; use codex_protocol::protocol::RealtimeVoice; use codex_protocol::protocol::RealtimeVoicesList; use codex_protocol::protocol::ReviewDecision as CoreReviewDecision; use codex_protocol::protocol::SessionSource as CoreSessionSource; use codex_protocol::protocol::SkillDependencies as CoreSkillDependencies; -use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo; use codex_protocol::protocol::SkillInterface as CoreSkillInterface; use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata; use codex_protocol::protocol::SkillScope as CoreSkillScope; @@ -402,6 +403,23 @@ v2_enum_from_core!( } ); +v2_enum_from_core!( + pub enum HookSource from CoreHookSource { + System, + User, + Project, + Mdm, + SessionFlags, + LegacyManagedConfigFile, + LegacyManagedConfigMdm, + Unknown, + } +); + +fn default_hook_source() -> HookSource { + HookSource::Unknown +} + v2_enum_from_core!( pub enum HookRunStatus from CoreHookRunStatus { Running, Completed, Failed, Blocked, Stopped @@ -448,7 +466,9 @@ pub struct HookRunSummary 
{ pub handler_type: HookHandlerType, pub execution_mode: HookExecutionMode, pub scope: HookScope, - pub source_path: PathBuf, + pub source_path: AbsolutePathBuf, + #[serde(default = "default_hook_source")] + pub source: HookSource, pub display_order: i64, pub status: HookRunStatus, pub status_message: Option, @@ -467,6 +487,7 @@ impl From for HookRunSummary { execution_mode: value.execution_mode.into(), scope: value.scope.into(), source_path: value.source_path, + source: value.source.into(), display_order: value.display_order, status: value.status.into(), status_message: value.status_message, @@ -898,7 +919,6 @@ pub struct NetworkRequirements { /// Legacy compatibility view derived from `unix_sockets`. pub allow_unix_sockets: Option>, pub allow_local_binding: Option, - pub danger_full_access_denylist_only: Option, } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] @@ -945,11 +965,33 @@ pub enum ExternalAgentConfigMigrationItemType { #[serde(rename = "SKILLS")] #[ts(rename = "SKILLS")] Skills, + #[serde(rename = "PLUGINS")] + #[ts(rename = "PLUGINS")] + Plugins, #[serde(rename = "MCP_SERVER_CONFIG")] #[ts(rename = "MCP_SERVER_CONFIG")] McpServerConfig, } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct PluginsMigration { + #[serde(rename = "marketplaceName")] + #[ts(rename = "marketplaceName")] + pub marketplace_name: String, + #[serde(rename = "pluginNames")] + #[ts(rename = "pluginNames")] + pub plugin_names: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct MigrationDetails { + pub plugins: Vec, +} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -958,6 +1000,7 @@ pub struct ExternalAgentConfigMigrationItem { pub 
description: String, /// Null or empty means home-scoped migration; non-empty means repo-scoped migration. pub cwd: Option, + pub details: Option, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] @@ -1467,7 +1510,7 @@ pub enum CommandAction { Read { command: String, name: String, - path: PathBuf, + path: AbsolutePathBuf, }, ListFiles { command: String, @@ -1545,7 +1588,11 @@ impl CommandAction { command: cmd, name, path, - } => CoreParsedCommand::Read { cmd, name, path }, + } => CoreParsedCommand::Read { + cmd, + name, + path: path.into_path_buf(), + }, CommandAction::ListFiles { command: cmd, path } => { CoreParsedCommand::ListFiles { cmd, path } } @@ -1559,13 +1606,13 @@ impl CommandAction { } } -impl From for CommandAction { - fn from(value: CoreParsedCommand) -> Self { +impl CommandAction { + pub fn from_core_with_cwd(value: CoreParsedCommand, cwd: &AbsolutePathBuf) -> Self { match value { CoreParsedCommand::Read { cmd, name, path } => CommandAction::Read { command: cmd, name, - path, + path: cwd.join(path), }, CoreParsedCommand::ListFiles { cmd, path } => { CommandAction::ListFiles { command: cmd, path } @@ -2320,10 +2367,12 @@ pub struct FsGetMetadataParams { #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct FsGetMetadataResponse { - /// Whether the path currently resolves to a directory. + /// Whether the path resolves to a directory. pub is_directory: bool, - /// Whether the path currently resolves to a regular file. + /// Whether the path resolves to a regular file. pub is_file: bool, + /// Whether the path itself is a symbolic link. + pub is_symlink: bool, /// File creation time in Unix milliseconds when available, otherwise `0`. 
#[ts(type = "number")] pub created_at_ms: i64, @@ -2718,10 +2767,10 @@ pub struct ThreadStartResponse { pub model: String, pub model_provider: String, pub service_tier: Option, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, /// Instruction source files currently loaded for this thread. #[serde(default)] - pub instruction_sources: Vec, + pub instruction_sources: Vec, #[experimental(nested)] pub approval_policy: AskForApproval, /// Reviewer currently used for approval requests on this thread. @@ -2807,10 +2856,10 @@ pub struct ThreadResumeResponse { pub model: String, pub model_provider: String, pub service_tier: Option, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, /// Instruction source files currently loaded for this thread. #[serde(default)] - pub instruction_sources: Vec, + pub instruction_sources: Vec, #[experimental(nested)] pub approval_policy: AskForApproval, /// Reviewer currently used for approval requests on this thread. @@ -2887,10 +2936,10 @@ pub struct ThreadForkResponse { pub model: String, pub model_provider: String, pub service_tier: Option, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, /// Instruction source files currently loaded for this thread. #[serde(default)] - pub instruction_sources: Vec, + pub instruction_sources: Vec, #[experimental(nested)] pub approval_policy: AskForApproval, /// Reviewer currently used for approval requests on this thread. 
@@ -3049,6 +3098,48 @@ pub struct ThreadMetadataUpdateResponse { pub thread: Thread, } +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "lowercase")] +#[ts(rename_all = "lowercase")] +pub enum ThreadMemoryMode { + Enabled, + Disabled, +} + +impl ThreadMemoryMode { + pub fn as_str(self) -> &'static str { + match self { + Self::Enabled => "enabled", + Self::Disabled => "disabled", + } + } + + pub fn to_core(self) -> codex_protocol::protocol::ThreadMemoryMode { + match self { + Self::Enabled => codex_protocol::protocol::ThreadMemoryMode::Enabled, + Self::Disabled => codex_protocol::protocol::ThreadMemoryMode::Disabled, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct ThreadMemoryModeSetParams { + pub thread_id: String, + pub mode: ThreadMemoryMode, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct ThreadMemoryModeSetResponse {} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct MemoryResetResponse {} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -3287,6 +3378,26 @@ pub struct SkillsListResponse { pub data: Vec, } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct MarketplaceAddParams { + pub source: String, + #[ts(optional = nullable)] + pub ref_name: Option, + #[ts(optional = nullable)] + pub sparse_paths: Option>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct MarketplaceAddResponse { + pub 
marketplace_name: String, + pub installed_root: AbsolutePathBuf, + pub already_added: bool, +} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -3363,7 +3474,7 @@ pub struct SkillMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional)] pub dependencies: Option, - pub path: PathBuf, + pub path: AbsolutePathBuf, pub scope: SkillScope, pub enabled: bool, } @@ -3377,9 +3488,9 @@ pub struct SkillInterface { #[ts(optional)] pub short_description: Option, #[ts(optional)] - pub icon_small: Option, + pub icon_small: Option, #[ts(optional)] - pub icon_large: Option, + pub icon_large: Option, #[ts(optional)] pub brand_color: Option, #[ts(optional)] @@ -3509,7 +3620,7 @@ pub struct SkillSummary { pub description: String, pub short_description: Option, pub interface: Option, - pub path: PathBuf, + pub path: AbsolutePathBuf, pub enabled: bool, } @@ -3663,15 +3774,6 @@ impl From for SkillScope { } } -impl From for SkillErrorInfo { - fn from(value: CoreSkillErrorInfo) -> Self { - Self { - path: value.path, - message: value.message, - } - } -} - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -3696,7 +3798,7 @@ pub struct Thread { /// [UNSTABLE] Path to the thread on disk. pub path: Option, /// Working directory captured for the thread. - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, /// Version of the CLI that created the thread. pub cli_version: String, /// Origin of the thread (CLI, VSCode, codex exec, codex app-server, etc.). @@ -3917,11 +4019,14 @@ impl From for CoreRealtimeAudioFrame { } /// EXPERIMENTAL - start a thread-scoped realtime session. 
-#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct ThreadRealtimeStartParams { pub thread_id: String, + /// Selects text or audio output for the realtime session. Transport and voice stay + /// independent so clients can choose how they connect separately from what the model emits. + pub output_modality: RealtimeOutputModality, #[serde( default, deserialize_with = "super::serde_helpers::deserialize_double_option", @@ -4039,9 +4144,22 @@ pub struct ThreadRealtimeItemAddedNotification { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] -pub struct ThreadRealtimeTranscriptUpdatedNotification { +pub struct ThreadRealtimeTranscriptDeltaNotification { pub thread_id: String, pub role: String, + /// Live transcript delta from the realtime event. + pub delta: String, +} + +/// EXPERIMENTAL - final transcript text emitted when realtime completes +/// a transcript part. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct ThreadRealtimeTranscriptDoneNotification { + pub thread_id: String, + pub role: String, + /// Final complete text for the transcript part. pub text: String, } @@ -4214,6 +4332,20 @@ pub struct TurnStartResponse { pub turn: Turn, } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct ThreadInjectItemsParams { + pub thread_id: String, + /// Raw Responses API items to append to the thread's model-visible history. 
+ pub items: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct ThreadInjectItemsResponse {} + #[derive( Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS, ExperimentalApi, )] @@ -4441,7 +4573,7 @@ pub enum ThreadItem { /// The command to be executed. command: String, /// The command's working directory. - cwd: PathBuf, + cwd: AbsolutePathBuf, /// Identifier for the underlying PTY process (when available). process_id: Option, #[serde(default)] @@ -4474,7 +4606,10 @@ pub enum ThreadItem { tool: String, status: McpToolCallStatus, arguments: JsonValue, - result: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + mcp_app_resource_uri: Option, + result: Option>, error: Option, /// The duration of the MCP tool call in milliseconds. #[ts(type = "number | null")] @@ -4525,7 +4660,7 @@ pub enum ThreadItem { }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] - ImageView { id: String, path: String }, + ImageView { id: String, path: AbsolutePathBuf }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] ImageGeneration { @@ -4535,7 +4670,7 @@ pub enum ThreadItem { result: String, #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional)] - saved_path: Option, + saved_path: Option, }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] @@ -4697,7 +4832,7 @@ impl From for CoreGuardianCommandSource { pub struct GuardianCommandReviewAction { pub source: GuardianCommandSource, pub command: String, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] @@ -4707,15 +4842,15 @@ pub struct GuardianExecveReviewAction { pub source: GuardianCommandSource, pub program: String, pub argv: Vec, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, } #[derive(Serialize, Deserialize, Debug, Clone, 
PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct GuardianApplyPatchReviewAction { - pub cwd: PathBuf, - pub files: Vec, + pub cwd: AbsolutePathBuf, + pub files: Vec, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] @@ -4749,7 +4884,7 @@ pub enum GuardianApprovalReviewAction { Command { source: GuardianCommandSource, command: String, - cwd: PathBuf, + cwd: AbsolutePathBuf, }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] @@ -4757,11 +4892,14 @@ pub enum GuardianApprovalReviewAction { source: GuardianCommandSource, program: String, argv: Vec, - cwd: PathBuf, + cwd: AbsolutePathBuf, }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] - ApplyPatch { cwd: PathBuf, files: Vec }, + ApplyPatch { + cwd: AbsolutePathBuf, + files: Vec, + }, #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] NetworkAccess { @@ -5670,7 +5808,7 @@ pub struct CommandExecutionRequestApprovalParams { /// The command's working directory. #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional = nullable)] - pub cwd: Option, + pub cwd: Option, /// Best-effort parsed command actions for friendly display. 
#[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional = nullable)] @@ -6475,22 +6613,20 @@ mod tests { use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess; use codex_protocol::protocol::ReadOnlyAccess as CoreReadOnlyAccess; use codex_protocol::user_input::UserInput as CoreUserInput; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use serde_json::json; use std::path::PathBuf; fn absolute_path_string(path: &str) -> String { - let trimmed = path.trim_start_matches('/'); - if cfg!(windows) { - format!(r"C:\{}", trimmed.replace('/', "\\")) - } else { - format!("/{trimmed}") - } + let path = format!("/{}", path.trim_start_matches('/')); + test_path_buf(&path).display().to_string() } fn absolute_path(path: &str) -> AbsolutePathBuf { - AbsolutePathBuf::from_absolute_path(absolute_path_string(path)) - .expect("path must be absolute") + let path = format!("/{}", path.trim_start_matches('/')); + test_path_buf(&path).abs() } fn test_absolute_path() -> AbsolutePathBuf { @@ -6508,6 +6644,39 @@ mod tests { ); } + #[test] + fn external_agent_config_plugins_details_round_trip() { + let item: ExternalAgentConfigMigrationItem = serde_json::from_value(json!({ + "itemType": "PLUGINS", + "description": "Install supported plugins from Claude settings", + "cwd": absolute_path_string("repo"), + "details": { + "plugins": [ + { + "marketplaceName": "team-marketplace", + "pluginNames": ["asana"] + } + ] + } + })) + .expect("plugins migration item should deserialize"); + + assert_eq!( + item, + ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: "Install supported plugins from Claude settings".to_string(), + cwd: Some(PathBuf::from(absolute_path_string("repo"))), + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "team-marketplace".to_string(), + plugin_names: 
vec!["asana".to_string()], + }], + }), + } + ); + } + #[test] fn command_execution_request_approval_rejects_relative_additional_permission_paths() { let err = serde_json::from_value::(json!({ @@ -6515,7 +6684,7 @@ mod tests { "turnId": "turn_123", "itemId": "call_123", "command": "cat file", - "cwd": "/tmp", + "cwd": absolute_path_string("tmp"), "commandActions": null, "reason": null, "networkApprovalContext": null, @@ -6714,6 +6883,7 @@ mod tests { let response = FsGetMetadataResponse { is_directory: false, is_file: true, + is_symlink: false, created_at_ms: 123, modified_at_ms: 456, }; @@ -6724,6 +6894,7 @@ mod tests { json!({ "isDirectory": false, "isFile": true, + "isSymlink": false, "createdAtMs": 123, "modifiedAtMs": 456, }) @@ -7972,7 +8143,7 @@ mod tests { "type": "command", "source": "shell", "command": "rm -rf /tmp/example.sqlite", - "cwd": "/tmp", + "cwd": absolute_path_string("tmp"), }); let action: GuardianApprovalReviewAction = serde_json::from_value(value.clone()).expect("guardian review action"); @@ -7982,7 +8153,7 @@ mod tests { GuardianApprovalReviewAction::Command { source: GuardianCommandSource::Shell, command: "rm -rf /tmp/example.sqlite".to_string(), - cwd: "/tmp".into(), + cwd: absolute_path("tmp"), } ); assert_eq!( @@ -8011,7 +8182,6 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, - danger_full_access_denylist_only: None, allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["blocked.example.com".to_string()]), unix_sockets: None, @@ -8038,7 +8208,6 @@ mod tests { ), ])), managed_allowed_domains_only: Some(true), - danger_full_access_denylist_only: Some(true), allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["blocked.example.com".to_string()]), unix_sockets: Some(BTreeMap::from([ @@ -8069,7 +8238,6 @@ mod tests { "blocked.example.com": "deny" }, "managedAllowedDomainsOnly": true, - "dangerFullAccessDenylistOnly": 
true, "allowedDomains": ["api.openai.com"], "deniedDomains": ["blocked.example.com"], "unixSockets": { @@ -8297,6 +8465,37 @@ mod tests { ); } + #[test] + fn marketplace_add_params_serialization_uses_optional_ref_name_and_sparse_paths() { + assert_eq!( + serde_json::to_value(MarketplaceAddParams { + source: "owner/repo".to_string(), + ref_name: None, + sparse_paths: None, + }) + .unwrap(), + json!({ + "source": "owner/repo", + "refName": null, + "sparsePaths": null, + }), + ); + + assert_eq!( + serde_json::to_value(MarketplaceAddParams { + source: "owner/repo".to_string(), + ref_name: Some("main".to_string()), + sparse_paths: Some(vec!["plugins/foo".to_string()]), + }) + .unwrap(), + json!({ + "source": "owner/repo", + "refName": "main", + "sparsePaths": ["plugins/foo"], + }), + ); + } + #[test] fn plugin_install_params_serialization_uses_force_remote_sync() { let marketplace_path = if cfg!(windows) { @@ -8528,7 +8727,7 @@ mod tests { "updatedAt": 1, "status": { "type": "idle" }, "path": null, - "cwd": "/tmp", + "cwd": absolute_path_string("tmp"), "cliVersion": "0.0.0", "source": "exec", "agentNickname": null, @@ -8540,7 +8739,7 @@ mod tests { "model": "gpt-5", "modelProvider": "openai", "serviceTier": null, - "cwd": "/tmp", + "cwd": absolute_path_string("tmp"), "approvalPolicy": "on-failure", "approvalsReviewer": "user", "sandbox": { "type": "dangerFullAccess" }, @@ -8554,9 +8753,9 @@ mod tests { let fork: ThreadForkResponse = serde_json::from_value(response).expect("thread/fork response"); - assert_eq!(start.instruction_sources, Vec::::new()); - assert_eq!(resume.instruction_sources, Vec::::new()); - assert_eq!(fork.instruction_sources, Vec::::new()); + assert_eq!(start.instruction_sources, Vec::::new()); + assert_eq!(resume.instruction_sources, Vec::::new()); + assert_eq!(fork.instruction_sources, Vec::::new()); } #[test] diff --git a/codex-rs/app-server/Cargo.toml b/codex-rs/app-server/Cargo.toml index 5dc3a31485..097abdb713 100644 --- 
a/codex-rs/app-server/Cargo.toml +++ b/codex-rs/app-server/Cargo.toml @@ -34,6 +34,7 @@ codex-arg0 = { workspace = true } codex-cloud-requirements = { workspace = true } codex-config = { workspace = true } codex-core = { workspace = true } +codex-core-plugins = { workspace = true } codex-exec-server = { workspace = true } codex-features = { workspace = true } codex-git-utils = { workspace = true } @@ -54,6 +55,7 @@ codex-rmcp-client = { workspace = true } codex-rollout = { workspace = true } codex-sandboxing = { workspace = true } codex-state = { workspace = true } +codex-thread-store = { workspace = true } codex-tools = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-json-to-toml = { workspace = true } diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index 94f86c7ebb..2fece44544 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -41,8 +41,11 @@ Security note: - Non-loopback websocket listeners currently allow unauthenticated connections by default during rollout. If you expose one remotely, configure websocket auth explicitly now. - Supported auth modes are app-server flags: - `--ws-auth capability-token --ws-token-file /absolute/path` + - `--ws-auth capability-token --ws-token-sha256 HEX` - `--ws-auth signed-bearer-token --ws-shared-secret-file /absolute/path` for HMAC-signed JWT/JWS bearer tokens, with optional `--ws-issuer`, `--ws-audience`, `--ws-max-clock-skew-seconds` - Clients present the credential as `Authorization: Bearer ` during the websocket handshake. Auth is enforced before JSON-RPC `initialize`. +- When starting `codex app-server` manually, prefer `--ws-token-file` over passing raw bearer tokens on the command line. Store a high-entropy token in a file readable only by your user, then have your client present that token in the websocket `Authorization` header. 
+- `--ws-token-sha256` is intended for clients that keep the raw token in a separate local secret store and only need the server to know the SHA-256 verifier. The hash may appear in process listings, but it is not sufficient to authenticate; clients still need the original raw token. Only use this mode with randomly generated high-entropy tokens, not passwords or other guessable values. Tracing/log output: @@ -140,9 +143,11 @@ Example with notification opt-out: - `thread/loaded/list` — list the thread ids currently loaded in memory. - `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`. The returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded. - `thread/metadata/update` — patch stored thread metadata in sqlite; currently supports updating persisted `gitInfo` fields and returns the refreshed `thread`. +- `thread/memoryMode/set` — experimental; set a thread’s persisted memory eligibility to `"enabled"` or `"disabled"` for either a loaded thread or a stored rollout; returns `{}` on success. +- `memory/reset` — experimental; clear the current `CODEX_HOME/memories` directory and reset persisted memory stage data in sqlite while preserving existing thread memory modes; returns `{}` on success. - `thread/status/changed` — notification emitted when a loaded thread’s status changes (`threadId` + new `status`). - `thread/archive` — move a thread’s rollout file into the archived directory; returns `{}` on success and emits `thread/archived`. -- `thread/unsubscribe` — unsubscribe this connection from thread turn/item events. If this was the last subscriber, the server shuts down and unloads the thread, then emits `thread/closed`. +- `thread/unsubscribe` — unsubscribe this connection from thread turn/item events. 
If this was the last subscriber, the server keeps the thread loaded and unloads it only after it has had no subscribers and no thread activity for 30 minutes, then emits `thread/closed`. - `thread/name/set` — set or update a thread’s user-facing name for either a loaded thread or a persisted rollout; returns `{}` on success and emits `thread/name/updated` to initialized, opted-in clients. Thread names are not required to be unique; name lookups resolve to the most recently updated thread. - `thread/unarchive` — move an archived rollout file back into the sessions directory; returns the restored `thread` on success and emits `thread/unarchived`. - `thread/compact/start` — trigger conversation history compaction for a thread; returns `{}` immediately while progress streams through standard turn/item notifications. @@ -150,9 +155,10 @@ Example with notification opt-out: - `thread/backgroundTerminals/clean` — terminate all running background terminals for a thread (experimental; requires `capabilities.experimentalApi`); returns `{}` when the cleanup request is accepted. - `thread/rollback` — drop the last N turns from the agent’s in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success. - `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications. For `collaborationMode`, `settings.developer_instructions: null` means "use built-in instructions for the selected mode". +- `thread/inject_items` — append raw Responses API items to a loaded thread’s model-visible history without starting a user turn; returns `{}` on success. - `turn/steer` — add user input to an already in-flight regular turn without starting a new turn; returns the active `turnId` that accepted the input. Review and manual compaction turns reject `turn/steer`. 
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`. -- `thread/realtime/start` — start a thread-scoped realtime session (experimental); returns `{}` and streams `thread/realtime/*` notifications. Omit `transport` for the websocket transport, or pass `{ "type": "webrtc", "sdp": "..." }` to create a WebRTC session from a browser-generated SDP offer; the remote answer SDP is emitted as `thread/realtime/sdp`. +- `thread/realtime/start` — start a thread-scoped realtime session (experimental); pass `outputModality: "text"` or `outputModality: "audio"` to choose model output, returns `{}` and streams `thread/realtime/*` notifications. Omit `transport` for the websocket transport, or pass `{ "type": "webrtc", "sdp": "..." }` to create a WebRTC session from a browser-generated SDP offer; the remote answer SDP is emitted as `thread/realtime/sdp`. - `thread/realtime/appendAudio` — append an input audio chunk to the active realtime session (experimental); returns `{}`. - `thread/realtime/appendText` — append text input to the active realtime session (experimental); returns `{}`. - `thread/realtime/stop` — stop the active realtime session for the thread (experimental); returns `{}`. @@ -165,7 +171,7 @@ Example with notification opt-out: - `fs/readFile` — read an absolute file path and return `{ dataBase64 }`. - `fs/writeFile` — write an absolute file path from base64-encoded `{ dataBase64 }`; returns `{}`. - `fs/createDirectory` — create an absolute directory path; `recursive` defaults to `true`. -- `fs/getMetadata` — return metadata for an absolute path: `isDirectory`, `isFile`, `createdAtMs`, and `modifiedAtMs`. +- `fs/getMetadata` — return metadata for an absolute path: `isDirectory`, `isFile`, `isSymlink`, `createdAtMs`, and `modifiedAtMs`. 
- `fs/readDirectory` — list direct child entries for an absolute directory path; each entry contains `fileName`, `isDirectory`, and `isFile`, and `fileName` is just the child name, not a path. - `fs/remove` — remove an absolute file or directory tree; `recursive` and `force` default to `true`. - `fs/copy` — copy between absolute paths; directory copies require `recursive: true`. @@ -177,6 +183,7 @@ Example with notification opt-out: - `experimentalFeature/enablement/set` — patch the in-memory process-wide runtime feature enablement for the currently supported feature keys (`apps`, `plugins`). For each feature, precedence is: cloud requirements > --enable > config.toml > experimentalFeature/enablement/set (new) > code default. - `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination). This response omits built-in developer instructions; clients should either pass `settings.developer_instructions: null` when setting a mode to use Codex's built-in instructions, or provide their own instructions explicitly. - `skills/list` — list skills for one or more `cwd` values (optional `forceReload`). +- `marketplace/add` — add a remote plugin marketplace from an HTTP(S) Git URL, SSH Git URL, or GitHub `owner/repo` shorthand, then persist it into the user marketplace config. Returns the installed root path plus whether the marketplace was already present. - `plugin/list` — list discovered plugin marketplaces and plugin state, including effective marketplace install/auth policy metadata, fail-open `marketplaceLoadErrors` entries for marketplace files that could not be parsed or loaded, and best-effort `featuredPluginIds` for the official curated marketplace. `interface.category` uses the marketplace category when present; otherwise it falls back to the plugin manifest category. Pass `forceRemoteSync: true` to refresh curated plugin state before listing (**under development; do not call from production clients yet**). 
- `plugin/read` — read one plugin by `marketplacePath` plus `pluginName`, returning marketplace info, a list-style `summary`, manifest descriptions/interface metadata, and bundled skills/apps/MCP server names. Returned plugin skills include their current `enabled` state after local config filtering. Plugin app summaries also include `needsAuth` when the server can determine connector accessibility (**under development; do not call from production clients yet**). - `skills/changed` — notification emitted when watched local skill files change. @@ -193,11 +200,11 @@ Example with notification opt-out: - `windowsSandbox/setupStart` — start Windows sandbox setup for the selected mode (`elevated` or `unelevated`); accepts an optional absolute `cwd` to target setup for a specific workspace, returns `{ started: true }` immediately, and later emits `windowsSandbox/setupCompleted`. - `feedback/upload` — submit a feedback report (classification + optional reason/logs, conversation_id, and optional `extraLogFiles` attachments array); returns the tracking thread id. - `config/read` — fetch the effective config on disk after resolving config layering. -- `externalAgentConfig/detect` — detect migratable external-agent artifacts with `includeHome` and optional `cwds`; each detected item includes `cwd` (`null` for home). -- `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home). +- `externalAgentConfig/detect` — detect migratable external-agent artifacts with `includeHome` and optional `cwds`; each detected item includes `cwd` (`null` for home), and plugin migration items may additionally include structured `details` grouping plugin ids under each detected marketplace name. +- `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home) and any plugin `details` returned by detect. 
- `config/value/write` — write a single config key/value to the user's config.toml on disk. - `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk, with optional `reloadUserConfig: true` to hot-reload loaded threads. -- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), pinned feature values (`featureRequirements`), `enforceResidency`, and `network` constraints such as canonical domain/socket permissions plus `managedAllowedDomainsOnly` and `dangerFullAccessDenylistOnly`. +- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), pinned feature values (`featureRequirements`), `enforceResidency`, and `network` constraints such as canonical domain/socket permissions plus `managedAllowedDomainsOnly`. ### Example: Start or resume a thread @@ -243,7 +250,7 @@ Start a fresh thread when you need a new Codex conversation. Valid `personality` values are `"friendly"`, `"pragmatic"`, and `"none"`. When `"none"` is selected, the personality placeholder is replaced with an empty string. -To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, including `approvalsReviewer`. +To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`. 
When the stored session includes persisted token usage, the server emits `thread/tokenUsage/updated` immediately after the response so clients can render restored usage before the next turn starts. You can also pass the same configuration overrides supported by `thread/start`, including `approvalsReviewer`. By default, resume uses the latest persisted `model` and `reasoningEffort` values associated with the thread. Supplying any of `model`, `modelProvider`, `config.model`, or `config.model_reasoning_effort` disables that persisted fallback and uses the explicit overrides plus normal config resolution instead. @@ -257,7 +264,7 @@ Example: { "id": 11, "result": { "thread": { "id": "thr_123", … } } } ``` -To branch from a stored session, call `thread/fork` with the `thread.id`. This creates a new thread id and emits a `thread/started` notification for it. If the source thread is actively running, the fork snapshots it as if the current turn had been interrupted first. Pass `ephemeral: true` when the fork should stay in-memory only: +To branch from a stored session, call `thread/fork` with the `thread.id`. This creates a new thread id and emits a `thread/started` notification for it. When the source history includes persisted token usage, the server also emits `thread/tokenUsage/updated` for the new thread immediately after the response. If the source thread is actively running, the fork snapshots it as if the current turn had been interrupted first. Pass `ephemeral: true` when the fork should stay in-memory only: ```json { "method": "thread/fork", "id": 12, "params": { "threadId": "thr_123", "ephemeral": true } } @@ -337,11 +344,16 @@ When `nextCursor` is `null`, you’ve reached the final page. - `notSubscribed` when the connection was not subscribed to that thread. - `notLoaded` when the thread is not loaded. -If this was the last subscriber, the server unloads the thread and emits `thread/closed` and a `thread/status/changed` transition to `notLoaded`. 
+If this was the last subscriber, the server does not unload the thread immediately. It unloads the thread after the thread has had no subscribers and no thread activity for 30 minutes, then emits `thread/closed` and a `thread/status/changed` transition to `notLoaded`. ```json { "method": "thread/unsubscribe", "id": 22, "params": { "threadId": "thr_123" } } { "id": 22, "result": { "status": "unsubscribed" } } +``` + +Later, after the idle unload timeout: + +```json { "method": "thread/status/changed", "params": { "threadId": "thr_123", "status": { "type": "notLoaded" } @@ -395,6 +407,23 @@ Use `thread/metadata/update` to patch sqlite-backed metadata for a thread withou } } ``` +Experimental: use `thread/memoryMode/set` to change whether a thread remains eligible for future memory generation. + +```json +{ "method": "thread/memoryMode/set", "id": 26, "params": { + "threadId": "thr_123", + "mode": "disabled" +} } +{ "id": 26, "result": {} } +``` + +Experimental: use `memory/reset` to clear local memory artifacts and sqlite-backed memory stage data for the current Codex home. This preserves existing thread memory modes; use `thread/memoryMode/set` separately when a thread's future memory eligibility should change. + +```json +{ "method": "memory/reset", "id": 27 } +{ "id": 27, "result": {} } +``` + ### Example: Archive a thread Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory. @@ -565,6 +594,24 @@ Invoke a plugin by including a UI mention token such as `@sample` in the text in } } } ``` +### Example: Inject raw history items + +Use `thread/inject_items` to append prebuilt Responses API items to a loaded thread’s prompt history without starting a user turn. These items are persisted to the rollout and included in subsequent model requests. 
+ +```json +{ "method": "thread/inject_items", "id": 36, "params": { + "threadId": "thr_123", + "items": [ + { + "type": "message", + "role": "assistant", + "content": [{ "type": "output_text", "text": "Previously computed context." }] + } + ] +} } +{ "id": 36, "result": {} } +``` + ### Example: Start realtime with WebRTC Use `thread/realtime/start` with `transport.type: "webrtc"` when a browser or webview owns the `RTCPeerConnection` and app-server should create the server-side realtime session. The transport `sdp` must be the offer SDP produced by `RTCPeerConnection.createOffer()`, not a hand-written or minimal SDP string. @@ -592,6 +639,7 @@ Then send `offer.sdp` to app-server. Core uses `experimental_realtime_ws_backend ```json { "method": "thread/realtime/start", "id": 40, "params": { "threadId": "thr_123", + "outputModality": "audio", "prompt": "You are on a call.", "sessionId": null, "transport": { "type": "webrtc", "sdp": "v=0\r\no=..." } @@ -843,6 +891,7 @@ All filesystem paths in this section must be absolute. { "id": 42, "result": { "isDirectory": false, "isFile": true, + "isSymlink": false, "createdAtMs": 1730910000000, "modifiedAtMs": 1730910000000 } } @@ -854,7 +903,7 @@ All filesystem paths in this section must be absolute. } } ``` -- `fs/getMetadata` returns whether the path currently resolves to a directory or regular file, plus `createdAtMs` and `modifiedAtMs` in Unix milliseconds. If a timestamp is unavailable on the current platform, that field is `0`. +- `fs/getMetadata` returns whether the path resolves to a directory or regular file, whether the path itself is a symlink, plus `createdAtMs` and `modifiedAtMs` in Unix milliseconds. If a timestamp is unavailable on the current platform, that field is `0`. - `fs/createDirectory` defaults `recursive` to `true` when omitted. - `fs/remove` defaults both `recursive` and `force` to `true` when omitted. 
- `fs/readFile` always returns base64 bytes via `dataBase64`, and `fs/writeFile` always expects base64 bytes in `dataBase64`. @@ -915,7 +964,8 @@ The thread realtime API emits thread-scoped notifications for session lifecycle - `thread/realtime/started` — `{ threadId, sessionId }` once realtime starts for the thread (experimental). - `thread/realtime/itemAdded` — `{ threadId, item }` for raw non-audio realtime items that do not have a dedicated typed app-server notification, including `handoff_request` (experimental). `item` is forwarded as raw JSON while the upstream websocket item schema remains unstable. -- `thread/realtime/transcriptUpdated` — `{ threadId, role, text }` whenever realtime transcript text changes (experimental). This forwards the live transcript delta from that realtime event, not the full accumulated transcript. +- `thread/realtime/transcript/delta` — `{ threadId, role, delta }` for live realtime transcript deltas (experimental). +- `thread/realtime/transcript/done` — `{ threadId, role, text }` when realtime emits the final full text for a transcript part (experimental). - `thread/realtime/outputAudio/delta` — `{ threadId, audio }` for streamed output audio chunks (experimental). `audio` uses camelCase fields (`data`, `sampleRate`, `numChannels`, `samplesPerChannel`). - `thread/realtime/error` — `{ threadId, message }` when realtime encounters a transport or backend error (experimental). - `thread/realtime/closed` — `{ threadId, reason }` when the realtime transport closes (experimental). 
diff --git a/codex-rs/app-server/src/app_server_tracing.rs b/codex-rs/app-server/src/app_server_tracing.rs index b06a8e52c4..2118e77300 100644 --- a/codex-rs/app-server/src/app_server_tracing.rs +++ b/codex-rs/app-server/src/app_server_tracing.rs @@ -72,10 +72,10 @@ pub(crate) fn typed_request_span( &span, client_info .map(|(client_name, _)| client_name) - .or(session.app_server_client_name.as_deref()), + .or(session.app_server_client_name()), client_info .map(|(_, client_version)| client_version) - .or(session.client_version.as_deref()), + .or(session.client_version()), ); attach_parent_context(&span, &method, request.id(), /*parent_trace*/ None); @@ -147,7 +147,7 @@ fn client_name<'a>( if let Some(params) = initialize_client_info { return Some(params.client_info.name.as_str()); } - session.app_server_client_name.as_deref() + session.app_server_client_name() } fn client_version<'a>( @@ -157,7 +157,7 @@ fn client_version<'a>( if let Some(params) = initialize_client_info { return Some(params.client_info.version.as_str()); } - session.client_version.as_deref() + session.client_version() } fn initialize_client_info(request: &JSONRPCRequest) -> Option { diff --git a/codex-rs/app-server/src/bespoke_event_handling.rs b/codex-rs/app-server/src/bespoke_event_handling.rs index 08a51108ae..5d19bde70c 100644 --- a/codex-rs/app-server/src/bespoke_event_handling.rs +++ b/codex-rs/app-server/src/bespoke_event_handling.rs @@ -12,6 +12,7 @@ use crate::thread_state::TurnSummary; use crate::thread_state::resolve_server_request_on_thread_listener; use crate::thread_status::ThreadWatchActiveGuard; use crate::thread_status::ThreadWatchManager; +use codex_analytics::AnalyticsEventsClient; use codex_app_server_protocol::AccountRateLimitsUpdatedNotification; use codex_app_server_protocol::AdditionalPermissionProfile as V2AdditionalPermissionProfile; use codex_app_server_protocol::AgentMessageDeltaNotification; @@ -82,7 +83,8 @@ use 
codex_app_server_protocol::ThreadRealtimeItemAddedNotification; use codex_app_server_protocol::ThreadRealtimeOutputAudioDeltaNotification; use codex_app_server_protocol::ThreadRealtimeSdpNotification; use codex_app_server_protocol::ThreadRealtimeStartedNotification; -use codex_app_server_protocol::ThreadRealtimeTranscriptUpdatedNotification; +use codex_app_server_protocol::ThreadRealtimeTranscriptDeltaNotification; +use codex_app_server_protocol::ThreadRealtimeTranscriptDoneNotification; use codex_app_server_protocol::ThreadRollbackResponse; use codex_app_server_protocol::ThreadTokenUsage; use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification; @@ -138,9 +140,9 @@ use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUse use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse; use codex_sandboxing::policy_transforms::intersect_permission_profiles; use codex_shell_command::parse_command::shlex_join; +use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashMap; use std::path::Path; -use std::path::PathBuf; use std::sync::Arc; use tokio::sync::Mutex; use tokio::sync::oneshot; @@ -157,7 +159,7 @@ enum CommandExecutionApprovalPresentation { #[derive(Debug, PartialEq)] struct CommandExecutionCompletionItem { command: String, - cwd: PathBuf, + cwd: AbsolutePathBuf, command_actions: Vec, } @@ -167,6 +169,7 @@ pub(crate) async fn apply_bespoke_event_handling( conversation_id: ThreadId, conversation: Arc, thread_manager: Arc, + analytics_events_client: Option, outgoing: ThreadScopedOutgoingMessageSender, thread_state: Arc>, thread_watch_manager: ThreadWatchManager, @@ -202,6 +205,10 @@ pub(crate) async fn apply_bespoke_event_handling( thread_id: conversation_id.to_string(), turn, }; + if let Some(analytics_events_client) = analytics_events_client.as_ref() { + analytics_events_client + .track_notification(ServerNotification::TurnStarted(notification.clone())); + } outgoing 
.send_server_notification(ServerNotification::TurnStarted(notification)) .await; @@ -218,6 +225,7 @@ pub(crate) async fn apply_bespoke_event_handling( conversation_id, event_turn_id, turn_complete_event, + analytics_events_client.as_ref(), &outgoing, &thread_state, ) @@ -401,26 +409,50 @@ pub(crate) async fn apply_bespoke_event_handling( .await; } RealtimeEvent::InputTranscriptDelta(event) => { - let notification = ThreadRealtimeTranscriptUpdatedNotification { + let notification = ThreadRealtimeTranscriptDeltaNotification { thread_id: conversation_id.to_string(), role: "user".to_string(), - text: event.delta, + delta: event.delta, }; outgoing .send_server_notification( - ServerNotification::ThreadRealtimeTranscriptUpdated(notification), + ServerNotification::ThreadRealtimeTranscriptDelta(notification), + ) + .await; + } + RealtimeEvent::InputTranscriptDone(event) => { + let notification = ThreadRealtimeTranscriptDoneNotification { + thread_id: conversation_id.to_string(), + role: "user".to_string(), + text: event.text, + }; + outgoing + .send_server_notification( + ServerNotification::ThreadRealtimeTranscriptDone(notification), ) .await; } RealtimeEvent::OutputTranscriptDelta(event) => { - let notification = ThreadRealtimeTranscriptUpdatedNotification { + let notification = ThreadRealtimeTranscriptDeltaNotification { thread_id: conversation_id.to_string(), role: "assistant".to_string(), - text: event.delta, + delta: event.delta, }; outgoing .send_server_notification( - ServerNotification::ThreadRealtimeTranscriptUpdated(notification), + ServerNotification::ThreadRealtimeTranscriptDelta(notification), + ) + .await; + } + RealtimeEvent::OutputTranscriptDone(event) => { + let notification = ThreadRealtimeTranscriptDoneNotification { + thread_id: conversation_id.to_string(), + role: "assistant".to_string(), + text: event.text, + }; + outgoing + .send_server_notification( + ServerNotification::ThreadRealtimeTranscriptDone(notification), ) .await; } @@ -612,7 +644,7 @@ 
pub(crate) async fn apply_bespoke_event_handling( call_id: call_id.clone(), approval_id, command, - cwd, + cwd: cwd.to_path_buf(), reason, parsed_cmd, }; @@ -634,7 +666,7 @@ pub(crate) async fn apply_bespoke_event_handling( let command_actions = parsed_cmd .iter() .cloned() - .map(V2ParsedCommand::from) + .map(|parsed| V2ParsedCommand::from_core_with_cwd(parsed, &cwd)) .collect::>(); let presentation = if let Some(network_approval_context) = network_approval_context.map(V2NetworkApprovalContext::from) @@ -1303,6 +1335,11 @@ pub(crate) async fn apply_bespoke_event_handling( .await; } EventMsg::ContextCompacted(..) => { + // Core still fans out this deprecated event for legacy clients; + // v2 clients receive the canonical ContextCompaction item instead. + if matches!(api_version, ApiVersion::V2) { + return; + } let notification = ContextCompactedNotification { thread_id: conversation_id.to_string(), turn_id: event_turn_id.clone(), @@ -1426,7 +1463,7 @@ pub(crate) async fn apply_bespoke_event_handling( EventMsg::ViewImageToolCall(view_image_event) => { let item = ThreadItem::ImageView { id: view_image_event.call_id.clone(), - path: view_image_event.path.to_string_lossy().into_owned(), + path: view_image_event.path.clone(), }; let started = ItemStartedNotification { thread_id: conversation_id.to_string(), @@ -1599,14 +1636,25 @@ pub(crate) async fn apply_bespoke_event_handling( .await; } EventMsg::ExecCommandBegin(exec_command_begin_event) => { + if matches!(api_version, ApiVersion::V2) + && matches!( + exec_command_begin_event.source, + codex_protocol::protocol::ExecCommandSource::UnifiedExecInteraction + ) + { + // TerminalInteraction is the v2 surface for unified exec + // stdin/poll events. Suppress the legacy CommandExecution + // item so clients do not render the same wait twice. 
+ return; + } let item_id = exec_command_begin_event.call_id.clone(); + let cwd = exec_command_begin_event.cwd.clone(); let command_actions = exec_command_begin_event .parsed_cmd .into_iter() - .map(V2ParsedCommand::from) + .map(|parsed| V2ParsedCommand::from_core_with_cwd(parsed, &cwd)) .collect::>(); let command = shlex_join(&exec_command_begin_event.command); - let cwd = exec_command_begin_event.cwd; let process_id = exec_command_begin_event.process_id; let first_start = { let mut state = thread_state.lock().await; @@ -1702,6 +1750,17 @@ pub(crate) async fn apply_bespoke_event_handling( .command_execution_started .remove(&call_id); } + if matches!(api_version, ApiVersion::V2) + && matches!( + exec_command_end_event.source, + codex_protocol::protocol::ExecCommandSource::UnifiedExecInteraction + ) + { + // The paired begin event is suppressed above; keep the + // completion out of v2 as well so no orphan legacy item is + // emitted for unified exec interactions. + return; + } let item = build_command_execution_end_item(&exec_command_end_event); @@ -1746,6 +1805,7 @@ pub(crate) async fn apply_bespoke_event_handling( conversation_id, event_turn_id, turn_aborted_event, + analytics_events_client.as_ref(), &outgoing, &thread_state, ) @@ -1774,7 +1834,8 @@ pub(crate) async fn apply_bespoke_event_handling( .await { Ok(summary) => { - let mut thread = summary_to_thread(summary); + let fallback_cwd = conversation.config_snapshot().await.cwd; + let mut thread = summary_to_thread(summary, &fallback_cwd); match read_rollout_items_from_rollout(rollout_path.as_path()).await { Ok(items) => { thread.turns = build_turns_from_rollout_items(&items); @@ -1923,6 +1984,7 @@ async fn emit_turn_completed_with_status( conversation_id: ThreadId, event_turn_id: String, turn_completion_metadata: TurnCompletionMetadata, + analytics_events_client: Option<&AnalyticsEventsClient>, outgoing: &ThreadScopedOutgoingMessageSender, ) { let notification = TurnCompletedNotification { @@ -1937,6 +1999,10 
@@ async fn emit_turn_completed_with_status( duration_ms: turn_completion_metadata.duration_ms, }, }; + if let Some(analytics_events_client) = analytics_events_client { + analytics_events_client + .track_notification(ServerNotification::TurnCompleted(notification.clone())); + } outgoing .send_server_notification(ServerNotification::TurnCompleted(notification)) .await; @@ -1970,7 +2036,7 @@ async fn start_command_execution_item( turn_id: String, item_id: String, command: String, - cwd: PathBuf, + cwd: AbsolutePathBuf, command_actions: Vec, source: CommandExecutionSource, outgoing: &ThreadScopedOutgoingMessageSender, @@ -2013,7 +2079,7 @@ async fn complete_command_execution_item( turn_id: String, item_id: String, command: String, - cwd: PathBuf, + cwd: AbsolutePathBuf, process_id: Option, source: CommandExecutionSource, command_actions: Vec, @@ -2129,6 +2195,7 @@ async fn handle_turn_complete( conversation_id: ThreadId, event_turn_id: String, turn_complete_event: TurnCompleteEvent, + analytics_events_client: Option<&AnalyticsEventsClient>, outgoing: &ThreadScopedOutgoingMessageSender, thread_state: &Arc>, ) { @@ -2149,6 +2216,7 @@ async fn handle_turn_complete( completed_at: turn_complete_event.completed_at, duration_ms: turn_complete_event.duration_ms, }, + analytics_events_client, outgoing, ) .await; @@ -2158,6 +2226,7 @@ async fn handle_turn_interrupted( conversation_id: ThreadId, event_turn_id: String, turn_aborted_event: TurnAbortedEvent, + analytics_events_client: Option<&AnalyticsEventsClient>, outgoing: &ThreadScopedOutgoingMessageSender, thread_state: &Arc>, ) { @@ -2173,6 +2242,7 @@ async fn handle_turn_interrupted( completed_at: turn_aborted_event.completed_at, duration_ms: turn_aborted_event.duration_ms, }, + analytics_events_client, outgoing, ) .await; @@ -2840,6 +2910,7 @@ async fn construct_mcp_tool_call_notification( tool: begin_event.invocation.tool, status: McpToolCallStatus::InProgress, arguments: 
begin_event.invocation.arguments.unwrap_or(JsonValue::Null), + mcp_app_resource_uri: begin_event.mcp_app_resource_uri, result: None, error: None, duration_ms: None, @@ -2866,11 +2937,11 @@ async fn construct_mcp_tool_call_end_notification( let (result, error) = match &end_event.result { Ok(value) => ( - Some(McpToolCallResult { + Some(Box::new(McpToolCallResult { content: value.content.clone(), structured_content: value.structured_content.clone(), meta: value.meta.clone(), - }), + })), None, ), Err(message) => ( @@ -2887,6 +2958,7 @@ async fn construct_mcp_tool_call_end_notification( tool: end_event.invocation.tool, status, arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null), + mcp_app_resource_uri: end_event.mcp_app_resource_uri, result, error, duration_ms, @@ -2913,6 +2985,7 @@ mod tests { use codex_app_server_protocol::GuardianApprovalReviewStatus; use codex_app_server_protocol::JSONRPCErrorError; use codex_app_server_protocol::TurnPlanStepStatus; + use codex_login::AuthManager; use codex_login::CodexAuth; use codex_protocol::items::HookPromptFragment; use codex_protocol::items::build_hook_prompt_message; @@ -2932,6 +3005,8 @@ mod tests { use codex_protocol::protocol::TokenUsage; use codex_protocol::protocol::TokenUsageInfo; use codex_utils_absolute_path::AbsolutePathBuf; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use core_test_support::load_default_config_for_test; use pretty_assertions::assert_eq; use rmcp::model::Content; @@ -2984,7 +3059,7 @@ mod tests { fn command_execution_completion_item(command: &str) -> CommandExecutionCompletionItem { CommandExecutionCompletionItem { command: command.to_string(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), command_actions: vec![V2ParsedCommand::Unknown { command: command.to_string(), }], @@ -3030,7 +3105,7 @@ mod tests { "type": "command", "source": "shell", "command": format!("rm -f /tmp/{id}.sqlite"), 
- "cwd": "/tmp", + "cwd": test_path_buf("/tmp"), })) .expect("guardian action"), } @@ -3043,6 +3118,7 @@ mod tests { outgoing: ThreadScopedOutgoingMessageSender, thread_state: Arc>, thread_watch_manager: ThreadWatchManager, + analytics_events_client: AnalyticsEventsClient, codex_home: PathBuf, } @@ -3057,6 +3133,7 @@ mod tests { self.conversation_id, self.conversation.clone(), self.thread_manager.clone(), + Some(self.analytics_events_client.clone()), self.outgoing.clone(), self.thread_state.clone(), self.thread_watch_manager.clone(), @@ -3074,7 +3151,7 @@ mod tests { let action = codex_protocol::protocol::GuardianAssessmentAction::Command { source: codex_protocol::protocol::GuardianCommandSource::Shell, command: "rm -rf /tmp/example.sqlite".to_string(), - cwd: "/tmp".into(), + cwd: test_path_buf("/tmp").abs(), }; let notification = guardian_auto_approval_review_notification( &conversation_id, @@ -3117,7 +3194,7 @@ mod tests { let action = codex_protocol::protocol::GuardianAssessmentAction::Command { source: codex_protocol::protocol::GuardianCommandSource::Shell, command: "rm -rf /tmp/example.sqlite".to_string(), - cwd: "/tmp".into(), + cwd: test_path_buf("/tmp").abs(), }; let notification = guardian_auto_approval_review_notification( &conversation_id, @@ -3356,7 +3433,7 @@ mod tests { codex_core::test_support::thread_manager_with_models_provider_and_home( CodexAuth::create_dummy_chatgpt_auth_for_testing(), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -3383,6 +3460,13 @@ mod tests { outgoing: outgoing.clone(), thread_state: thread_state.clone(), thread_watch_manager: thread_watch_manager.clone(), + analytics_events_client: AnalyticsEventsClient::new( + AuthManager::from_auth_for_testing( + CodexAuth::create_dummy_chatgpt_auth_for_testing(), + ), + "http://localhost".to_string(), + Some(false), + ), codex_home: 
codex_home.path().to_path_buf(), }; @@ -3833,6 +3917,7 @@ mod tests { conversation_id, event_turn_id.clone(), turn_complete_event(&event_turn_id), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -3881,6 +3966,7 @@ mod tests { conversation_id, event_turn_id.clone(), turn_aborted_event(&event_turn_id), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -3928,6 +4014,7 @@ mod tests { conversation_id, event_turn_id.clone(), turn_complete_event(&event_turn_id), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -4134,6 +4221,7 @@ mod tests { tool: "list_mcp_resources".to_string(), arguments: Some(serde_json::json!({"server": ""})), }, + mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()), }; let thread_id = ThreadId::new().to_string(); @@ -4154,6 +4242,7 @@ mod tests { tool: begin_event.invocation.tool, status: McpToolCallStatus::InProgress, arguments: serde_json::json!({"server": ""}), + mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()), result: None, error: None, duration_ms: None, @@ -4194,6 +4283,7 @@ mod tests { conversation_a, a_turn1.clone(), turn_complete_event(&a_turn1), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -4215,6 +4305,7 @@ mod tests { conversation_b, b_turn1.clone(), turn_complete_event(&b_turn1), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -4226,6 +4317,7 @@ mod tests { conversation_a, a_turn2.clone(), turn_complete_event(&a_turn2), + /*analytics_events_client*/ None, &outgoing, &thread_state, ) @@ -4291,6 +4383,7 @@ mod tests { tool: "list_mcp_resources".to_string(), arguments: None, }, + mcp_app_resource_uri: None, }; let thread_id = ThreadId::new().to_string(); @@ -4311,6 +4404,7 @@ mod tests { tool: begin_event.invocation.tool, status: McpToolCallStatus::InProgress, arguments: JsonValue::Null, + mcp_app_resource_uri: None, result: None, error: None, duration_ms: None, @@ -4342,6 +4436,7 @@ mod tests { 
tool: "list_mcp_resources".to_string(), arguments: Some(serde_json::json!({"server": ""})), }, + mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()), duration: Duration::from_nanos(92708), result: Ok(result), }; @@ -4364,13 +4459,14 @@ mod tests { tool: end_event.invocation.tool, status: McpToolCallStatus::Completed, arguments: serde_json::json!({"server": ""}), - result: Some(McpToolCallResult { + mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()), + result: Some(Box::new(McpToolCallResult { content, structured_content: None, meta: Some(serde_json::json!({ "ui/resourceUri": "ui://widget/list-resources.html" })), - }), + })), error: None, duration_ms: Some(0), }, @@ -4388,6 +4484,7 @@ mod tests { tool: "list_mcp_resources".to_string(), arguments: None, }, + mcp_app_resource_uri: None, duration: Duration::from_millis(1), result: Err("boom".to_string()), }; @@ -4410,6 +4507,7 @@ mod tests { tool: end_event.invocation.tool, status: McpToolCallStatus::Failed, arguments: JsonValue::Null, + mcp_app_resource_uri: None, result: None, error: Some(McpToolCallError { message: "boom".to_string(), diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 6762b9e129..6a9dfb4042 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -22,6 +22,9 @@ use chrono::DateTime; use chrono::SecondsFormat; use chrono::Utc; use codex_analytics::AnalyticsEventsClient; +use codex_analytics::AnalyticsJsonRpcError; +use codex_analytics::InputError; +use codex_analytics::TurnSteerRequestError; use codex_app_server_protocol::Account; use codex_app_server_protocol::AccountLoginCompletedNotification; use codex_app_server_protocol::AccountUpdatedNotification; @@ -36,7 +39,7 @@ use codex_app_server_protocol::CancelLoginAccountResponse; use codex_app_server_protocol::CancelLoginAccountStatus; use 
codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ClientResponse; -use codex_app_server_protocol::CodexErrorInfo as AppServerCodexErrorInfo; +use codex_app_server_protocol::CodexErrorInfo; use codex_app_server_protocol::CollaborationModeListParams; use codex_app_server_protocol::CollaborationModeListResponse; use codex_app_server_protocol::CommandExecParams; @@ -76,6 +79,8 @@ use codex_app_server_protocol::LoginAccountParams; use codex_app_server_protocol::LoginAccountResponse; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::LogoutAccountResponse; +use codex_app_server_protocol::MarketplaceAddParams; +use codex_app_server_protocol::MarketplaceAddResponse; use codex_app_server_protocol::MarketplaceInterface; use codex_app_server_protocol::McpResourceReadParams; use codex_app_server_protocol::McpResourceReadResponse; @@ -87,6 +92,7 @@ use codex_app_server_protocol::McpServerStatus; use codex_app_server_protocol::McpServerStatusDetail; use codex_app_server_protocol::McpServerToolCallParams; use codex_app_server_protocol::McpServerToolCallResponse; +use codex_app_server_protocol::MemoryResetResponse; use codex_app_server_protocol::MockExperimentalMethodParams; use codex_app_server_protocol::MockExperimentalMethodResponse; use codex_app_server_protocol::ModelListParams; @@ -132,11 +138,15 @@ use codex_app_server_protocol::ThreadForkParams; use codex_app_server_protocol::ThreadForkResponse; use codex_app_server_protocol::ThreadIncrementElicitationParams; use codex_app_server_protocol::ThreadIncrementElicitationResponse; +use codex_app_server_protocol::ThreadInjectItemsParams; +use codex_app_server_protocol::ThreadInjectItemsResponse; use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadListParams; use codex_app_server_protocol::ThreadListResponse; use codex_app_server_protocol::ThreadLoadedListParams; use codex_app_server_protocol::ThreadLoadedListResponse; +use 
codex_app_server_protocol::ThreadMemoryModeSetParams; +use codex_app_server_protocol::ThreadMemoryModeSetResponse; use codex_app_server_protocol::ThreadMetadataGitInfoUpdateParams; use codex_app_server_protocol::ThreadMetadataUpdateParams; use codex_app_server_protocol::ThreadMetadataUpdateResponse; @@ -176,6 +186,7 @@ use codex_app_server_protocol::ThreadUnsubscribeStatus; use codex_app_server_protocol::Turn; use codex_app_server_protocol::TurnError; use codex_app_server_protocol::TurnInterruptParams; +use codex_app_server_protocol::TurnInterruptResponse; use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; @@ -193,7 +204,6 @@ use codex_chatgpt::connectors; use codex_cloud_requirements::cloud_requirements_loader; use codex_config::types::McpServerTransportConfig; use codex_core::CodexThread; -use codex_core::Cursor as RolloutCursor; use codex_core::ForkSnapshot; use codex_core::NewThread; use codex_core::RolloutRecorder; @@ -201,8 +211,8 @@ use codex_core::SessionMeta; use codex_core::SteerInputError; use codex_core::ThreadConfigSnapshot; use codex_core::ThreadManager; -use codex_core::ThreadSortKey as CoreThreadSortKey; use codex_core::append_thread_name; +use codex_core::clear_memory_roots_contents; use codex_core::config::Config; use codex_core::config::ConfigOverrides; use codex_core::config::NetworkProxyAuditMetadata; @@ -213,6 +223,7 @@ use codex_core::config_loader::CloudRequirementsLoadErrorCode; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; use codex_core::config_loader::load_config_layers_state; +use codex_core::config_loader::project_trust_key; use codex_core::exec::ExecCapturePolicy; use codex_core::exec::ExecExpiration; use codex_core::exec::ExecParams; @@ -221,24 +232,25 @@ use codex_core::find_archived_thread_path_by_id_str; use codex_core::find_thread_name_by_id; use codex_core::find_thread_names_by_ids; 
use codex_core::find_thread_path_by_id_str; -use codex_core::parse_cursor; use codex_core::path_utils; -use codex_core::plugins::MarketplaceError; -use codex_core::plugins::MarketplacePluginSource; +use codex_core::plugins::MarketplaceAddError; use codex_core::plugins::OPENAI_CURATED_MARKETPLACE_NAME; use codex_core::plugins::PluginInstallError as CorePluginInstallError; use codex_core::plugins::PluginInstallRequest; use codex_core::plugins::PluginReadRequest; use codex_core::plugins::PluginUninstallError as CorePluginUninstallError; -use codex_core::plugins::load_plugin_apps; -use codex_core::plugins::load_plugin_mcp_servers; +use codex_core::plugins::add_marketplace as add_marketplace_to_codex_home; use codex_core::read_head_for_summary; use codex_core::read_session_meta_line; -use codex_core::rollout_date_parts; use codex_core::sandboxing::SandboxPermissions; use codex_core::windows_sandbox::WindowsSandboxLevelExt; use codex_core::windows_sandbox::WindowsSandboxSetupMode as CoreWindowsSandboxSetupMode; use codex_core::windows_sandbox::WindowsSandboxSetupRequest; +use codex_core_plugins::loader::load_plugin_apps; +use codex_core_plugins::loader::load_plugin_mcp_servers; +use codex_core_plugins::manifest::PluginManifestInterface; +use codex_core_plugins::marketplace::MarketplaceError; +use codex_core_plugins::marketplace::MarketplacePluginSource; use codex_exec_server::LOCAL_FS; use codex_features::FEATURES; use codex_features::Feature; @@ -309,15 +321,20 @@ use codex_state::StateRuntime; use codex_state::ThreadMetadata; use codex_state::ThreadMetadataBuilder; use codex_state::log_db::LogDbLayer; +use codex_thread_store::ArchiveThreadParams as StoreArchiveThreadParams; +use codex_thread_store::ListThreadsParams as StoreListThreadsParams; +use codex_thread_store::LocalThreadStore; +use codex_thread_store::ReadThreadParams as StoreReadThreadParams; +use codex_thread_store::StoredThread; +use codex_thread_store::ThreadSortKey as StoreThreadSortKey; +use 
codex_thread_store::ThreadStore; +use codex_thread_store::ThreadStoreError; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_json_to_toml::json_to_toml; use codex_utils_pty::DEFAULT_OUTPUT_BYTES_CAP; use std::collections::BTreeMap; use std::collections::HashMap; use std::collections::HashSet; -use std::ffi::OsStr; -use std::fs::FileTimes; -use std::fs::OpenOptions; use std::io::Error as IoError; use std::path::Path; use std::path::PathBuf; @@ -326,7 +343,7 @@ use std::sync::RwLock; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::time::Duration; -use std::time::SystemTime; +use std::time::Instant; use tokio::sync::Mutex; use tokio::sync::broadcast; use tokio::sync::oneshot; @@ -346,12 +363,17 @@ use codex_app_server_protocol::ServerRequest; mod apps_list_helpers; mod plugin_app_helpers; mod plugin_mcp_oauth; +mod token_usage_replay; use crate::filters::compute_source_filters; use crate::filters::source_kind_matches; use crate::thread_state::ThreadListenerCommand; use crate::thread_state::ThreadState; use crate::thread_state::ThreadStateManager; +use token_usage_replay::latest_token_usage_turn_id_for_thread_path; +use token_usage_replay::latest_token_usage_turn_id_from_rollout_items; +use token_usage_replay::latest_token_usage_turn_id_from_rollout_path; +use token_usage_replay::send_thread_token_usage_update_to_connection; const THREAD_LIST_DEFAULT_LIMIT: usize = 25; const THREAD_LIST_MAX_LIMIT: usize = 100; @@ -368,6 +390,7 @@ struct ThreadListFilters { const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60); const LOGIN_ISSUER_OVERRIDE_ENV_VAR: &str = "CODEX_APP_SERVER_LOGIN_ISSUER"; const APP_LIST_LOAD_TIMEOUT: Duration = Duration::from_secs(90); +const THREAD_UNLOADING_DELAY: Duration = Duration::from_secs(30 * 60); enum ActiveLogin { Browser { @@ -429,6 +452,7 @@ pub(crate) struct CodexMessageProcessor { analytics_events_client: AnalyticsEventsClient, arg0_paths: Arg0DispatchPaths, config: Arc, + 
thread_store: LocalThreadStore, cli_overrides: Arc>>, runtime_feature_enablement: Arc>>, cloud_requirements: Arc>, @@ -457,6 +481,7 @@ struct ListenerTaskContext { thread_manager: Arc, thread_state_manager: ThreadStateManager, outgoing: Arc, + pending_thread_unloads: Arc>>, analytics_events_client: AnalyticsEventsClient, general_analytics_enabled: bool, thread_watch_manager: ThreadWatchManager, @@ -477,6 +502,110 @@ enum RefreshTokenRequestOutcome { FailedPermanently, } +struct UnloadingState { + delay: Duration, + has_subscribers_rx: watch::Receiver, + has_subscribers: (bool, Instant), + thread_status_rx: watch::Receiver, + is_active: (bool, Instant), +} + +impl UnloadingState { + async fn new( + listener_task_context: &ListenerTaskContext, + thread_id: ThreadId, + delay: Duration, + ) -> Option { + let has_subscribers_rx = listener_task_context + .thread_state_manager + .subscribe_to_has_connections(thread_id) + .await?; + let thread_status_rx = listener_task_context + .thread_watch_manager + .subscribe(thread_id) + .await?; + let has_subscribers = (*has_subscribers_rx.borrow(), Instant::now()); + let is_active = ( + matches!(*thread_status_rx.borrow(), ThreadStatus::Active { .. }), + Instant::now(), + ); + Some(Self { + delay, + has_subscribers_rx, + thread_status_rx, + has_subscribers, + is_active, + }) + } + + fn unloading_target(&self) -> Option { + match (self.has_subscribers, self.is_active) { + ((false, has_no_subscribers_since), (false, is_inactive_since)) => { + Some(std::cmp::max(has_no_subscribers_since, is_inactive_since) + self.delay) + } + _ => None, + } + } + + fn sync_receiver_values(&mut self) { + let has_subscribers = *self.has_subscribers_rx.borrow(); + if self.has_subscribers.0 != has_subscribers { + self.has_subscribers = (has_subscribers, Instant::now()); + } + + let is_active = matches!(*self.thread_status_rx.borrow(), ThreadStatus::Active { .. 
}); + if self.is_active.0 != is_active { + self.is_active = (is_active, Instant::now()); + } + } + + fn should_unload_now(&mut self) -> bool { + self.sync_receiver_values(); + self.unloading_target() + .is_some_and(|target| target <= Instant::now()) + } + + fn note_thread_activity_observed(&mut self) { + if !self.is_active.0 { + self.is_active = (false, Instant::now()); + } + } + + async fn wait_for_unloading_trigger(&mut self) -> bool { + loop { + self.sync_receiver_values(); + let unloading_target = self.unloading_target(); + if let Some(target) = unloading_target + && target <= Instant::now() + { + return true; + } + let unloading_sleep = async { + if let Some(target) = unloading_target { + tokio::time::sleep_until(target.into()).await; + } else { + futures::future::pending::<()>().await; + } + }; + tokio::select! { + _ = unloading_sleep => return true, + changed = self.has_subscribers_rx.changed() => { + if changed.is_err() { + return false; + } + self.sync_receiver_values(); + }, + changed = self.thread_status_rx.changed() => { + if changed.is_err() { + return false; + } + self.sync_receiver_values(); + }, + } + } + } +} + pub(crate) struct CodexMessageProcessorArgs { pub(crate) auth_manager: Arc, pub(crate) thread_manager: Arc, @@ -492,21 +621,10 @@ pub(crate) struct CodexMessageProcessorArgs { } impl CodexMessageProcessor { - async fn instruction_sources_from_config(config: &Config) -> Vec { - let mut paths: Vec = config.user_instructions_path.iter().cloned().collect(); - match codex_core::discover_project_doc_paths(config, LOCAL_FS.as_ref()).await { - Ok(project_doc_paths) => { - paths.extend( - project_doc_paths - .into_iter() - .map(|path| path.as_path().to_path_buf()), - ); - } - Err(err) => { - tracing::warn!(error = %err, "failed to discover project docs for thread response"); - } - } - paths + async fn instruction_sources_from_config(config: &Config) -> Vec { + codex_core::AgentsMdManager::new(config) + .instruction_sources(LOCAL_FS.as_ref()) + .await 
} pub(crate) fn handle_config_mutation(&self) { @@ -526,6 +644,22 @@ impl CodexMessageProcessor { } } + fn track_error_response( + &self, + request_id: &ConnectionRequestId, + error: &JSONRPCErrorError, + error_type: Option, + ) { + if self.config.features.enabled(Feature::GeneralAnalytics) { + self.analytics_events_client.track_error_response( + request_id.connection_id.0, + request_id.request_id.clone(), + error.clone(), + error_type, + ); + } + } + async fn load_thread( &self, thread_id: &str, @@ -569,6 +703,7 @@ impl CodexMessageProcessor { outgoing: outgoing.clone(), analytics_events_client, arg0_paths, + thread_store: LocalThreadStore::new(codex_rollout::RolloutConfig::from_view(&config)), config, cli_overrides, runtime_feature_enablement, @@ -772,6 +907,14 @@ impl CodexMessageProcessor { self.thread_metadata_update(to_connection_request_id(request_id), params) .await; } + ClientRequest::ThreadMemoryModeSet { request_id, params } => { + self.thread_memory_mode_set(to_connection_request_id(request_id), params) + .await; + } + ClientRequest::MemoryReset { request_id, params } => { + self.memory_reset(to_connection_request_id(request_id), params) + .await; + } ClientRequest::ThreadUnarchive { request_id, params } => { self.thread_unarchive(to_connection_request_id(request_id), params) .await; @@ -811,6 +954,10 @@ impl CodexMessageProcessor { self.skills_list(to_connection_request_id(request_id), params) .await; } + ClientRequest::MarketplaceAdd { request_id, params } => { + self.marketplace_add(to_connection_request_id(request_id), params) + .await; + } ClientRequest::PluginList { request_id, params } => { self.plugin_list(to_connection_request_id(request_id), params) .await; @@ -844,6 +991,10 @@ impl CodexMessageProcessor { ) .await; } + ClientRequest::ThreadInjectItems { request_id, params } => { + self.thread_inject_items(to_connection_request_id(request_id), params) + .await; + } ClientRequest::TurnSteer { request_id, params } => { 
self.turn_steer(to_connection_request_id(request_id), params) .await; @@ -1158,7 +1309,7 @@ impl CodexMessageProcessor { let opts = LoginServerOptions { open_browser: false, ..LoginServerOptions::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), CLIENT_ID.to_string(), config.forced_chatgpt_workspace_id.clone(), config.cli_auth_credentials_store_mode, @@ -1220,7 +1371,7 @@ impl CodexMessageProcessor { let auth_manager = self.auth_manager.clone(); let cloud_requirements = self.cloud_requirements.clone(); let chatgpt_base_url = self.config.chatgpt_base_url.clone(); - let codex_home = self.config.codex_home.clone(); + let codex_home = self.config.codex_home.to_path_buf(); let cli_overrides = self.current_cli_overrides(); let auth_url = server.auth_url.clone(); tokio::spawn(async move { @@ -1337,7 +1488,7 @@ impl CodexMessageProcessor { let auth_manager = self.auth_manager.clone(); let cloud_requirements = self.cloud_requirements.clone(); let chatgpt_base_url = self.config.chatgpt_base_url.clone(); - let codex_home = self.config.codex_home.clone(); + let codex_home = self.config.codex_home.to_path_buf(); let cli_overrides = self.current_cli_overrides(); tokio::spawn(async move { let (success, error_msg) = tokio::select! 
{ @@ -1509,7 +1660,7 @@ impl CodexMessageProcessor { self.cloud_requirements.as_ref(), self.auth_manager.clone(), self.config.chatgpt_base_url.clone(), - self.config.codex_home.clone(), + self.config.codex_home.to_path_buf(), ); let cli_overrides = self.current_cli_overrides(); sync_default_client_residency_requirement(&cli_overrides, self.cloud_requirements.as_ref()) @@ -2015,7 +2166,7 @@ impl CodexMessageProcessor { &effective_policy, &effective_file_system_sandbox_policy, effective_network_sandbox_policy, - sandbox_cwd.as_path(), + &sandbox_cwd, &codex_linux_sandbox_exe, use_legacy_landlock, ) { @@ -2142,11 +2293,12 @@ impl CodexMessageProcessor { thread_manager: Arc::clone(&self.thread_manager), thread_state_manager: self.thread_state_manager.clone(), outgoing: Arc::clone(&self.outgoing), + pending_thread_unloads: Arc::clone(&self.pending_thread_unloads), analytics_events_client: self.analytics_events_client.clone(), general_analytics_enabled: self.config.features.enabled(Feature::GeneralAnalytics), thread_watch_manager: self.thread_watch_manager.clone(), fallback_model_provider: self.config.model_provider_id.clone(), - codex_home: self.config.codex_home.clone(), + codex_home: self.config.codex_home.to_path_buf(), }; let request_trace = request_context.request_trace(); let runtime_feature_enablement = self.current_runtime_feature_enablement(); @@ -2289,27 +2441,45 @@ impl CodexMessageProcessor { | codex_protocol::protocol::SandboxPolicy::ExternalSandbox { .. 
} )) { - let trust_target = resolve_root_git_project_for_trust(config.cwd.as_path()) - .unwrap_or_else(|| config.cwd.to_path_buf()); - if let Err(err) = codex_core::config::set_project_trust_level( - &listener_task_context.codex_home, - trust_target.as_path(), - TrustLevel::Trusted, - ) { - let error = JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to persist trusted project state: {err}"), - data: None, - }; - listener_task_context - .outgoing - .send_error(request_id, error) - .await; - return; - } + let trust_target = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &config.cwd) + .await + .unwrap_or_else(|| config.cwd.clone()); + let cli_overrides_with_trust; + let cli_overrides_for_reload = if let Err(err) = + codex_core::config::set_project_trust_level( + &listener_task_context.codex_home, + trust_target.as_path(), + TrustLevel::Trusted, + ) { + warn!( + "failed to persist trusted project state for {}; continuing with in-memory trust for this thread: {err}", + trust_target.display() + ); + let mut project = toml::map::Map::new(); + project.insert( + "trust_level".to_string(), + TomlValue::String("trusted".to_string()), + ); + let mut projects = toml::map::Map::new(); + projects.insert( + project_trust_key(trust_target.as_path()), + TomlValue::Table(project), + ); + cli_overrides_with_trust = cli_overrides + .iter() + .cloned() + .chain(std::iter::once(( + "projects".to_string(), + TomlValue::Table(projects), + ))) + .collect::>(); + cli_overrides_with_trust.as_slice() + } else { + &cli_overrides + }; config = match derive_config_from_params( - &cli_overrides, + cli_overrides_for_reload, config_overrides, typesafe_overrides, &cloud_requirements, @@ -2547,7 +2717,6 @@ impl CodexMessageProcessor { } async fn thread_archive(&self, request_id: ConnectionRequestId, params: ThreadArchiveParams) { - // TODO(jif) mostly rewrite this using sqlite after phase 1 let thread_id = match ThreadId::from_string(¶ms.thread_id) { Ok(id) => id, 
Err(err) => { @@ -2561,32 +2730,28 @@ impl CodexMessageProcessor { } }; - let rollout_path = - match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string()).await - { - Ok(Some(p)) => p, - Ok(None) => { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("no rollout found for thread id {thread_id}"), - data: None, - }; - self.outgoing.send_error(request_id, error).await; - return; - } - Err(err) => { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("failed to locate thread id {thread_id}: {err}"), - data: None, - }; - self.outgoing.send_error(request_id, error).await; - return; - } - }; - let thread_id_str = thread_id.to_string(); - match self.archive_thread_common(thread_id, &rollout_path).await { + if let Err(err) = self + .thread_store + .read_thread(StoreReadThreadParams { + thread_id, + include_archived: false, + include_history: false, + }) + .await + { + self.outgoing + .send_error(request_id, thread_store_archive_error("archive", err)) + .await; + return; + } + self.prepare_thread_for_archive(thread_id).await; + + match self + .thread_store + .archive_thread(StoreArchiveThreadParams { thread_id }) + .await + { Ok(()) => { let response = ThreadArchiveResponse {}; self.outgoing.send_response(request_id, response).await; @@ -2598,7 +2763,9 @@ impl CodexMessageProcessor { .await; } Err(err) => { - self.outgoing.send_error(request_id, err).await; + self.outgoing + .send_error(request_id, thread_store_archive_error("archive", err)) + .await; } } } @@ -2772,6 +2939,162 @@ impl CodexMessageProcessor { .await; } + async fn thread_memory_mode_set( + &self, + request_id: ConnectionRequestId, + params: ThreadMemoryModeSetParams, + ) { + let ThreadMemoryModeSetParams { thread_id, mode } = params; + let thread_id = match ThreadId::from_string(&thread_id) { + Ok(id) => id, + Err(err) => { + self.send_invalid_request_error(request_id, format!("invalid thread id: {err}")) + 
.await; + return; + } + }; + + if let Ok(thread) = self.thread_manager.get_thread(thread_id).await { + if thread.config_snapshot().await.ephemeral { + self.send_invalid_request_error( + request_id, + format!("ephemeral thread does not support memory mode updates: {thread_id}"), + ) + .await; + return; + } + + if let Err(err) = thread.set_thread_memory_mode(mode.to_core()).await { + self.send_internal_error( + request_id, + format!("failed to set thread memory mode: {err}"), + ) + .await; + return; + } + + self.outgoing + .send_response(request_id, ThreadMemoryModeSetResponse {}) + .await; + return; + } + + let rollout_path = + match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string()).await + { + Ok(Some(path)) => Some(path), + Ok(None) => None, + Err(err) => { + self.send_invalid_request_error( + request_id, + format!("failed to locate thread id {thread_id}: {err}"), + ) + .await; + return; + } + }; + + let Some(rollout_path) = rollout_path else { + self.send_invalid_request_error(request_id, format!("thread not found: {thread_id}")) + .await; + return; + }; + + let mut session_meta = match read_session_meta_line(rollout_path.as_path()).await { + Ok(session_meta) => session_meta, + Err(err) => { + self.send_internal_error( + request_id, + format!("failed to set thread memory mode: {err}"), + ) + .await; + return; + } + }; + if session_meta.meta.id != thread_id { + self.send_internal_error( + request_id, + format!( + "failed to set thread memory mode: rollout session metadata id mismatch: expected {thread_id}, found {}", + session_meta.meta.id + ), + ) + .await; + return; + } + session_meta.meta.memory_mode = Some(mode.as_str().to_string()); + let item = RolloutItem::SessionMeta(session_meta); + + if let Err(err) = append_rollout_item_to_path(rollout_path.as_path(), &item).await { + self.send_internal_error( + request_id, + format!("failed to set thread memory mode: {err}"), + ) + .await; + return; + } + + let state_db_ctx = 
open_state_db_for_direct_thread_lookup(&self.config).await; + reconcile_rollout( + state_db_ctx.as_deref(), + rollout_path.as_path(), + self.config.model_provider_id.as_str(), + /*builder*/ None, + &[], + /*archived_only*/ None, + /*new_thread_memory_mode*/ None, + ) + .await; + + self.outgoing + .send_response(request_id, ThreadMemoryModeSetResponse {}) + .await; + } + + async fn memory_reset(&self, request_id: ConnectionRequestId, _params: Option<()>) { + let state_db = match StateRuntime::init( + self.config.sqlite_home.clone(), + self.config.model_provider_id.clone(), + ) + .await + { + Ok(state_db) => state_db, + Err(err) => { + self.send_internal_error( + request_id, + format!("failed to open state db for memory reset: {err}"), + ) + .await; + return; + } + }; + + if let Err(err) = state_db.clear_memory_data().await { + self.send_internal_error( + request_id, + format!("failed to clear memory rows in state db: {err}"), + ) + .await; + return; + } + + if let Err(err) = clear_memory_roots_contents(&self.config.codex_home).await { + self.send_internal_error( + request_id, + format!( + "failed to clear memory directories under {}: {err}", + self.config.codex_home.display() + ), + ) + .await; + return; + } + + self.outgoing + .send_response(request_id, MemoryResetResponse {}) + .await; + } + async fn thread_metadata_update( &self, request_id: ConnectionRequestId, @@ -2924,7 +3247,7 @@ impl CodexMessageProcessor { return; }; - let mut thread = summary_to_thread(summary); + let mut thread = summary_to_thread(summary, &self.config.cwd); self.attach_thread_name(thread_uuid, &mut thread).await; thread.status = resolve_thread_status( self.thread_watch_manager @@ -3007,7 +3330,7 @@ impl CodexMessageProcessor { config_snapshot.session_source.clone(), ); builder.model_provider = Some(model_provider.clone()); - builder.cwd = config_snapshot.cwd.clone(); + builder.cwd = config_snapshot.cwd.to_path_buf(); builder.cli_version = Some(env!("CARGO_PKG_VERSION").to_string()); 
builder.sandbox_policy = config_snapshot.sandbox_policy.clone(); builder.approval_mode = config_snapshot.approval_policy; @@ -3075,7 +3398,6 @@ impl CodexMessageProcessor { request_id: ConnectionRequestId, params: ThreadUnarchiveParams, ) { - // TODO(jif) mostly rewrite this using sqlite after phase 1 let thread_id = match ThreadId::from_string(¶ms.thread_id) { Ok(id) => id, Err(err) => { @@ -3089,152 +3411,21 @@ impl CodexMessageProcessor { } }; - let archived_path = match find_archived_thread_path_by_id_str( - &self.config.codex_home, - &thread_id.to_string(), - ) - .await - { - Ok(Some(path)) => path, - Ok(None) => { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("no archived rollout found for thread id {thread_id}"), - data: None, - }; - self.outgoing.send_error(request_id, error).await; - return; - } - Err(err) => { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("failed to locate archived thread id {thread_id}: {err}"), - data: None, - }; - self.outgoing.send_error(request_id, error).await; - return; - } - }; - - let rollout_path_display = archived_path.display().to_string(); let fallback_provider = self.config.model_provider_id.clone(); - let state_db_ctx = get_state_db(&self.config).await; - let archived_folder = self - .config - .codex_home - .join(codex_core::ARCHIVED_SESSIONS_SUBDIR); - - let result: Result = async { - let canonical_archived_dir = tokio::fs::canonicalize(&archived_folder).await.map_err( - |err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!( - "failed to unarchive thread: unable to resolve archived directory: {err}" - ), - data: None, - }, - )?; - let canonical_rollout_path = tokio::fs::canonicalize(&archived_path).await; - let canonical_rollout_path = if let Ok(path) = canonical_rollout_path - && path.starts_with(&canonical_archived_dir) - { - path - } else { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - 
message: format!( - "rollout path `{rollout_path_display}` must be in archived directory" - ), - data: None, - }); - }; - - let required_suffix = format!("{thread_id}.jsonl"); - let Some(file_name) = canonical_rollout_path.file_name().map(OsStr::to_owned) else { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("rollout path `{rollout_path_display}` missing file name"), - data: None, - }); - }; - if !file_name - .to_string_lossy() - .ends_with(required_suffix.as_str()) - { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "rollout path `{rollout_path_display}` does not match thread id {thread_id}" - ), - data: None, - }); - } - - let Some((year, month, day)) = rollout_date_parts(&file_name) else { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "rollout path `{rollout_path_display}` missing filename timestamp" - ), - data: None, - }); - }; - - let sessions_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR); - let dest_dir = sessions_folder.join(year).join(month).join(day); - let restored_path = dest_dir.join(&file_name); - tokio::fs::create_dir_all(&dest_dir) - .await - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to unarchive thread: {err}"), - data: None, - })?; - tokio::fs::rename(&canonical_rollout_path, &restored_path) - .await - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to unarchive thread: {err}"), - data: None, - })?; - tokio::task::spawn_blocking({ - let restored_path = restored_path.clone(); - move || -> std::io::Result<()> { - let times = FileTimes::new().set_modified(SystemTime::now()); - OpenOptions::new() - .append(true) - .open(&restored_path)? 
- .set_times(times)?; - Ok(()) - } - }) + let result = self + .thread_store + .unarchive_thread(StoreArchiveThreadParams { thread_id }) .await - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to update unarchived thread timestamp: {err}"), - data: None, - })? - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to update unarchived thread timestamp: {err}"), - data: None, - })?; - if let Some(ctx) = state_db_ctx { - let _ = ctx - .mark_unarchived(thread_id, restored_path.as_path()) - .await; - } - let summary = - read_summary_from_rollout(restored_path.as_path(), fallback_provider.as_str()) - .await - .map_err(|err| JSONRPCErrorError { + .map_err(|err| thread_store_archive_error("unarchive", err)) + .and_then(|stored_thread| { + summary_from_stored_thread(stored_thread, fallback_provider.as_str()) + .map(|summary| summary_to_thread(summary, &self.config.cwd)) + .ok_or_else(|| JSONRPCErrorError { code: INTERNAL_ERROR_CODE, - message: format!("failed to read unarchived thread: {err}"), + message: format!("failed to read unarchived thread {thread_id}"), data: None, - })?; - Ok(summary_to_thread(summary)) - } - .await; + }) + }); match result { Ok(mut thread) => { @@ -3460,15 +3651,15 @@ impl CodexMessageProcessor { .map(|value| value as usize) .unwrap_or(THREAD_LIST_DEFAULT_LIMIT) .clamp(1, THREAD_LIST_MAX_LIMIT); - let core_sort_key = match sort_key.unwrap_or(ThreadSortKey::CreatedAt) { - ThreadSortKey::CreatedAt => CoreThreadSortKey::CreatedAt, - ThreadSortKey::UpdatedAt => CoreThreadSortKey::UpdatedAt, + let store_sort_key = match sort_key.unwrap_or(ThreadSortKey::CreatedAt) { + ThreadSortKey::CreatedAt => StoreThreadSortKey::CreatedAt, + ThreadSortKey::UpdatedAt => StoreThreadSortKey::UpdatedAt, }; let (summaries, next_cursor) = match self .list_threads_common( requested_page_size, cursor, - core_sort_key, + store_sort_key, ThreadListFilters { model_providers, source_kinds, @@ 
-3493,7 +3684,7 @@ impl CodexMessageProcessor { let conversation_id = summary.conversation_id; thread_ids.insert(conversation_id); - let thread = summary_to_thread(summary); + let thread = summary_to_thread(summary, &self.config.cwd); status_ids.push(thread.id.clone()); threads.push((conversation_id, thread)); } @@ -3637,11 +3828,11 @@ impl CodexMessageProcessor { } let mut thread = if let Some(summary) = db_summary { - summary_to_thread(summary) + summary_to_thread(summary, &self.config.cwd) } else if let Some(rollout_path) = rollout_path.as_ref() { let fallback_provider = self.config.model_provider_id.as_str(); match read_summary_from_rollout(rollout_path, fallback_provider).await { - Ok(summary) => summary_to_thread(summary), + Ok(summary) => summary_to_thread(summary, &self.config.cwd), Err(err) => { self.send_internal_error( request_id, @@ -3748,17 +3939,17 @@ impl CodexMessageProcessor { self.command_exec_manager .connection_closed(connection_id) .await; - let thread_ids_with_no_subscribers = self + let thread_ids = self .thread_state_manager .remove_connection(connection_id) .await; - for thread_id in thread_ids_with_no_subscribers { - let Ok(thread) = self.thread_manager.get_thread(thread_id).await else { + + for thread_id in thread_ids { + if self.thread_manager.get_thread(thread_id).await.is_err() { + // Reconcile stale app-server bookkeeping when the thread has already been + // removed from the core manager. self.finalize_thread_teardown(thread_id).await; - continue; - }; - self.unload_thread_without_subscribers(thread_id, thread) - .await; + } } } @@ -3917,7 +4108,7 @@ impl CodexMessageProcessor { { Ok(NewThread { thread_id, - thread, + thread: codex_thread, session_configured, }) => { let SessionConfiguredEvent { rollout_path, .. 
} = session_configured; @@ -3946,7 +4137,7 @@ impl CodexMessageProcessor { let mut thread = match self .load_thread_from_resume_source_or_send_internal( thread_id, - thread.as_ref(), + codex_thread.as_ref(), &response_history, rollout_path.as_path(), fallback_model_provider.as_str(), @@ -3998,7 +4189,25 @@ impl CodexMessageProcessor { ); } + let connection_id = request_id.connection_id; + let token_usage_thread = response.thread.clone(); + let token_usage_turn_id = latest_token_usage_turn_id_from_rollout_items( + &response_history.get_rollout_items(), + &token_usage_thread, + ); self.outgoing.send_response(request_id, response).await; + // The client needs restored usage before it starts another turn. + // Sending after the response preserves JSON-RPC request ordering while + // still filling the status line before the next turn lifecycle begins. + send_thread_token_usage_update_to_connection( + &self.outgoing, + connection_id, + thread_id, + &token_usage_thread, + codex_thread.as_ref(), + token_usage_turn_id, + ) + .await; } Err(err) => { let error = JSONRPCErrorError { @@ -4124,13 +4333,18 @@ impl CodexMessageProcessor { .thread_state_manager .thread_state(existing_thread_id) .await; - self.ensure_listener_task_running( - existing_thread_id, - existing_thread.clone(), - thread_state.clone(), - ApiVersion::V2, - ) - .await; + if let Err(error) = self + .ensure_listener_task_running( + existing_thread_id, + existing_thread.clone(), + thread_state.clone(), + ApiVersion::V2, + ) + .await + { + self.outgoing.send_error(request_id, error).await; + return true; + } let config_snapshot = existing_thread.config_snapshot().await; let mismatch_details = collect_resume_override_mismatches(params, &config_snapshot); @@ -4142,9 +4356,7 @@ impl CodexMessageProcessor { ); } let mut config_for_instruction_sources = self.config.as_ref().clone(); - if let Ok(cwd) = AbsolutePathBuf::try_from(config_snapshot.cwd.clone()) { - config_for_instruction_sources.cwd = cwd; - } + 
config_for_instruction_sources.cwd = config_snapshot.cwd.clone(); let instruction_sources = Self::instruction_sources_from_config(&config_for_instruction_sources).await; let thread_summary = match load_thread_summary_for_rollout( @@ -4527,7 +4739,7 @@ impl CodexMessageProcessor { .await { Ok(summary) => { - let mut thread = summary_to_thread(summary); + let mut thread = summary_to_thread(summary, &self.config.cwd); thread.forked_from_id = forked_from_id_from_rollout(fork_rollout_path.as_path()).await; thread @@ -4631,7 +4843,31 @@ impl CodexMessageProcessor { ); } + let connection_id = request_id.connection_id; + let token_usage_thread = response.thread.clone(); + let token_usage_turn_id = if let Some(turn_id) = + latest_token_usage_turn_id_for_thread_path(&token_usage_thread).await + { + Some(turn_id) + } else { + latest_token_usage_turn_id_from_rollout_path( + rollout_path.as_path(), + &token_usage_thread, + ) + .await + }; self.outgoing.send_response(request_id, response).await; + // Mirror the resume contract for forks: the new thread is usable as soon + // as the response arrives, so restored usage must follow immediately. 
+ send_thread_token_usage_update_to_connection( + &self.outgoing, + connection_id, + thread_id, + &token_usage_thread, + forked_thread.as_ref(), + token_usage_turn_id, + ) + .await; let notif = ThreadStartedNotification { thread }; self.outgoing @@ -4656,7 +4892,7 @@ impl CodexMessageProcessor { let path = match params { GetConversationSummaryParams::RolloutPath { rollout_path } => { if rollout_path.is_relative() { - self.config.codex_home.join(&rollout_path) + self.config.codex_home.join(&rollout_path).to_path_buf() } else { rollout_path } @@ -4709,7 +4945,7 @@ impl CodexMessageProcessor { &self, requested_page_size: usize, cursor: Option, - sort_key: CoreThreadSortKey, + sort_key: StoreThreadSortKey, filters: ThreadListFilters, ) -> Result<(Vec, Option), JSONRPCErrorError> { let ThreadListFilters { @@ -4719,16 +4955,7 @@ impl CodexMessageProcessor { cwd, search_term, } = filters; - let mut cursor_obj: Option = match cursor.as_ref() { - Some(cursor_str) => { - Some(parse_cursor(cursor_str).ok_or_else(|| JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!("invalid cursor: {cursor_str}"), - data: None, - })?) 
- } - None => None, - }; + let mut cursor_obj = cursor; let mut last_cursor = cursor_obj.clone(); let mut remaining = requested_page_size; let mut items = Vec::with_capacity(requested_page_size); @@ -4747,54 +4974,26 @@ impl CodexMessageProcessor { let fallback_provider = self.config.model_provider_id.clone(); let (allowed_sources_vec, source_kind_filter) = compute_source_filters(source_kinds); let allowed_sources = allowed_sources_vec.as_slice(); - let state_db_ctx = get_state_db(&self.config).await; while remaining > 0 { let page_size = remaining.min(THREAD_LIST_MAX_LIMIT); - let page = if archived { - RolloutRecorder::list_archived_threads( - &self.config, + let page = self + .thread_store + .list_threads(StoreListThreadsParams { page_size, - cursor_obj.as_ref(), + cursor: cursor_obj.clone(), sort_key, - allowed_sources, - model_provider_filter.as_deref(), - fallback_provider.as_str(), - search_term.as_deref(), - ) + allowed_sources: allowed_sources.to_vec(), + model_providers: model_provider_filter.clone(), + archived, + search_term: search_term.clone(), + }) .await - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to list threads: {err}"), - data: None, - })? - } else { - RolloutRecorder::list_threads( - &self.config, - page_size, - cursor_obj.as_ref(), - sort_key, - allowed_sources, - model_provider_filter.as_deref(), - fallback_provider.as_str(), - search_term.as_deref(), - ) - .await - .map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to list threads: {err}"), - data: None, - })? 
- }; + .map_err(thread_store_list_error)?; let mut filtered = Vec::with_capacity(page.items.len()); for it in page.items { - let Some(summary) = summary_from_thread_list_item( - it, - fallback_provider.as_str(), - state_db_ctx.as_ref(), - ) - .await + let Some(summary) = summary_from_stored_thread(it, fallback_provider.as_str()) else { continue; }; @@ -4814,12 +5013,8 @@ impl CodexMessageProcessor { items.extend(filtered); remaining = requested_page_size.saturating_sub(items.len()); - // Encode RolloutCursor into the JSON-RPC string form returned to clients. let next_cursor_value = page.next_cursor.clone(); - next_cursor = next_cursor_value - .as_ref() - .and_then(|cursor| serde_json::to_value(cursor).ok()) - .and_then(|value| value.as_str().map(str::to_owned)); + next_cursor = next_cursor_value.clone(); if remaining == 0 { break; } @@ -5065,7 +5260,11 @@ impl CodexMessageProcessor { &self, config: &Config, ) -> Result<(), JSONRPCErrorError> { - let configured_servers = self.thread_manager.mcp_manager().configured_servers(config); + let configured_servers = self + .thread_manager + .mcp_manager() + .configured_servers(config) + .await; let mcp_servers = match serde_json::to_value(configured_servers) { Ok(value) => value, Err(err) => { @@ -5125,7 +5324,8 @@ impl CodexMessageProcessor { let configured_servers = self .thread_manager .mcp_manager() - .configured_servers(&config); + .configured_servers(&config) + .await; let Some(server) = configured_servers.get(&name) else { let error = JSONRPCErrorError { code: INVALID_REQUEST_ERROR_CODE, @@ -5227,7 +5427,9 @@ impl CodexMessageProcessor { return; } }; - let mcp_config = config.to_mcp_config(self.thread_manager.plugins_manager().as_ref()); + let mcp_config = config + .to_mcp_config(self.thread_manager.plugins_manager().as_ref()) + .await; let auth = self.auth_manager.auth().await; tokio::spawn(async move { @@ -5517,31 +5719,23 @@ impl CodexMessageProcessor { } async fn unload_thread_without_subscribers( - &self, + 
thread_manager: Arc, + outgoing: Arc, + pending_thread_unloads: Arc>>, + thread_state_manager: ThreadStateManager, + thread_watch_manager: ThreadWatchManager, thread_id: ThreadId, thread: Arc, ) { - // This connection was the last subscriber. Only now do we unload the thread. - info!("thread {thread_id} has no subscribers; shutting down"); - let should_start_unload_task = self.pending_thread_unloads.lock().await.insert(thread_id); + info!("thread {thread_id} has no subscribers and is idle; shutting down"); // Any pending app-server -> client requests for this thread can no longer be // answered; cancel their callbacks before shutdown/unload. - self.outgoing + outgoing .cancel_requests_for_thread(thread_id, /*error*/ None) .await; - self.thread_state_manager - .remove_thread_state(thread_id) - .await; + thread_state_manager.remove_thread_state(thread_id).await; - if !should_start_unload_task { - return; - } - - let outgoing = self.outgoing.clone(); - let pending_thread_unloads = self.pending_thread_unloads.clone(); - let thread_manager = self.thread_manager.clone(); - let thread_watch_manager = self.thread_watch_manager.clone(); tokio::spawn(async move { match Self::wait_for_thread_shutdown(&thread).await { ThreadShutdownResult::Complete => { @@ -5590,7 +5784,7 @@ impl CodexMessageProcessor { } }; - let Ok(thread) = self.thread_manager.get_thread(thread_id).await else { + if self.thread_manager.get_thread(thread_id).await.is_err() { // Reconcile stale app-server bookkeeping when the thread has already been // removed from the core manager. This keeps loaded-status/subscription state // consistent with the source of truth before reporting NotLoaded. 
@@ -5610,103 +5804,21 @@ impl CodexMessageProcessor { .thread_state_manager .unsubscribe_connection_from_thread(thread_id, request_id.connection_id) .await; - if !was_subscribed { - self.outgoing - .send_response( - request_id, - ThreadUnsubscribeResponse { - status: ThreadUnsubscribeStatus::NotSubscribed, - }, - ) - .await; - return; - } - - if !self.thread_state_manager.has_subscribers(thread_id).await { - self.unload_thread_without_subscribers(thread_id, thread) - .await; - } + let status = if was_subscribed { + ThreadUnsubscribeStatus::Unsubscribed + } else { + ThreadUnsubscribeStatus::NotSubscribed + }; self.outgoing - .send_response( - request_id, - ThreadUnsubscribeResponse { - status: ThreadUnsubscribeStatus::Unsubscribed, - }, - ) + .send_response(request_id, ThreadUnsubscribeResponse { status }) .await; } - async fn archive_thread_common( - &self, - thread_id: ThreadId, - rollout_path: &Path, - ) -> Result<(), JSONRPCErrorError> { - // Verify rollout_path is under sessions dir. - let rollout_folder = self.config.codex_home.join(codex_core::SESSIONS_SUBDIR); - - let canonical_sessions_dir = match tokio::fs::canonicalize(&rollout_folder).await { - Ok(path) => path, - Err(err) => { - return Err(JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!( - "failed to archive thread: unable to resolve sessions directory: {err}" - ), - data: None, - }); - } - }; - let canonical_rollout_path = tokio::fs::canonicalize(rollout_path).await; - let canonical_rollout_path = if let Ok(path) = canonical_rollout_path - && path.starts_with(&canonical_sessions_dir) - { - path - } else { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "rollout path `{}` must be in sessions directory", - rollout_path.display() - ), - data: None, - }); - }; - - // Verify file name matches thread id. 
- let required_suffix = format!("{thread_id}.jsonl"); - let Some(file_name) = canonical_rollout_path.file_name().map(OsStr::to_owned) else { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "rollout path `{}` missing file name", - rollout_path.display() - ), - data: None, - }); - }; - if !file_name - .to_string_lossy() - .ends_with(required_suffix.as_str()) - { - return Err(JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "rollout path `{}` does not match thread id {thread_id}", - rollout_path.display() - ), - data: None, - }); - } - - let mut state_db_ctx = None; - + async fn prepare_thread_for_archive(&self, thread_id: ThreadId) { // If the thread is active, request shutdown and wait briefly. let removed_conversation = self.thread_manager.remove_thread(&thread_id).await; if let Some(conversation) = removed_conversation { - if let Some(ctx) = conversation.state_db() { - state_db_ctx = Some(ctx); - } info!("thread {thread_id} was active; shutting down"); match Self::wait_for_thread_shutdown(&conversation).await { ThreadShutdownResult::Complete => {} @@ -5721,34 +5833,6 @@ impl CodexMessageProcessor { } } self.finalize_thread_teardown(thread_id).await; - - if state_db_ctx.is_none() { - state_db_ctx = get_state_db(&self.config).await; - } - - // Move the rollout file to archived. 
- let result: std::io::Result<()> = async move { - let archive_folder = self - .config - .codex_home - .join(codex_core::ARCHIVED_SESSIONS_SUBDIR); - tokio::fs::create_dir_all(&archive_folder).await?; - let archived_path = archive_folder.join(&file_name); - tokio::fs::rename(&canonical_rollout_path, &archived_path).await?; - if let Some(ctx) = state_db_ctx { - let _ = ctx - .mark_archived(thread_id, archived_path.as_path(), Utc::now()) - .await; - } - Ok(()) - } - .await; - - result.map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to archive thread: {err}"), - data: None, - }) } async fn apps_list(&self, request_id: ConnectionRequestId, params: AppsListParams) { @@ -5992,7 +6076,7 @@ impl CodexMessageProcessor { }; let cwd_set: HashSet = cwds.iter().cloned().collect(); - let mut extra_roots_by_cwd: HashMap> = HashMap::new(); + let mut extra_roots_by_cwd: HashMap> = HashMap::new(); for entry in per_cwd_extra_user_roots.unwrap_or_default() { if !cwd_set.contains(&entry.cwd) { warn!( @@ -6004,7 +6088,7 @@ impl CodexMessageProcessor { let mut valid_extra_roots = Vec::new(); for root in entry.extra_user_roots { - if !root.is_absolute() { + let Ok(root) = AbsolutePathBuf::from_absolute_path_checked(root.as_path()) else { self.send_invalid_request_error( request_id, format!( @@ -6014,7 +6098,7 @@ impl CodexMessageProcessor { ) .await; return; - } + }; valid_extra_roots.push(root); } extra_roots_by_cwd @@ -6032,30 +6116,48 @@ impl CodexMessageProcessor { }; let skills_manager = self.thread_manager.skills_manager(); let plugins_manager = self.thread_manager.plugins_manager(); + let fs = match self.thread_manager.environment_manager().current().await { + Ok(Some(environment)) => Some(environment.get_filesystem()), + Ok(None) => None, + Err(err) => { + self.outgoing + .send_error( + request_id, + JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to create environment: {err}"), + data: None, + }, + ) + .await; 
+ return; + } + }; let cli_overrides = self.current_cli_overrides(); let mut data = Vec::new(); for cwd in cwds { let extra_roots = extra_roots_by_cwd .get(&cwd) .map_or(&[][..], std::vec::Vec::as_slice); - let cwd_abs = match AbsolutePathBuf::try_from(cwd.as_path()) { + let cwd_abs = match AbsolutePathBuf::relative_to_current_dir(cwd.as_path()) { Ok(path) => path, Err(err) => { let error_path = cwd.clone(); data.push(codex_app_server_protocol::SkillsListEntry { cwd, skills: Vec::new(), - errors: errors_to_info(&[codex_core::skills::SkillError { + errors: vec![codex_app_server_protocol::SkillErrorInfo { path: error_path, message: err.to_string(), - }]), + }], }); continue; } }; let config_layer_stack = match load_config_layers_state( + LOCAL_FS.as_ref(), &self.config.codex_home, - Some(cwd_abs), + Some(cwd_abs.clone()), &cli_overrides, LoaderOverrides::default(), CloudRequirementsLoader::default(), @@ -6068,26 +6170,33 @@ impl CodexMessageProcessor { data.push(codex_app_server_protocol::SkillsListEntry { cwd, skills: Vec::new(), - errors: errors_to_info(&[codex_core::skills::SkillError { + errors: vec![codex_app_server_protocol::SkillErrorInfo { path: error_path, message: err.to_string(), - }]), + }], }); continue; } }; - let effective_skill_roots = plugins_manager.effective_skill_roots_for_layer_stack( - &config_layer_stack, - config.features.enabled(Feature::Plugins), - ); + let effective_skill_roots = plugins_manager + .effective_skill_roots_for_layer_stack( + &config_layer_stack, + config.features.enabled(Feature::Plugins), + ) + .await; let skills_input = codex_core::skills::SkillsLoadInput::new( - cwd.clone(), + cwd_abs.clone(), effective_skill_roots, config_layer_stack, config.bundled_skills_enabled(), ); let outcome = skills_manager - .skills_for_cwd_with_extra_user_roots(&skills_input, force_reload, extra_roots) + .skills_for_cwd_with_extra_user_roots( + &skills_input, + force_reload, + extra_roots, + fs.clone(), + ) .await; let errors = 
errors_to_info(&outcome.errors); let skills = skills_to_info(&outcome.skills, &outcome.disabled_paths); @@ -6109,7 +6218,7 @@ impl CodexMessageProcessor { force_remote_sync, } = params; let roots = cwds.unwrap_or_default(); - plugins_manager.maybe_start_non_curated_plugin_cache_refresh_for_roots(&roots); + plugins_manager.maybe_start_non_curated_plugin_cache_refresh(&roots); let mut config = match self.load_latest_config(/*fallback_cwd*/ None).await { Ok(config) => config, @@ -6252,6 +6361,39 @@ impl CodexMessageProcessor { .await; } + async fn marketplace_add(&self, request_id: ConnectionRequestId, params: MarketplaceAddParams) { + let result = add_marketplace_to_codex_home( + self.config.codex_home.to_path_buf(), + codex_core::plugins::MarketplaceAddRequest { + source: params.source, + ref_name: params.ref_name, + sparse_paths: params.sparse_paths.unwrap_or_default(), + }, + ) + .await; + + match result { + Ok(outcome) => { + self.outgoing + .send_response( + request_id, + MarketplaceAddResponse { + marketplace_name: outcome.marketplace_name, + installed_root: outcome.installed_root, + already_added: outcome.already_added, + }, + ) + .await; + } + Err(MarketplaceAddError::InvalidRequest(message)) => { + self.send_invalid_request_error(request_id, message).await; + } + Err(MarketplaceAddError::Internal(message)) => { + self.send_internal_error(request_id, message).await; + } + } + } + async fn plugin_read(&self, request_id: ConnectionRequestId, params: PluginReadParams) { let plugins_manager = self.thread_manager.plugins_manager(); let PluginReadParams { @@ -6272,26 +6414,16 @@ impl CodexMessageProcessor { plugin_name, marketplace_path, }; - let config_for_read = config.clone(); - let outcome = match tokio::task::spawn_blocking(move || { - plugins_manager.read_plugin_for_config(&config_for_read, &request) - }) - .await + let outcome = match plugins_manager + .read_plugin_for_config(&config, &request) + .await { - Ok(Ok(outcome)) => outcome, - Ok(Err(err)) => { + 
Ok(outcome) => outcome, + Err(err) => { self.send_marketplace_error(request_id, err, "read plugin details") .await; return; } - Err(err) => { - self.send_internal_error( - request_id, - format!("failed to read plugin details: {err}"), - ) - .await; - return; - } }; let app_summaries = plugin_app_helpers::load_plugin_app_summaries(&config, &outcome.plugin.apps).await; @@ -6432,7 +6564,8 @@ impl CodexMessageProcessor { self.clear_plugin_related_caches(); - let plugin_mcp_servers = load_plugin_mcp_servers(result.installed_path.as_path()); + let plugin_mcp_servers = + load_plugin_mcp_servers(result.installed_path.as_path()).await; if !plugin_mcp_servers.is_empty() { if let Err(err) = self.queue_mcp_server_refresh_for_config(&config).await { @@ -6445,7 +6578,7 @@ impl CodexMessageProcessor { .await; } - let plugin_apps = load_plugin_apps(result.installed_path.as_path()); + let plugin_apps = load_plugin_apps(result.installed_path.as_path()).await; let auth = self.auth_manager.auth().await; let apps_needing_auth = if plugin_apps.is_empty() || !config.features.apps_enabled_for_auth( @@ -6648,12 +6781,18 @@ impl CodexMessageProcessor { app_server_client_version: Option, ) { if let Err(error) = Self::validate_v2_input_limit(¶ms.input) { + self.track_error_response( + &request_id, + &error, + Some(AnalyticsJsonRpcError::Input(InputError::TooLarge)), + ); self.outgoing.send_error(request_id, error).await; return; } let (_, thread) = match self.load_thread(¶ms.thread_id).await { Ok(v) => v, Err(error) => { + self.track_error_response(&request_id, &error, /*error_type*/ None); self.outgoing.send_error(request_id, error).await; return; } @@ -6665,6 +6804,7 @@ impl CodexMessageProcessor { ) .await { + self.track_error_response(&request_id, &error, /*error_type*/ None); self.outgoing.send_error(request_id, error).await; return; } @@ -6748,6 +6888,15 @@ impl CodexMessageProcessor { }; let response = TurnStartResponse { turn }; + if 
self.config.features.enabled(Feature::GeneralAnalytics) { + self.analytics_events_client.track_response( + request_id.connection_id.0, + ClientResponse::TurnStart { + request_id: request_id.request_id.clone(), + response: response.clone(), + }, + ); + } self.outgoing.send_response(request_id, response).await; } Err(err) => { @@ -6756,11 +6905,61 @@ impl CodexMessageProcessor { message: format!("failed to start turn: {err}"), data: None, }; + self.track_error_response(&request_id, &error, /*error_type*/ None); self.outgoing.send_error(request_id, error).await; } } } + async fn thread_inject_items( + &self, + request_id: ConnectionRequestId, + params: ThreadInjectItemsParams, + ) { + let (_, thread) = match self.load_thread(¶ms.thread_id).await { + Ok(value) => value, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let items = match params + .items + .into_iter() + .enumerate() + .map(|(index, value)| { + serde_json::from_value::(value) + .map_err(|err| format!("items[{index}] is not a valid response item: {err}")) + }) + .collect::, _>>() + { + Ok(items) => items, + Err(message) => { + self.send_invalid_request_error(request_id, message).await; + return; + } + }; + + match thread.inject_response_items(items).await { + Ok(()) => { + self.outgoing + .send_response(request_id, ThreadInjectItemsResponse {}) + .await; + } + Err(CodexErr::InvalidRequest(message)) => { + self.send_invalid_request_error(request_id, message).await; + } + Err(err) => { + self.send_internal_error( + request_id, + format!("failed to inject response items: {err}"), + ) + .await; + } + } + } + async fn set_app_server_client_info( thread: &CodexThread, app_server_client_name: Option, @@ -6780,6 +6979,7 @@ impl CodexMessageProcessor { let (_, thread) = match self.load_thread(¶ms.thread_id).await { Ok(v) => v, Err(error) => { + self.track_error_response(&request_id, &error, /*error_type*/ None); self.outgoing.send_error(request_id, error).await; return; 
} @@ -6797,6 +6997,11 @@ impl CodexMessageProcessor { .record_request_turn_id(&request_id, ¶ms.expected_turn_id) .await; if let Err(error) = Self::validate_v2_input_limit(¶ms.input) { + self.track_error_response( + &request_id, + &error, + Some(AnalyticsJsonRpcError::Input(InputError::TooLarge)), + ); self.outgoing.send_error(request_id, error).await; return; } @@ -6817,36 +7022,51 @@ impl CodexMessageProcessor { { Ok(turn_id) => { let response = TurnSteerResponse { turn_id }; + if self.config.features.enabled(Feature::GeneralAnalytics) { + self.analytics_events_client.track_response( + request_id.connection_id.0, + ClientResponse::TurnSteer { + request_id: request_id.request_id.clone(), + response: response.clone(), + }, + ); + } self.outgoing.send_response(request_id, response).await; } Err(err) => { - let (code, message, data) = match err { + let (code, message, data, error_type) = match err { SteerInputError::NoActiveTurn(_) => ( INVALID_REQUEST_ERROR_CODE, "no active turn to steer".to_string(), None, + Some(AnalyticsJsonRpcError::TurnSteer( + TurnSteerRequestError::NoActiveTurn, + )), ), SteerInputError::ExpectedTurnMismatch { expected, actual } => ( INVALID_REQUEST_ERROR_CODE, format!("expected active turn id `{expected}` but found `{actual}`"), None, + Some(AnalyticsJsonRpcError::TurnSteer( + TurnSteerRequestError::ExpectedTurnMismatch, + )), ), SteerInputError::ActiveTurnNotSteerable { turn_kind } => { - let message = match turn_kind { - codex_protocol::protocol::NonSteerableTurnKind::Review => { - "cannot steer a review turn".to_string() - } - codex_protocol::protocol::NonSteerableTurnKind::Compact => { - "cannot steer a compact turn".to_string() - } + let (message, turn_steer_error) = match turn_kind { + codex_protocol::protocol::NonSteerableTurnKind::Review => ( + "cannot steer a review turn".to_string(), + TurnSteerRequestError::NonSteerableReview, + ), + codex_protocol::protocol::NonSteerableTurnKind::Compact => ( + "cannot steer a compact 
turn".to_string(), + TurnSteerRequestError::NonSteerableCompact, + ), }; let error = TurnError { message: message.clone(), - codex_error_info: Some( - AppServerCodexErrorInfo::ActiveTurnNotSteerable { - turn_kind: turn_kind.into(), - }, - ), + codex_error_info: Some(CodexErrorInfo::ActiveTurnNotSteerable { + turn_kind: turn_kind.into(), + }), additional_details: None, }; let data = match serde_json::to_value(error) { @@ -6859,12 +7079,18 @@ impl CodexMessageProcessor { None } }; - (INVALID_REQUEST_ERROR_CODE, message, data) + ( + INVALID_REQUEST_ERROR_CODE, + message, + data, + Some(AnalyticsJsonRpcError::TurnSteer(turn_steer_error)), + ) } SteerInputError::EmptyInput => ( INVALID_REQUEST_ERROR_CODE, "input must not be empty".to_string(), None, + Some(AnalyticsJsonRpcError::Input(InputError::Empty)), ), }; let error = JSONRPCErrorError { @@ -6872,6 +7098,7 @@ impl CodexMessageProcessor { message, data, }; + self.track_error_response(&request_id, &error, error_type); self.outgoing.send_error(request_id, error).await; } } @@ -6938,6 +7165,7 @@ impl CodexMessageProcessor { &request_id, thread.as_ref(), Op::RealtimeConversationStart(ConversationStartParams { + output_modality: params.output_modality, prompt: params.prompt, session_id: params.session_id, transport: params.transport.map(|transport| match transport { @@ -7229,7 +7457,7 @@ impl CodexMessageProcessor { if let Some(rollout_path) = review_thread.rollout_path() { match read_summary_from_rollout(rollout_path.as_path(), fallback_provider).await { Ok(summary) => { - let mut thread = summary_to_thread(summary); + let mut thread = summary_to_thread(summary, &self.config.cwd); self.thread_watch_manager .upsert_thread_silently(thread.clone()) .await; @@ -7337,9 +7565,12 @@ impl CodexMessageProcessor { async fn turn_interrupt(&self, request_id: ConnectionRequestId, params: TurnInterruptParams) { let TurnInterruptParams { thread_id, turn_id } = params; - self.outgoing - .record_request_turn_id(&request_id, &turn_id) - 
.await; + let is_startup_interrupt = turn_id.is_empty(); + if !is_startup_interrupt { + self.outgoing + .record_request_turn_id(&request_id, &turn_id) + .await; + } let (thread_uuid, thread) = match self.load_thread(&thread_id).await { Ok(v) => v, @@ -7349,21 +7580,48 @@ impl CodexMessageProcessor { } }; - let request = request_id.clone(); - - // Record the pending interrupt so we can reply when TurnAborted arrives. - { + // Record turn interrupts so we can reply when TurnAborted arrives. Startup + // interrupts do not have a turn and are acknowledged after submission. + if !is_startup_interrupt { let thread_state = self.thread_state_manager.thread_state(thread_uuid).await; let mut thread_state = thread_state.lock().await; thread_state .pending_interrupts - .push((request, ApiVersion::V2)); + .push((request_id.clone(), ApiVersion::V2)); } - // Submit the interrupt; we'll respond upon TurnAborted. - let _ = self + // Submit the interrupt. Turn interrupts respond upon TurnAborted; startup + // interrupts respond here because startup cancellation has no turn event. 
+ let submit_result = self .submit_core_op(&request_id, thread.as_ref(), Op::Interrupt) .await; + match submit_result { + Ok(_) if is_startup_interrupt => { + self.outgoing + .send_response(request_id, TurnInterruptResponse {}) + .await; + } + Ok(_) => {} + Err(err) => { + if !is_startup_interrupt { + let thread_state = self.thread_state_manager.thread_state(thread_uuid).await; + let mut thread_state = thread_state.lock().await; + thread_state + .pending_interrupts + .retain(|(pending_request_id, _)| pending_request_id != &request_id); + } + let interrupt_target = if is_startup_interrupt { + "startup" + } else { + "turn" + }; + self.send_internal_error( + request_id, + format!("failed to interrupt {interrupt_target}: {err}"), + ) + .await; + } + } } async fn ensure_conversation_listener( @@ -7378,11 +7636,12 @@ impl CodexMessageProcessor { thread_manager: Arc::clone(&self.thread_manager), thread_state_manager: self.thread_state_manager.clone(), outgoing: Arc::clone(&self.outgoing), + pending_thread_unloads: Arc::clone(&self.pending_thread_unloads), analytics_events_client: self.analytics_events_client.clone(), general_analytics_enabled: self.config.features.enabled(Feature::GeneralAnalytics), thread_watch_manager: self.thread_watch_manager.clone(), fallback_model_provider: self.config.model_provider_id.clone(), - codex_home: self.config.codex_home.clone(), + codex_home: self.config.codex_home.to_path_buf(), }, conversation_id, connection_id, @@ -7413,21 +7672,45 @@ impl CodexMessageProcessor { }); } }; - let Some(thread_state) = listener_task_context - .thread_state_manager - .try_ensure_connection_subscribed(conversation_id, connection_id, raw_events_enabled) - .await - else { - return Ok(EnsureConversationListenerResult::ConnectionClosed); + let thread_state = { + let pending_thread_unloads = listener_task_context.pending_thread_unloads.lock().await; + if pending_thread_unloads.contains(&conversation_id) { + return Err(JSONRPCErrorError { + code: 
INVALID_REQUEST_ERROR_CODE, + message: format!( + "thread {conversation_id} is closing; retry after the thread is closed" + ), + data: None, + }); + } + let Some(thread_state) = listener_task_context + .thread_state_manager + .try_ensure_connection_subscribed( + conversation_id, + connection_id, + raw_events_enabled, + ) + .await + else { + return Ok(EnsureConversationListenerResult::ConnectionClosed); + }; + thread_state }; - Self::ensure_listener_task_running_task( - listener_task_context, + if let Err(error) = Self::ensure_listener_task_running_task( + listener_task_context.clone(), conversation_id, conversation, thread_state, api_version, ) - .await; + .await + { + let _ = listener_task_context + .thread_state_manager + .unsubscribe_connection_from_thread(conversation_id, connection_id) + .await; + return Err(error); + } Ok(EnsureConversationListenerResult::Attached) } @@ -7461,24 +7744,25 @@ impl CodexMessageProcessor { conversation: Arc, thread_state: Arc>, api_version: ApiVersion, - ) { + ) -> Result<(), JSONRPCErrorError> { Self::ensure_listener_task_running_task( ListenerTaskContext { thread_manager: Arc::clone(&self.thread_manager), thread_state_manager: self.thread_state_manager.clone(), outgoing: Arc::clone(&self.outgoing), + pending_thread_unloads: Arc::clone(&self.pending_thread_unloads), analytics_events_client: self.analytics_events_client.clone(), general_analytics_enabled: self.config.features.enabled(Feature::GeneralAnalytics), thread_watch_manager: self.thread_watch_manager.clone(), fallback_model_provider: self.config.model_provider_id.clone(), - codex_home: self.config.codex_home.clone(), + codex_home: self.config.codex_home.to_path_buf(), }, conversation_id, conversation, thread_state, api_version, ) - .await; + .await } async fn ensure_listener_task_running_task( @@ -7487,12 +7771,27 @@ impl CodexMessageProcessor { conversation: Arc, thread_state: Arc>, api_version: ApiVersion, - ) { + ) -> Result<(), JSONRPCErrorError> { let (cancel_tx, mut 
cancel_rx) = oneshot::channel(); + let Some(mut unloading_state) = UnloadingState::new( + &listener_task_context, + conversation_id, + THREAD_UNLOADING_DELAY, + ) + .await + else { + return Err(JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: format!( + "thread {conversation_id} is closing; retry after the thread is closed" + ), + data: None, + }); + }; let (mut listener_command_rx, listener_generation) = { let mut thread_state = thread_state.lock().await; if thread_state.listener_matches(&conversation) { - return; + return Ok(()); } thread_state.set_listener(cancel_tx, &conversation) }; @@ -7500,6 +7799,7 @@ impl CodexMessageProcessor { outgoing, thread_manager, thread_state_manager, + pending_thread_unloads, analytics_events_client: _, general_analytics_enabled: _, thread_watch_manager, @@ -7510,10 +7810,28 @@ impl CodexMessageProcessor { tokio::spawn(async move { loop { tokio::select! { + biased; _ = &mut cancel_rx => { // Listener was superseded or the thread is being torn down. 
break; } + listener_command = listener_command_rx.recv() => { + let Some(listener_command) = listener_command else { + break; + }; + handle_thread_listener_command( + conversation_id, + &conversation, + codex_home.as_path(), + &thread_state_manager, + &thread_state, + &thread_watch_manager, + &outgoing_for_task, + &pending_thread_unloads, + listener_command, + ) + .await; + } event = conversation.next_event() => { let event = match event { Ok(event) => event, @@ -7559,6 +7877,9 @@ impl CodexMessageProcessor { conversation_id, conversation.clone(), thread_manager.clone(), + listener_task_context + .general_analytics_enabled + .then(|| listener_task_context.analytics_events_client.clone()), thread_outgoing, thread_state.clone(), thread_watch_manager.clone(), @@ -7568,21 +7889,38 @@ impl CodexMessageProcessor { ) .await; } - listener_command = listener_command_rx.recv() => { - let Some(listener_command) = listener_command else { + unloading_watchers_open = unloading_state.wait_for_unloading_trigger() => { + if !unloading_watchers_open { break; - }; - handle_thread_listener_command( + } + if !unloading_state.should_unload_now() { + continue; + } + if matches!(conversation.agent_status().await, AgentStatus::Running) { + unloading_state.note_thread_activity_observed(); + continue; + } + { + let mut pending_thread_unloads = pending_thread_unloads.lock().await; + if pending_thread_unloads.contains(&conversation_id) { + continue; + } + if !unloading_state.should_unload_now() { + continue; + } + pending_thread_unloads.insert(conversation_id); + } + Self::unload_thread_without_subscribers( + thread_manager.clone(), + outgoing_for_task.clone(), + pending_thread_unloads.clone(), + thread_state_manager.clone(), + thread_watch_manager.clone(), conversation_id, - &conversation, - codex_home.as_path(), - &thread_state_manager, - &thread_state, - &thread_watch_manager, - &outgoing_for_task, - listener_command, + conversation.clone(), ) .await; + break; } } } @@ -7592,6 +7930,7 @@ 
impl CodexMessageProcessor { thread_state.clear_listener(); } }); + Ok(()) } async fn git_diff_to_origin(&self, request_id: ConnectionRequestId, cwd: PathBuf) { let diff = git_diff_to_remote(&cwd).await; @@ -7976,7 +8315,7 @@ impl CodexMessageProcessor { policy_cwd: config.cwd.to_path_buf(), command_cwd, env_map: std::env::vars().collect(), - codex_home: config.codex_home.clone(), + codex_home: config.codex_home.to_path_buf(), active_profile: config.active_profile.clone(), }; codex_core::windows_sandbox::run_windows_sandbox_setup(setup_request).await @@ -8082,6 +8421,7 @@ async fn handle_thread_listener_command( thread_state: &Arc>, thread_watch_manager: &ThreadWatchManager, outgoing: &Arc, + pending_thread_unloads: &Arc>>, listener_command: ThreadListenerCommand, ) { match listener_command { @@ -8094,6 +8434,7 @@ async fn handle_thread_listener_command( thread_state, thread_watch_manager, outgoing, + pending_thread_unloads, *resume_request, ) .await; @@ -8123,6 +8464,7 @@ async fn handle_pending_thread_resume_request( thread_state: &Arc>, thread_watch_manager: &ThreadWatchManager, outgoing: &Arc, + pending_thread_unloads: &Arc>>, pending: crate::thread_state::PendingThreadResumeRequest, ) { let active_turn = { @@ -8176,6 +8518,37 @@ async fn handle_pending_thread_resume_request( has_live_in_progress_turn, ); + { + let pending_thread_unloads = pending_thread_unloads.lock().await; + if pending_thread_unloads.contains(&conversation_id) { + drop(pending_thread_unloads); + outgoing + .send_error( + request_id, + JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: format!( + "thread {conversation_id} is closing; retry thread/resume after the thread is closed" + ), + data: None, + }, + ) + .await; + return; + } + if !thread_state_manager + .try_add_connection_to_thread(conversation_id, connection_id) + .await + { + tracing::debug!( + thread_id = %conversation_id, + connection_id = ?connection_id, + "skipping running thread resume for closed connection" + 
); + return; + } + } + let ThreadConfigSnapshot { model, model_provider_id, @@ -8200,13 +8573,27 @@ async fn handle_pending_thread_resume_request( sandbox: sandbox_policy.into(), reasoning_effort, }; + let token_usage_thread = response.thread.clone(); + let token_usage_turn_id = latest_token_usage_turn_id_from_rollout_path( + pending.rollout_path.as_path(), + &token_usage_thread, + ) + .await; outgoing.send_response(request_id, response).await; + // Rejoining a loaded thread has the same UI contract as a cold resume, but + // uses the live conversation state instead of reconstructing a new session. + send_thread_token_usage_update_to_connection( + outgoing, + connection_id, + conversation_id, + &token_usage_thread, + conversation.as_ref(), + token_usage_turn_id, + ) + .await; outgoing .replay_requests_to_connection_for_thread(connection_id, conversation_id) .await; - let _attached = thread_state_manager - .try_add_connection_to_thread(conversation_id, connection_id) - .await; } enum ThreadTurnSource<'a> { @@ -8319,7 +8706,7 @@ fn collect_resume_override_mismatches( } if let Some(requested_cwd) = request.cwd.as_deref() { let requested_cwd_path = std::path::PathBuf::from(requested_cwd); - if requested_cwd_path != config_snapshot.cwd { + if requested_cwd_path != config_snapshot.cwd.as_path() { mismatch_details.push(format!( "cwd requested={} active={}", requested_cwd_path.display(), @@ -8431,7 +8818,7 @@ fn has_model_resume_override( fn skills_to_info( skills: &[codex_core::skills::SkillMetadata], - disabled_paths: &std::collections::HashSet, + disabled_paths: &std::collections::HashSet, ) -> Vec { skills .iter() @@ -8477,7 +8864,7 @@ fn skills_to_info( fn plugin_skills_to_info( skills: &[codex_core::skills::SkillMetadata], - disabled_skill_paths: &std::collections::HashSet, + disabled_skill_paths: &std::collections::HashSet, ) -> Vec { skills .iter() @@ -8501,9 +8888,7 @@ fn plugin_skills_to_info( .collect() } -fn plugin_interface_to_info( - interface: 
codex_core::plugins::PluginManifestInterface, -) -> PluginInterface { +fn plugin_interface_to_info(interface: PluginManifestInterface) -> PluginInterface { PluginInterface { display_name: interface.display_name, short_description: interface.short_description, @@ -8534,7 +8919,7 @@ fn errors_to_info( errors .iter() .map(|err| codex_app_server_protocol::SkillErrorInfo { - path: err.path.clone(), + path: err.path.to_path_buf(), message: err.message.clone(), }) .collect() @@ -8821,67 +9206,67 @@ fn set_thread_name_from_title(thread: &mut Thread, title: String) { thread.name = Some(title); } -async fn summary_from_thread_list_item( - it: codex_core::ThreadItem, - fallback_provider: &str, - state_db_ctx: Option<&StateDbHandle>, -) -> Option { - if let Some(thread_id) = it.thread_id { - let timestamp = it.created_at.clone(); - let updated_at = it.updated_at.clone().or_else(|| timestamp.clone()); - let model_provider = it - .model_provider - .clone() - .unwrap_or_else(|| fallback_provider.to_string()); - let cwd = it.cwd?; - let cli_version = it.cli_version.unwrap_or_default(); - let source = with_thread_spawn_agent_metadata( - it.source - .unwrap_or(codex_protocol::protocol::SessionSource::Unknown), - it.agent_nickname.clone(), - it.agent_role.clone(), - ); - return Some(ConversationSummary { - conversation_id: thread_id, - path: it.path, - preview: it.first_user_message.unwrap_or_default(), - timestamp, - updated_at, - model_provider, - cwd, - cli_version, - source, - git_info: if it.git_sha.is_none() - && it.git_branch.is_none() - && it.git_origin_url.is_none() - { - None - } else { - Some(ConversationGitInfo { - sha: it.git_sha, - branch: it.git_branch, - origin_url: it.git_origin_url, - }) - }, - }); +fn thread_store_list_error(err: ThreadStoreError) -> JSONRPCErrorError { + match err { + ThreadStoreError::InvalidRequest { message } => JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message, + data: None, + }, + err => JSONRPCErrorError { + code: 
INTERNAL_ERROR_CODE, + message: format!("failed to list threads: {err}"), + data: None, + }, } - if let Some(thread_id) = thread_id_from_rollout_path(it.path.as_path()) { - return read_summary_from_state_db_context_by_thread_id(state_db_ctx, thread_id).await; - } - None } -fn thread_id_from_rollout_path(path: &Path) -> Option { - let file_name = path.file_name()?.to_str()?; - let stem = file_name.strip_suffix(".jsonl")?; - if stem.len() < 37 { - return None; +fn thread_store_archive_error(operation: &str, err: ThreadStoreError) -> JSONRPCErrorError { + match err { + ThreadStoreError::InvalidRequest { message } => JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message, + data: None, + }, + err => JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to {operation} thread: {err}"), + data: None, + }, } - let uuid_start = stem.len().saturating_sub(36); - if !stem[..uuid_start].ends_with('-') { - return None; - } - ThreadId::from_string(&stem[uuid_start..]).ok() +} + +fn summary_from_stored_thread( + thread: StoredThread, + fallback_provider: &str, +) -> Option { + let path = thread.rollout_path?; + let source = with_thread_spawn_agent_metadata( + thread.source, + thread.agent_nickname.clone(), + thread.agent_role.clone(), + ); + let git_info = thread.git_info.map(|git| ConversationGitInfo { + sha: git.commit_hash.map(|sha| sha.0), + branch: git.branch, + origin_url: git.repository_url, + }); + Some(ConversationSummary { + conversation_id: thread.thread_id, + path, + preview: thread.first_user_message.unwrap_or(thread.preview), + timestamp: Some(thread.created_at.to_rfc3339_opts(SecondsFormat::Secs, true)), + updated_at: Some(thread.updated_at.to_rfc3339_opts(SecondsFormat::Secs, true)), + model_provider: if thread.model_provider.is_empty() { + fallback_provider.to_string() + } else { + thread.model_provider + }, + cwd: thread.cwd, + cli_version: thread.cli_version, + source, + git_info, + }) } #[allow(clippy::too_many_arguments)] @@ 
-9103,7 +9488,7 @@ async fn load_thread_summary_for_rollout( ) -> std::result::Result { let mut thread = read_summary_from_rollout(rollout_path, fallback_provider) .await - .map(summary_to_thread) + .map(|summary| summary_to_thread(summary, &config.cwd)) .map_err(|err| { format!( "failed to load rollout `{}` for thread {thread_id}: {err}", @@ -9114,10 +9499,13 @@ async fn load_thread_summary_for_rollout( if let Some(persisted_metadata) = persisted_metadata { merge_mutable_thread_metadata( &mut thread, - summary_to_thread(summary_from_thread_metadata(persisted_metadata)), + summary_to_thread( + summary_from_thread_metadata(persisted_metadata), + &config.cwd, + ), ); } else if let Some(summary) = read_summary_from_state_db_by_thread_id(config, thread_id).await { - merge_mutable_thread_metadata(&mut thread, summary_to_thread(summary)); + merge_mutable_thread_metadata(&mut thread, summary_to_thread(summary, &config.cwd)); } let title = if let Some(metadata) = persisted_metadata { non_empty_title(metadata) @@ -9205,7 +9593,7 @@ async fn read_updated_at(path: &Path, created_at: Option<&str>) -> Option = modified.into(); - updated_at.to_rfc3339_opts(SecondsFormat::Secs, true) + updated_at.to_rfc3339_opts(SecondsFormat::Millis, true) }); updated_at.or_else(|| created_at.map(str::to_string)) } @@ -9237,7 +9625,10 @@ fn build_thread_from_snapshot( } } -pub(crate) fn summary_to_thread(summary: ConversationSummary) -> Thread { +pub(crate) fn summary_to_thread( + summary: ConversationSummary, + fallback_cwd: &AbsolutePathBuf, +) -> Thread { let ConversationSummary { conversation_id, path, @@ -9258,6 +9649,15 @@ pub(crate) fn summary_to_thread(summary: ConversationSummary) -> Thread { branch: info.branch, origin_url: info.origin_url, }); + let cwd = + AbsolutePathBuf::relative_to_current_dir(path_utils::normalize_for_native_workdir(cwd)) + .unwrap_or_else(|err| { + warn!( + path = %path.display(), + "failed to normalize thread cwd while summarizing thread: {err}" + ); + 
fallback_cwd.clone() + }); Thread { id: conversation_id.to_string(), @@ -9291,6 +9691,8 @@ mod tests { use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use serde_json::json; use std::path::PathBuf; @@ -9425,7 +9827,7 @@ mod tests { approval_policy: codex_protocol::protocol::AskForApproval::OnRequest, approvals_reviewer: codex_protocol::config_types::ApprovalsReviewer::User, sandbox_policy: codex_protocol::protocol::SandboxPolicy::DangerFullAccess, - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), ephemeral: false, reasoning_effort: None, personality: None, @@ -9456,6 +9858,22 @@ mod tests { Ok(metadata) } + #[test] + fn summary_from_thread_metadata_formats_protocol_timestamps_as_seconds() -> Result<()> { + let mut metadata = + test_thread_metadata(/*model*/ None, /*reasoning_effort*/ None)?; + metadata.created_at = + DateTime::parse_from_rfc3339("2025-09-05T16:53:11.123Z")?.with_timezone(&Utc); + metadata.updated_at = + DateTime::parse_from_rfc3339("2025-09-05T16:53:12.456Z")?.with_timezone(&Utc); + + let summary = summary_from_thread_metadata(&metadata); + + assert_eq!(summary.timestamp, Some("2025-09-05T16:53:11Z".to_string())); + assert_eq!(summary.updated_at, Some("2025-09-05T16:53:12Z".to_string())); + Ok(()) + } + #[test] fn merge_persisted_resume_metadata_prefers_persisted_model_and_reasoning_effort() -> Result<()> { @@ -9715,7 +10133,7 @@ mod tests { let expected = ConversationSummary { conversation_id, timestamp: Some(timestamp.clone()), - updated_at: Some("2025-09-05T16:53:11Z".to_string()), + updated_at: Some(timestamp), path: path.clone(), preview: String::new(), model_provider: "fallback".to_string(), @@ -9769,7 +10187,8 @@ mod tests { fs::write(&path, format!("{}\n", 
serde_json::to_string(&line)?))?; let summary = read_summary_from_rollout(path.as_path(), "fallback").await?; - let thread = summary_to_thread(summary); + let fallback_cwd = AbsolutePathBuf::from_absolute_path("/")?; + let thread = summary_to_thread(summary, &fallback_cwd); assert_eq!(thread.agent_nickname, Some("atlas".to_string())); assert_eq!(thread.agent_role, Some("explorer".to_string())); @@ -9903,7 +10322,8 @@ mod tests { /*git_origin_url*/ None, ); - let thread = summary_to_thread(summary); + let fallback_cwd = AbsolutePathBuf::from_absolute_path("/")?; + let thread = summary_to_thread(summary, &fallback_cwd); assert_eq!(thread.agent_nickname, Some("atlas".to_string())); assert_eq!(thread.agent_role, Some("explorer".to_string())); @@ -10001,6 +10421,53 @@ mod tests { Ok(()) } + #[tokio::test] + async fn adding_connection_to_thread_updates_has_connections_watcher() -> Result<()> { + let manager = ThreadStateManager::new(); + let thread_id = ThreadId::from_string("ad7f0408-99b8-4f6e-a46f-bd0eec433370")?; + let connection_a = ConnectionId(1); + let connection_b = ConnectionId(2); + + manager.connection_initialized(connection_a).await; + manager.connection_initialized(connection_b).await; + manager + .try_ensure_connection_subscribed( + thread_id, + connection_a, + /*experimental_raw_events*/ false, + ) + .await + .expect("connection_a should be live"); + let mut has_connections = manager + .subscribe_to_has_connections(thread_id) + .await + .expect("thread should have a has-connections watcher"); + assert!(*has_connections.borrow()); + + assert!( + manager + .unsubscribe_connection_from_thread(thread_id, connection_a) + .await + ); + tokio::time::timeout(Duration::from_secs(1), has_connections.changed()) + .await + .expect("timed out waiting for no-subscriber update") + .expect("has-connections watcher should remain open"); + assert!(!*has_connections.borrow()); + + assert!( + manager + .try_add_connection_to_thread(thread_id, connection_b) + .await + ); + 
tokio::time::timeout(Duration::from_secs(1), has_connections.changed()) + .await + .expect("timed out waiting for subscriber update") + .expect("has-connections watcher should remain open"); + assert!(*has_connections.borrow()); + Ok(()) + } + #[tokio::test] async fn closed_connection_cannot_be_reintroduced_by_auto_subscribe() -> Result<()> { let manager = ThreadStateManager::new(); diff --git a/codex-rs/app-server/src/codex_message_processor/token_usage_replay.rs b/codex-rs/app-server/src/codex_message_processor/token_usage_replay.rs new file mode 100644 index 0000000000..d3c06f5e7c --- /dev/null +++ b/codex-rs/app-server/src/codex_message_processor/token_usage_replay.rs @@ -0,0 +1,133 @@ +//! Replays persisted token usage snapshots when a client attaches to an existing thread. +//! +//! The message processor decides when replay is allowed and preserves JSON-RPC response +//! ordering. This module owns notification construction and the attribution rules that +//! map the latest persisted `TokenCount` back to a v2 turn id. +//! +//! Rollout histories can contain explicit turn ids or generated turn ids. When explicit +//! ids do not match the rebuilt thread, replay falls back to the active turn position at +//! the time the `TokenCount` was persisted so the notification still targets the +//! corresponding rebuilt turn. 
+ +use std::path::Path; +use std::sync::Arc; + +use codex_app_server_protocol::ServerNotification; +use codex_app_server_protocol::Thread; +use codex_app_server_protocol::ThreadHistoryBuilder; +use codex_app_server_protocol::ThreadTokenUsage; +use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification; +use codex_app_server_protocol::TurnStatus; +use codex_core::CodexThread; +use codex_protocol::ThreadId; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::RolloutItem; + +use crate::codex_message_processor::read_rollout_items_from_rollout; +use crate::outgoing_message::ConnectionId; +use crate::outgoing_message::OutgoingMessageSender; + +/// Sends a restored token usage update to the connection that attached to a thread. +/// +/// This is lifecycle replay rather than a model event: the rollout already contains +/// the original `TokenCount`, and emitting through `send_event` here would duplicate +/// persisted usage records. Keeping this helper connection-scoped also avoids +/// surprising other subscribers with a historical usage update while they may be +/// rendering live turn events. 
+pub(super) async fn send_thread_token_usage_update_to_connection( + outgoing: &Arc, + connection_id: ConnectionId, + thread_id: ThreadId, + thread: &Thread, + conversation: &CodexThread, + token_usage_turn_id: Option, +) { + let Some(info) = conversation.token_usage_info().await else { + return; + }; + let notification = ThreadTokenUsageUpdatedNotification { + thread_id: thread_id.to_string(), + turn_id: token_usage_turn_id.unwrap_or_else(|| latest_token_usage_turn_id(thread)), + token_usage: ThreadTokenUsage::from(info), + }; + outgoing + .send_server_notification_to_connections( + &[connection_id], + ServerNotification::ThreadTokenUsageUpdated(notification), + ) + .await; +} + +pub(super) async fn latest_token_usage_turn_id_for_thread_path(thread: &Thread) -> Option { + let rollout_path = thread.path.as_deref()?; + latest_token_usage_turn_id_from_rollout_path(rollout_path, thread).await +} + +pub(super) async fn latest_token_usage_turn_id_from_rollout_path( + rollout_path: &Path, + thread: &Thread, +) -> Option { + let rollout_items = read_rollout_items_from_rollout(rollout_path).await.ok()?; + latest_token_usage_turn_id_from_rollout_items(&rollout_items, thread) +} + +/// Identifies the turn that was active when a `TokenCount` record appeared. +/// +/// The id is preferred when it still appears in the rebuilt thread. The position is a +/// fallback for histories whose implicit turn ids are regenerated during reconstruction. 
+struct TokenUsageTurnOwner { + id: String, + position: Option, +} + +pub(super) fn latest_token_usage_turn_id_from_rollout_items( + rollout_items: &[RolloutItem], + thread: &Thread, +) -> Option { + let owner = latest_token_usage_turn_owner_from_rollout_items(rollout_items)?; + if thread.turns.iter().any(|turn| turn.id == owner.id) { + return Some(owner.id); + } + owner + .position + .and_then(|position| thread.turns.get(position)) + .map(|turn| turn.id.clone()) +} + +fn latest_token_usage_turn_owner_from_rollout_items( + rollout_items: &[RolloutItem], +) -> Option { + let mut builder = ThreadHistoryBuilder::new(); + let mut token_usage_turn_owner = None; + + for item in rollout_items { + if matches!(item, RolloutItem::EventMsg(EventMsg::TokenCount(_))) { + token_usage_turn_owner = + builder + .active_turn_snapshot() + .map(|turn| TokenUsageTurnOwner { + id: turn.id, + position: builder.active_turn_position(), + }); + } + builder.handle_rollout_item(item); + } + + token_usage_turn_owner +} + +/// Chooses a fallback turn id that should own a replayed token usage update. +/// +/// Normal replay derives the owner from the rollout position of the latest +/// `TokenCount` event. This fallback only preserves a stable wire shape for +/// unusual histories where that rollout information cannot be read. 
+fn latest_token_usage_turn_id(thread: &Thread) -> String { + thread + .turns + .iter() + .rev() + .find(|turn| matches!(turn.status, TurnStatus::Completed | TurnStatus::Failed)) + .or_else(|| thread.turns.last()) + .map(|turn| turn.id.clone()) + .unwrap_or_default() +} diff --git a/codex-rs/app-server/src/config_api.rs b/codex-rs/app-server/src/config_api.rs index e85f137bc9..2bc7447300 100644 --- a/codex-rs/app-server/src/config_api.rs +++ b/codex-rs/app-server/src/config_api.rs @@ -27,8 +27,8 @@ use codex_core::config_loader::LoaderOverrides; use codex_core::config_loader::ResidencyRequirement as CoreResidencyRequirement; use codex_core::config_loader::SandboxModeRequirement as CoreSandboxModeRequirement; use codex_core::plugins::PluginId; -use codex_core::plugins::collect_plugin_enabled_candidates; -use codex_core::plugins::installed_plugin_telemetry_metadata; +use codex_core_plugins::loader::installed_plugin_telemetry_metadata; +use codex_core_plugins::toggles::collect_plugin_enabled_candidates; use codex_features::canonical_feature_for_key; use codex_features::feature_for_key; use codex_protocol::config_types::WebSearchMode; @@ -210,7 +210,7 @@ impl ConfigApi { .write_value(params) .await .map_err(map_error)?; - self.emit_plugin_toggle_events(pending_changes); + self.emit_plugin_toggle_events(pending_changes).await; Ok(response) } @@ -230,7 +230,7 @@ impl ConfigApi { .batch_write(params) .await .map_err(map_error)?; - self.emit_plugin_toggle_events(pending_changes); + self.emit_plugin_toggle_events(pending_changes).await; if reload_user_config { self.user_config_reloader.reload_user_config().await; } @@ -299,13 +299,16 @@ impl ConfigApi { Ok(ExperimentalFeatureEnablementSetResponse { enablement }) } - fn emit_plugin_toggle_events(&self, pending_changes: std::collections::BTreeMap) { + async fn emit_plugin_toggle_events( + &self, + pending_changes: std::collections::BTreeMap, + ) { for (plugin_id, enabled) in pending_changes { let Ok(plugin_id) = 
PluginId::parse(&plugin_id) else { continue; }; let metadata = - installed_plugin_telemetry_metadata(self.codex_home.as_path(), &plugin_id); + installed_plugin_telemetry_metadata(self.codex_home.as_path(), &plugin_id).await; if enabled { self.analytics_events_client.track_plugin_enabled(metadata); } else { @@ -449,7 +452,6 @@ fn map_network_requirements_to_api( .collect() }), managed_allowed_domains_only: network.managed_allowed_domains_only, - danger_full_access_denylist_only: network.danger_full_access_denylist_only, allowed_domains, denied_domains, unix_sockets: network.unix_sockets.map(|unix_sockets| { @@ -595,7 +597,6 @@ mod tests { ]), }), managed_allowed_domains_only: Some(false), - danger_full_access_denylist_only: Some(true), unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml { entries: std::collections::BTreeMap::from([( "/tmp/proxy.sock".to_string(), @@ -655,7 +656,6 @@ mod tests { ("example.com".to_string(), NetworkDomainPermission::Deny), ])), managed_allowed_domains_only: Some(false), - danger_full_access_denylist_only: Some(true), allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["example.com".to_string()]), unix_sockets: Some(std::collections::BTreeMap::from([( @@ -690,7 +690,6 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, - danger_full_access_denylist_only: None, unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml { entries: std::collections::BTreeMap::from([( "/tmp/ignored.sock".to_string(), @@ -714,7 +713,6 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, - danger_full_access_denylist_only: None, allowed_domains: None, denied_domains: None, unix_sockets: Some(std::collections::BTreeMap::from([( diff --git a/codex-rs/app-server/src/external_agent_config_api.rs b/codex-rs/app-server/src/external_agent_config_api.rs index 7cf0d65d73..2f90a55a13 100644 --- 
a/codex-rs/app-server/src/external_agent_config_api.rs +++ b/codex-rs/app-server/src/external_agent_config_api.rs @@ -6,6 +6,8 @@ use codex_app_server_protocol::ExternalAgentConfigImportResponse; use codex_app_server_protocol::ExternalAgentConfigMigrationItem; use codex_app_server_protocol::ExternalAgentConfigMigrationItemType; use codex_app_server_protocol::JSONRPCErrorError; +use codex_app_server_protocol::MigrationDetails; +use codex_app_server_protocol::PluginsMigration; use codex_core::external_agent_config::ExternalAgentConfigDetectOptions; use codex_core::external_agent_config::ExternalAgentConfigMigrationItem as CoreMigrationItem; use codex_core::external_agent_config::ExternalAgentConfigMigrationItemType as CoreMigrationItemType; @@ -35,6 +37,7 @@ impl ExternalAgentConfigApi { include_home: params.include_home, cwds: params.cwds, }) + .await .map_err(map_io_error)?; Ok(ExternalAgentConfigDetectResponse { @@ -51,12 +54,25 @@ impl ExternalAgentConfigApi { CoreMigrationItemType::AgentsMd => { ExternalAgentConfigMigrationItemType::AgentsMd } + CoreMigrationItemType::Plugins => { + ExternalAgentConfigMigrationItemType::Plugins + } CoreMigrationItemType::McpServerConfig => { ExternalAgentConfigMigrationItemType::McpServerConfig } }, description: migration_item.description, cwd: migration_item.cwd, + details: migration_item.details.map(|details| MigrationDetails { + plugins: details + .plugins + .into_iter() + .map(|plugin| PluginsMigration { + marketplace_name: plugin.marketplace_name, + plugin_names: plugin.plugin_names, + }) + .collect(), + }), }) .collect(), }) @@ -82,15 +98,33 @@ impl ExternalAgentConfigApi { ExternalAgentConfigMigrationItemType::AgentsMd => { CoreMigrationItemType::AgentsMd } + ExternalAgentConfigMigrationItemType::Plugins => { + CoreMigrationItemType::Plugins + } ExternalAgentConfigMigrationItemType::McpServerConfig => { CoreMigrationItemType::McpServerConfig } }, description: migration_item.description, cwd: migration_item.cwd, + details: 
migration_item.details.map(|details| { + codex_core::external_agent_config::MigrationDetails { + plugins: details + .plugins + .into_iter() + .map(|plugin| { + codex_core::external_agent_config::PluginsMigration { + marketplace_name: plugin.marketplace_name, + plugin_names: plugin.plugin_names, + } + }) + .collect(), + } + }), }) .collect(), ) + .await .map_err(map_io_error)?; Ok(ExternalAgentConfigImportResponse {}) diff --git a/codex-rs/app-server/src/fs_api.rs b/codex-rs/app-server/src/fs_api.rs index 57b355f818..a2c71871db 100644 --- a/codex-rs/app-server/src/fs_api.rs +++ b/codex-rs/app-server/src/fs_api.rs @@ -46,7 +46,7 @@ impl FsApi { ) -> Result { let bytes = self .file_system - .read_file(¶ms.path) + .read_file(¶ms.path, /*sandbox*/ None) .await .map_err(map_fs_error)?; Ok(FsReadFileResponse { @@ -64,7 +64,7 @@ impl FsApi { )) })?; self.file_system - .write_file(¶ms.path, bytes) + .write_file(¶ms.path, bytes, /*sandbox*/ None) .await .map_err(map_fs_error)?; Ok(FsWriteFileResponse {}) @@ -80,6 +80,7 @@ impl FsApi { CreateDirectoryOptions { recursive: params.recursive.unwrap_or(true), }, + /*sandbox*/ None, ) .await .map_err(map_fs_error)?; @@ -92,12 +93,13 @@ impl FsApi { ) -> Result { let metadata = self .file_system - .get_metadata(¶ms.path) + .get_metadata(¶ms.path, /*sandbox*/ None) .await .map_err(map_fs_error)?; Ok(FsGetMetadataResponse { is_directory: metadata.is_directory, is_file: metadata.is_file, + is_symlink: metadata.is_symlink, created_at_ms: metadata.created_at_ms, modified_at_ms: metadata.modified_at_ms, }) @@ -109,7 +111,7 @@ impl FsApi { ) -> Result { let entries = self .file_system - .read_directory(¶ms.path) + .read_directory(¶ms.path, /*sandbox*/ None) .await .map_err(map_fs_error)?; Ok(FsReadDirectoryResponse { @@ -135,6 +137,7 @@ impl FsApi { recursive: params.recursive.unwrap_or(true), force: params.force.unwrap_or(true), }, + /*sandbox*/ None, ) .await .map_err(map_fs_error)?; @@ -152,6 +155,7 @@ impl FsApi { CopyOptions { 
recursive: params.recursive, }, + /*sandbox*/ None, ) .await .map_err(map_fs_error)?; diff --git a/codex-rs/app-server/src/fs_watch.rs b/codex-rs/app-server/src/fs_watch.rs index 3a5b226248..ff00051472 100644 --- a/codex-rs/app-server/src/fs_watch.rs +++ b/codex-rs/app-server/src/fs_watch.rs @@ -14,7 +14,6 @@ use codex_core::file_watcher::FileWatcherSubscriber; use codex_core::file_watcher::Receiver; use codex_core::file_watcher::WatchPath; use codex_core::file_watcher::WatchRegistration; -use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashMap; use std::collections::HashSet; use std::collections::hash_map::Entry; @@ -128,7 +127,7 @@ impl FsWatchManager { }; let outgoing = self.outgoing.clone(); let (subscriber, rx) = self.file_watcher.add_subscriber(); - let watch_root = params.path.to_path_buf().clone(); + let watch_root = params.path.clone(); let registration = subscriber.register_paths(vec![WatchPath { path: params.path.to_path_buf(), recursive: false, @@ -166,7 +165,7 @@ impl FsWatchManager { let mut changed_paths = event .paths .into_iter() - .map(|path| AbsolutePathBuf::resolve_path_against_base(&path, &watch_root)) + .map(|path| watch_root.join(path)) .collect::>(); changed_paths.sort_by(|left, right| left.as_path().cmp(right.as_path())); if !changed_paths.is_empty() { diff --git a/codex-rs/app-server/src/in_process.rs b/codex-rs/app-server/src/in_process.rs index eb76848c57..4458bce89d 100644 --- a/codex-rs/app-server/src/in_process.rs +++ b/codex-rs/app-server/src/in_process.rs @@ -82,6 +82,7 @@ use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; use codex_login::AuthManager; use codex_protocol::protocol::SessionSource; +pub use codex_state::log_db::LogDbLayer; use tokio::sync::mpsc; use tokio::sync::oneshot; use tokio::time::timeout; @@ -117,6 +118,8 @@ pub struct InProcessStartArgs { pub cloud_requirements: CloudRequirementsLoader, /// Feedback sink used by app-server/core telemetry and logs. 
pub feedback: CodexFeedback, + /// SQLite tracing layer used to flush recently emitted logs before feedback upload. + pub log_db: Option, /// Environment manager used by core execution and filesystem operations. pub environment_manager: Arc, /// Startup warnings emitted after initialize succeeds. @@ -386,7 +389,7 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { AuthManager::shared_from_config(args.config.as_ref(), args.enable_codex_api_key_env); let (processor_tx, mut processor_rx) = mpsc::channel::(channel_capacity); let mut processor_handle = tokio::spawn(async move { - let processor = MessageProcessor::new(MessageProcessorArgs { + let processor = Arc::new(MessageProcessor::new(MessageProcessorArgs { outgoing: Arc::clone(&processor_outgoing), arg0_paths: args.arg0_paths, config: args.config, @@ -395,15 +398,15 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { loader_overrides: args.loader_overrides, cloud_requirements: args.cloud_requirements, feedback: args.feedback, - log_db: None, + log_db: args.log_db, config_warnings: args.config_warnings, session_source: args.session_source, auth_manager, rpc_transport: AppServerRpcTransport::InProcess, remote_control_handle: None, - }); + })); let mut thread_created_rx = processor.thread_created_receiver(); - let mut session = ConnectionSessionState::default(); + let session = Arc::new(ConnectionSessionState::default()); let mut listen_for_threads = true; loop { @@ -411,28 +414,33 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { command = processor_rx.recv() => { match command { Some(ProcessorCommand::Request(request)) => { - let was_initialized = session.initialized; + let was_initialized = session.initialized(); processor .process_client_request( IN_PROCESS_CONNECTION_ID, *request, - &mut session, + Arc::clone(&session), &outbound_initialized, ) .await; + let opted_out_notification_methods_snapshot = + 
session.opted_out_notification_methods(); + let experimental_api_enabled = + session.experimental_api_enabled(); + let is_initialized = session.initialized(); if let Ok(mut opted_out_notification_methods) = outbound_opted_out_notification_methods.write() { *opted_out_notification_methods = - session.opted_out_notification_methods.clone(); + opted_out_notification_methods_snapshot; } else { warn!("failed to update outbound opted-out notifications"); } outbound_experimental_api_enabled.store( - session.experimental_api_enabled, + experimental_api_enabled, Ordering::Release, ); - if !was_initialized && session.initialized { + if !was_initialized && is_initialized { processor.send_initialize_notifications().await; } } @@ -447,7 +455,7 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { created = thread_created_rx.recv(), if listen_for_threads => { match created { Ok(thread_id) => { - let connection_ids = if session.initialized { + let connection_ids = if session.initialized() { vec![IN_PROCESS_CONNECTION_ID] } else { Vec::::new() @@ -708,6 +716,7 @@ mod tests { match ConfigBuilder::default().build().await { Ok(config) => config, Err(_) => Config::load_default_with_cli_overrides(Vec::new()) + .await .expect("default config should load"), } } @@ -723,6 +732,7 @@ mod tests { loader_overrides: LoaderOverrides::default(), cloud_requirements: CloudRequirementsLoader::default(), feedback: CodexFeedback::new(), + log_db: None, environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), config_warnings: Vec::new(), session_source, diff --git a/codex-rs/app-server/src/lib.rs b/codex-rs/app-server/src/lib.rs index bfc87251ad..ba946c67b6 100644 --- a/codex-rs/app-server/src/lib.rs +++ b/codex-rs/app-server/src/lib.rs @@ -44,6 +44,7 @@ use codex_core::check_execpolicy_for_warnings; use codex_core::config_loader::ConfigLoadError; use codex_core::config_loader::TextRange as CoreTextRange; use codex_exec_server::EnvironmentManager; +use 
codex_exec_server::ExecServerRuntimePaths; use codex_feedback::CodexFeedback; use codex_protocol::protocol::SessionSource; use codex_state::log_db; @@ -360,7 +361,12 @@ pub async fn run_main_with_transport( session_source: SessionSource, auth: AppServerWebsocketAuthSettings, ) -> IoResult<()> { - let environment_manager = Arc::new(EnvironmentManager::from_env()); + let environment_manager = Arc::new(EnvironmentManager::from_env_with_runtime_paths(Some( + ExecServerRuntimePaths::from_optional_paths( + arg0_paths.codex_self_exe.clone(), + arg0_paths.codex_linux_sandbox_exe.clone(), + )?, + ))); let (transport_event_tx, mut transport_event_rx) = mpsc::channel::(CHANNEL_CAPACITY); let (outgoing_tx, mut outgoing_rx) = mpsc::channel::(CHANNEL_CAPACITY); @@ -404,7 +410,7 @@ pub async fn run_main_with_transport( cloud_requirements_loader( auth_manager, config.chatgpt_base_url, - config.codex_home.clone(), + config.codex_home.to_path_buf(), ) } Err(err) => { @@ -426,12 +432,14 @@ pub async fn run_main_with_transport( Err(err) => { let message = config_warning_from_error("Invalid configuration; using defaults.", &err); config_warnings.push(message); - Config::load_default_with_cli_overrides(cli_kv_overrides.clone()).map_err(|e| { - std::io::Error::new( - ErrorKind::InvalidData, - format!("error loading default config after config error: {e}"), - ) - })? + Config::load_default_with_cli_overrides(cli_kv_overrides.clone()) + .await + .map_err(|e| { + std::io::Error::new( + ErrorKind::InvalidData, + format!("error loading default config after config error: {e}"), + ) + })? 
} }; @@ -646,7 +654,7 @@ pub async fn run_main_with_transport( AuthManager::shared_from_config(&config, /*enable_codex_api_key_env*/ false); let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone(); let loader_overrides = loader_overrides_for_config_api; - let processor = MessageProcessor::new(MessageProcessorArgs { + let processor = Arc::new(MessageProcessor::new(MessageProcessorArgs { outgoing: outgoing_message_sender, arg0_paths, config: Arc::new(config), @@ -661,7 +669,7 @@ pub async fn run_main_with_transport( auth_manager, rpc_transport: analytics_rpc_transport(transport), remote_control_handle: Some(remote_control_handle), - }); + })); let mut thread_created_rx = processor.thread_created_receiver(); let mut running_turn_count_rx = processor.subscribe_running_assistant_turn_count(); let mut connections = HashMap::::new(); @@ -763,23 +771,28 @@ pub async fn run_main_with_transport( warn!("dropping request from unknown connection: {connection_id:?}"); continue; }; - let was_initialized = connection_state.session.initialized; + let was_initialized = + connection_state.session.initialized(); processor .process_request( connection_id, request, transport, - &mut connection_state.session, + Arc::clone(&connection_state.session), ) .await; + let opted_out_notification_methods_snapshot = connection_state + .session + .opted_out_notification_methods(); + let experimental_api_enabled = + connection_state.session.experimental_api_enabled(); + let is_initialized = connection_state.session.initialized(); if let Ok(mut opted_out_notification_methods) = connection_state .outbound_opted_out_notification_methods .write() { - *opted_out_notification_methods = connection_state - .session - .opted_out_notification_methods - .clone(); + *opted_out_notification_methods = + opted_out_notification_methods_snapshot; } else { warn!( "failed to update outbound opted-out notifications" @@ -788,10 +801,10 @@ pub async fn run_main_with_transport( connection_state 
.outbound_experimental_api_enabled .store( - connection_state.session.experimental_api_enabled, + experimental_api_enabled, std::sync::atomic::Ordering::Release, ); - if !was_initialized && connection_state.session.initialized { + if !was_initialized && is_initialized { processor .send_initialize_notifications_to_connection( connection_id, @@ -831,12 +844,12 @@ pub async fn run_main_with_transport( created = thread_created_rx.recv(), if listen_for_threads => { match created { Ok(thread_id) => { - let initialized_connection_ids: Vec = connections - .iter() - .filter_map(|(connection_id, connection_state)| { - connection_state.session.initialized.then_some(*connection_id) - }) - .collect(); + let mut initialized_connection_ids = Vec::new(); + for (connection_id, connection_state) in &connections { + if connection_state.session.initialized() { + initialized_connection_ids.push(*connection_id); + } + } processor .try_attach_thread_listener( thread_id, diff --git a/codex-rs/app-server/src/main.rs b/codex-rs/app-server/src/main.rs index 9a23680fb9..069227070e 100644 --- a/codex-rs/app-server/src/main.rs +++ b/codex-rs/app-server/src/main.rs @@ -12,6 +12,7 @@ use std::path::PathBuf; // Debug-only test hook: lets integration tests point the server at a temporary // managed config file without writing to /etc. 
const MANAGED_CONFIG_PATH_ENV_VAR: &str = "CODEX_APP_SERVER_MANAGED_CONFIG_PATH"; +const DISABLE_MANAGED_CONFIG_ENV_VAR: &str = "CODEX_APP_SERVER_DISABLE_MANAGED_CONFIG"; #[derive(Debug, Parser)] struct AppServerArgs { @@ -40,10 +41,12 @@ struct AppServerArgs { fn main() -> anyhow::Result<()> { arg0_dispatch_or_else(|arg0_paths: Arg0DispatchPaths| async move { let args = AppServerArgs::parse(); - let managed_config_path = managed_config_path_from_debug_env(); - let loader_overrides = LoaderOverrides { - managed_config_path, - ..Default::default() + let loader_overrides = if disable_managed_config_from_debug_env() { + LoaderOverrides::without_managed_config_for_tests() + } else { + managed_config_path_from_debug_env() + .map(LoaderOverrides::with_managed_config_path_for_tests) + .unwrap_or_default() }; let transport = args.listen; let session_source = args.session_source; @@ -63,6 +66,17 @@ fn main() -> anyhow::Result<()> { }) } +fn disable_managed_config_from_debug_env() -> bool { + #[cfg(debug_assertions)] + { + if let Ok(value) = std::env::var(DISABLE_MANAGED_CONFIG_ENV_VAR) { + return matches!(value.as_str(), "1" | "true" | "TRUE" | "yes" | "YES"); + } + } + + false +} + fn managed_config_path_from_debug_env() -> Option { #[cfg(debug_assertions)] { diff --git a/codex-rs/app-server/src/message_processor.rs b/codex-rs/app-server/src/message_processor.rs index 8c2bba00a3..6221cebee5 100644 --- a/codex-rs/app-server/src/message_processor.rs +++ b/codex-rs/app-server/src/message_processor.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::collections::HashSet; use std::future::Future; use std::sync::Arc; +use std::sync::OnceLock; use std::sync::RwLock; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; @@ -9,7 +10,6 @@ use std::sync::atomic::Ordering; use crate::codex_message_processor::CodexMessageProcessor; use crate::codex_message_processor::CodexMessageProcessorArgs; use crate::config_api::ConfigApi; -use 
crate::error_code::INTERNAL_ERROR_CODE; use crate::error_code::INVALID_REQUEST_ERROR_CODE; use crate::external_agent_config_api::ExternalAgentConfigApi; use crate::fs_api::FsApi; @@ -21,6 +21,7 @@ use crate::outgoing_message::RequestContext; use crate::transport::AppServerTransport; use crate::transport::RemoteControlHandle; use async_trait::async_trait; +use axum::http::HeaderValue; use codex_analytics::AnalyticsEventsClient; use codex_analytics::AppServerRpcTransport; use codex_app_server_protocol::AppListUpdatedNotification; @@ -174,13 +175,52 @@ pub(crate) struct MessageProcessor { remote_control_handle: Option, } -#[derive(Clone, Debug, Default)] +#[derive(Debug, Default)] pub(crate) struct ConnectionSessionState { - pub(crate) initialized: bool, - pub(crate) experimental_api_enabled: bool, - pub(crate) opted_out_notification_methods: HashSet, - pub(crate) app_server_client_name: Option, - pub(crate) client_version: Option, + initialized: OnceLock, +} + +#[derive(Debug)] +struct InitializedConnectionSessionState { + experimental_api_enabled: bool, + opted_out_notification_methods: HashSet, + app_server_client_name: String, + client_version: String, +} + +impl ConnectionSessionState { + pub(crate) fn initialized(&self) -> bool { + self.initialized.get().is_some() + } + + pub(crate) fn experimental_api_enabled(&self) -> bool { + self.initialized + .get() + .is_some_and(|session| session.experimental_api_enabled) + } + + pub(crate) fn opted_out_notification_methods(&self) -> HashSet { + self.initialized + .get() + .map(|session| session.opted_out_notification_methods.clone()) + .unwrap_or_default() + } + + pub(crate) fn app_server_client_name(&self) -> Option<&str> { + self.initialized + .get() + .map(|session| session.app_server_client_name.as_str()) + } + + pub(crate) fn client_version(&self) -> Option<&str> { + self.initialized + .get() + .map(|session| session.client_version.as_str()) + } + + fn initialize(&self, session: InitializedConnectionSessionState) -> 
Result<(), ()> { + self.initialized.set(session).map_err(|_| ()) + } } pub(crate) struct MessageProcessorArgs { @@ -266,7 +306,7 @@ impl MessageProcessor { .plugins_manager() .maybe_start_plugin_startup_tasks_for_config(&config, auth_manager.clone()); let config_api = ConfigApi::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), cli_overrides, runtime_feature_enablement, loader_overrides, @@ -274,7 +314,8 @@ impl MessageProcessor { thread_manager, analytics_events_client.clone(), ); - let external_agent_config_api = ExternalAgentConfigApi::new(config.codex_home.clone()); + let external_agent_config_api = + ExternalAgentConfigApi::new(config.codex_home.to_path_buf()); let fs_api = FsApi::default(); let fs_watch_manager = FsWatchManager::new(outgoing.clone()); @@ -299,11 +340,11 @@ impl MessageProcessor { } pub(crate) async fn process_request( - &self, + self: &Arc, connection_id: ConnectionId, request: JSONRPCRequest, transport: AppServerTransport, - session: &mut ConnectionSessionState, + session: Arc, ) { let request_method = request.method.as_str(); tracing::trace!( @@ -316,7 +357,7 @@ impl MessageProcessor { request_id: request.id.clone(), }; let request_span = - crate::app_server_tracing::request_span(&request, transport, connection_id, session); + crate::app_server_tracing::request_span(&request, transport, connection_id, &session); let request_trace = request.trace.as_ref().map(|trace| W3cTraceContext { traceparent: trace.traceparent.clone(), tracestate: trace.tracestate.clone(), @@ -358,7 +399,7 @@ impl MessageProcessor { self.handle_client_request( request_id.clone(), codex_request, - session, + Arc::clone(&session), /*outbound_initialized*/ None, request_context.clone(), ) @@ -373,10 +414,10 @@ impl MessageProcessor { /// This bypasses JSON request deserialization but keeps identical request /// semantics by delegating to `handle_client_request`. 
pub(crate) async fn process_client_request( - &self, + self: &Arc, connection_id: ConnectionId, request: ClientRequest, - session: &mut ConnectionSessionState, + session: Arc, outbound_initialized: &AtomicBool, ) { let request_id = ConnectionRequestId { @@ -384,7 +425,7 @@ impl MessageProcessor { request_id: request.id().clone(), }; let request_span = - crate::app_server_tracing::typed_request_span(&request, connection_id, session); + crate::app_server_tracing::typed_request_span(&request, connection_id, &session); let request_context = RequestContext::new(request_id.clone(), request_span, /*parent_trace*/ None); tracing::trace!( @@ -402,7 +443,7 @@ impl MessageProcessor { self.handle_client_request( request_id.clone(), request, - session, + Arc::clone(&session), Some(outbound_initialized), request_context.clone(), ) @@ -525,10 +566,10 @@ impl MessageProcessor { } async fn handle_client_request( - &self, + self: &Arc, connection_request_id: ConnectionRequestId, codex_request: ClientRequest, - session: &mut ConnectionSessionState, + session: Arc, // `Some(...)` means the caller wants initialize to immediately mark the // connection outbound-ready. Websocket JSON-RPC calls pass `None` so // lib.rs can deliver connection-scoped initialize notifications first. @@ -536,138 +577,166 @@ impl MessageProcessor { request_context: RequestContext, ) { let connection_id = connection_request_id.connection_id; - match codex_request { + if let ClientRequest::Initialize { request_id, params } = codex_request { // Handle Initialize internally so CodexMessageProcessor does not have to concern // itself with the `initialized` bool. 
- ClientRequest::Initialize { request_id, params } => { - let connection_request_id = ConnectionRequestId { - connection_id, - request_id, + let connection_request_id = ConnectionRequestId { + connection_id, + request_id, + }; + if session.initialized() { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "Already initialized".to_string(), + data: None, }; - if session.initialized { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: "Already initialized".to_string(), - data: None, - }; - self.outgoing.send_error(connection_request_id, error).await; - return; - } - - // TODO(maxj): Revisit capability scoping for `experimental_api_enabled`. - // Current behavior is per-connection. Reviewer feedback notes this can - // create odd cross-client behavior (for example dynamic tool calls on a - // shared thread when another connected client did not opt into - // experimental API). Proposed direction is instance-global first-write-wins - // with initialize-time mismatch rejection. 
- let analytics_initialize_params = params.clone(); - let (experimental_api_enabled, opt_out_notification_methods) = - match params.capabilities { - Some(capabilities) => ( - capabilities.experimental_api, - capabilities - .opt_out_notification_methods - .unwrap_or_default(), - ), - None => (false, Vec::new()), - }; - session.experimental_api_enabled = experimental_api_enabled; - session.opted_out_notification_methods = - opt_out_notification_methods.into_iter().collect(); - let ClientInfo { - name, - title: _title, - version, - } = params.client_info; - session.app_server_client_name = Some(name.clone()); - session.client_version = Some(version.clone()); - let originator = name.clone(); - if let Err(error) = set_default_originator(originator.clone()) { - match error { - SetOriginatorError::InvalidHeaderValue => { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: format!( - "Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value." - ), - data: None, - }; - self.outgoing - .send_error(connection_request_id.clone(), error) - .await; - return; - } - SetOriginatorError::AlreadyInitialized => { - // No-op. This is expected to happen if the originator is already set via env var. - // TODO(owen): Once we remove support for CODEX_INTERNAL_ORIGINATOR_OVERRIDE, - // this will be an unexpected state and we can return a JSON-RPC error indicating - // internal server error. 
- } - } - } - if self.config.features.enabled(Feature::GeneralAnalytics) { - self.analytics_events_client.track_initialize( - connection_id.0, - analytics_initialize_params, - originator, - self.rpc_transport, - ); - } - set_default_client_residency_requirement(self.config.enforce_residency.value()); - let user_agent_suffix = format!("{name}; {version}"); - if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() { - *suffix = Some(user_agent_suffix); - } - - let user_agent = get_codex_user_agent(); - let codex_home = match self.config.codex_home.clone().try_into() { - Ok(codex_home) => codex_home, - Err(err) => { - let error = JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("Invalid CODEX_HOME: {err}"), - data: None, - }; - self.outgoing.send_error(connection_request_id, error).await; - return; - } - }; - let response = InitializeResponse { - user_agent, - codex_home, - platform_family: std::env::consts::FAMILY.to_string(), - platform_os: std::env::consts::OS.to_string(), - }; - self.outgoing - .send_response(connection_request_id, response) - .await; - - session.initialized = true; - if let Some(outbound_initialized) = outbound_initialized { - // In-process clients can complete readiness immediately here. The - // websocket path defers this until lib.rs finishes transport-layer - // initialize handling for the specific connection. - outbound_initialized.store(true, Ordering::Release); - self.codex_message_processor - .connection_initialized(connection_id) - .await; - } + self.outgoing.send_error(connection_request_id, error).await; return; } - _ => { - if !session.initialized { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: "Not initialized".to_string(), - data: None, - }; - self.outgoing.send_error(connection_request_id, error).await; - return; + + // TODO(maxj): Revisit capability scoping for `experimental_api_enabled`. + // Current behavior is per-connection. 
Reviewer feedback notes this can + // create odd cross-client behavior (for example dynamic tool calls on a + // shared thread when another connected client did not opt into + // experimental API). Proposed direction is instance-global first-write-wins + // with initialize-time mismatch rejection. + let analytics_initialize_params = params.clone(); + let (experimental_api_enabled, opt_out_notification_methods) = match params.capabilities + { + Some(capabilities) => ( + capabilities.experimental_api, + capabilities + .opt_out_notification_methods + .unwrap_or_default(), + ), + None => (false, Vec::new()), + }; + let ClientInfo { + name, + title: _title, + version, + } = params.client_info; + // Validate before committing; set_default_originator validates while + // mutating process-global metadata. + if HeaderValue::from_str(&name).is_err() { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: format!( + "Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value." + ), + data: None, + }; + self.outgoing + .send_error(connection_request_id.clone(), error) + .await; + return; + } + let originator = name.clone(); + let user_agent_suffix = format!("{name}; {version}"); + let codex_home = self.config.codex_home.clone(); + if session + .initialize(InitializedConnectionSessionState { + experimental_api_enabled, + opted_out_notification_methods: opt_out_notification_methods + .into_iter() + .collect(), + app_server_client_name: name.clone(), + client_version: version, + }) + .is_err() + { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "Already initialized".to_string(), + data: None, + }; + self.outgoing.send_error(connection_request_id, error).await; + return; + } + + // Only the request that wins session initialization may mutate + // process-global client metadata. 
+ if let Err(error) = set_default_originator(originator.clone()) { + match error { + SetOriginatorError::InvalidHeaderValue => { + tracing::warn!( + client_info_name = %name, + "validated clientInfo.name was rejected while setting originator" + ); + } + SetOriginatorError::AlreadyInitialized => { + // No-op. This is expected to happen if the originator is already set via env var. + // TODO(owen): Once we remove support for CODEX_INTERNAL_ORIGINATOR_OVERRIDE, + // this will be an unexpected state and we can return a JSON-RPC error indicating + // internal server error. + } } } + if self.config.features.enabled(Feature::GeneralAnalytics) { + self.analytics_events_client.track_initialize( + connection_id.0, + analytics_initialize_params, + originator, + self.rpc_transport, + ); + } + set_default_client_residency_requirement(self.config.enforce_residency.value()); + if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() { + *suffix = Some(user_agent_suffix); + } + + let user_agent = get_codex_user_agent(); + let response = InitializeResponse { + user_agent, + codex_home, + platform_family: std::env::consts::FAMILY.to_string(), + platform_os: std::env::consts::OS.to_string(), + }; + + self.outgoing + .send_response(connection_request_id, response) + .await; + + if let Some(outbound_initialized) = outbound_initialized { + // In-process clients can complete readiness immediately here. The + // websocket path defers this until lib.rs finishes transport-layer + // initialize handling for the specific connection. 
+ outbound_initialized.store(true, Ordering::Release); + self.codex_message_processor + .connection_initialized(connection_id) + .await; + } + return; } + + self.dispatch_initialized_client_request( + connection_request_id, + codex_request, + session, + request_context, + ) + .await; + } + + async fn dispatch_initialized_client_request( + self: &Arc, + connection_request_id: ConnectionRequestId, + codex_request: ClientRequest, + session: Arc, + request_context: RequestContext, + ) { + if !session.initialized() { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "Not initialized".to_string(), + data: None, + }; + self.outgoing.send_error(connection_request_id, error).await; + return; + } + if let Some(reason) = codex_request.experimental_reason() - && !session.experimental_api_enabled + && !session.experimental_api_enabled() { let error = JSONRPCErrorError { code: INVALID_REQUEST_ERROR_CODE, @@ -677,6 +746,40 @@ impl MessageProcessor { self.outgoing.send_error(connection_request_id, error).await; return; } + let connection_id = connection_request_id.connection_id; + if self.config.features.enabled(Feature::GeneralAnalytics) + && let ClientRequest::TurnStart { request_id, .. } + | ClientRequest::TurnSteer { request_id, .. 
} = &codex_request + { + self.analytics_events_client.track_request( + connection_id.0, + request_id.clone(), + codex_request.clone(), + ); + } + + let app_server_client_name = session.app_server_client_name().map(str::to_string); + let client_version = session.client_version().map(str::to_string); + Arc::clone(self) + .handle_initialized_client_request( + connection_request_id, + codex_request, + request_context, + app_server_client_name, + client_version, + ) + .await; + } + + async fn handle_initialized_client_request( + self: Arc, + connection_request_id: ConnectionRequestId, + codex_request: ClientRequest, + request_context: RequestContext, + app_server_client_name: Option, + client_version: Option, + ) { + let connection_id = connection_request_id.connection_id; match codex_request { ClientRequest::ConfigRead { request_id, params } => { @@ -849,8 +952,8 @@ impl MessageProcessor { .process_request( connection_id, other, - session.app_server_client_name.clone(), - session.client_version.clone(), + app_server_client_name, + client_version, request_context, ) .boxed() diff --git a/codex-rs/app-server/src/message_processor/tracing_tests.rs b/codex-rs/app-server/src/message_processor/tracing_tests.rs index d0fe22e9b0..c1bb995bab 100644 --- a/codex-rs/app-server/src/message_processor/tracing_tests.rs +++ b/codex-rs/app-server/src/message_processor/tracing_tests.rs @@ -109,9 +109,9 @@ fn tracing_test_guard() -> &'static tokio::sync::Mutex<()> { struct TracingHarness { _server: MockServer, _codex_home: TempDir, - processor: MessageProcessor, + processor: Arc, outgoing_rx: mpsc::Receiver, - session: ConnectionSessionState, + session: Arc, tracing: &'static TestTracing, } @@ -129,7 +129,7 @@ impl TracingHarness { _codex_home: codex_home, processor, outgoing_rx, - session: ConnectionSessionState::default(), + session: Arc::new(ConnectionSessionState::default()), tracing, }; @@ -152,7 +152,7 @@ impl TracingHarness { /*trace*/ None, ) .await; - 
assert!(harness.session.initialized); + assert!(harness.session.initialized()); Ok(harness) } @@ -182,7 +182,7 @@ impl TracingHarness { TEST_CONNECTION_ID, request, AppServerTransport::Stdio, - &mut self.session, + Arc::clone(&self.session), ) .await; read_response(&mut self.outgoing_rx, request_id).await @@ -230,14 +230,14 @@ async fn build_test_config(codex_home: &Path, server_uri: &str) -> Result, ) -> ( - MessageProcessor, + Arc, mpsc::Receiver, ) { let (outgoing_tx, outgoing_rx) = mpsc::channel(16); let outgoing = Arc::new(OutgoingMessageSender::new(outgoing_tx)); let auth_manager = AuthManager::shared_from_config(config.as_ref(), /*enable_codex_api_key_env*/ false); - let processor = MessageProcessor::new(MessageProcessorArgs { + let processor = Arc::new(MessageProcessor::new(MessageProcessorArgs { outgoing, arg0_paths: Arg0DispatchPaths::default(), config, @@ -252,7 +252,7 @@ fn build_test_processor( auth_manager, rpc_transport: AppServerRpcTransport::Stdio, remote_control_handle: None, - }); + })); (processor, outgoing_rx) } diff --git a/codex-rs/app-server/src/thread_state.rs b/codex-rs/app-server/src/thread_state.rs index 11d6ad6bb3..504e59468b 100644 --- a/codex-rs/app-server/src/thread_state.rs +++ b/codex-rs/app-server/src/thread_state.rs @@ -8,6 +8,7 @@ use codex_core::CodexThread; use codex_core::ThreadConfigSnapshot; use codex_protocol::ThreadId; use codex_protocol::protocol::EventMsg; +use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashMap; use std::collections::HashSet; use std::path::PathBuf; @@ -16,6 +17,7 @@ use std::sync::Weak; use tokio::sync::Mutex; use tokio::sync::mpsc; use tokio::sync::oneshot; +use tokio::sync::watch; use tracing::error; type PendingInterruptQueue = Vec<( @@ -27,7 +29,7 @@ pub(crate) struct PendingThreadResumeRequest { pub(crate) request_id: ConnectionRequestId, pub(crate) rollout_path: PathBuf, pub(crate) config_snapshot: ThreadConfigSnapshot, - pub(crate) instruction_sources: Vec, + pub(crate) 
instruction_sources: Vec, pub(crate) thread_summary: codex_app_server_protocol::Thread, } @@ -159,6 +161,7 @@ pub(crate) async fn resolve_server_request_on_thread_listener( struct ThreadEntry { state: Arc>, connection_ids: HashSet, + has_connections_watcher: watch::Sender, } impl Default for ThreadEntry { @@ -166,10 +169,21 @@ impl Default for ThreadEntry { Self { state: Arc::new(Mutex::new(ThreadState::default())), connection_ids: HashSet::new(), + has_connections_watcher: watch::channel(false).0, } } } +impl ThreadEntry { + fn update_has_connections(&self) { + let _ = self.has_connections_watcher.send_if_modified(|current| { + let prev = *current; + *current = !self.connection_ids.is_empty(); + prev != *current + }); + } +} + #[derive(Default)] struct ThreadStateManagerInner { live_connections: HashSet, @@ -286,12 +300,14 @@ impl ThreadStateManager { } if let Some(thread_entry) = state.threads.get_mut(&thread_id) { thread_entry.connection_ids.remove(&connection_id); + thread_entry.update_has_connections(); } }; true } + #[cfg(test)] pub(crate) async fn has_subscribers(&self, thread_id: ThreadId) -> bool { self.state .lock() @@ -319,6 +335,7 @@ impl ThreadStateManager { .insert(thread_id); let thread_entry = state.threads.entry(thread_id).or_default(); thread_entry.connection_ids.insert(connection_id); + thread_entry.update_has_connections(); thread_entry.state.clone() }; { @@ -344,12 +361,9 @@ impl ThreadStateManager { .entry(connection_id) .or_default() .insert(thread_id); - state - .threads - .entry(thread_id) - .or_default() - .connection_ids - .insert(connection_id); + let thread_entry = state.threads.entry(thread_id).or_default(); + thread_entry.connection_ids.insert(connection_id); + thread_entry.update_has_connections(); true } @@ -364,6 +378,7 @@ impl ThreadStateManager { for thread_id in &thread_ids { if let Some(thread_entry) = state.threads.get_mut(thread_id) { thread_entry.connection_ids.remove(&connection_id); + thread_entry.update_has_connections(); 
} } thread_ids @@ -377,4 +392,15 @@ impl ThreadStateManager { .collect::>() } } + + pub(crate) async fn subscribe_to_has_connections( + &self, + thread_id: ThreadId, + ) -> Option> { + let state = self.state.lock().await; + state + .threads + .get(&thread_id) + .map(|thread_entry| thread_entry.has_connections_watcher.subscribe()) + } } diff --git a/codex-rs/app-server/src/thread_status.rs b/codex-rs/app-server/src/thread_status.rs index 802f7e197c..f78b8753a9 100644 --- a/codex-rs/app-server/src/thread_status.rs +++ b/codex-rs/app-server/src/thread_status.rs @@ -8,9 +8,8 @@ use codex_app_server_protocol::Thread; use codex_app_server_protocol::ThreadActiveFlag; use codex_app_server_protocol::ThreadStatus; use codex_app_server_protocol::ThreadStatusChangedNotification; +use codex_protocol::ThreadId; use std::collections::HashMap; -#[cfg(test)] -use std::path::PathBuf; use std::sync::Arc; use tokio::sync::Mutex; #[cfg(test)] @@ -244,6 +243,13 @@ impl ThreadWatchManager { } } + pub(crate) async fn subscribe( + &self, + thread_id: ThreadId, + ) -> Option> { + Some(self.state.lock().await.subscribe(thread_id.to_string())) + } + async fn note_active_guard_released( &self, thread_id: String, @@ -295,6 +301,7 @@ pub(crate) fn resolve_thread_status( #[derive(Default)] struct ThreadWatchState { runtime_by_thread_id: HashMap, + status_watcher_by_thread_id: HashMap>, } impl ThreadWatchState { @@ -309,6 +316,7 @@ impl ThreadWatchState { .entry(thread_id.clone()) .or_default(); runtime.is_loaded = true; + self.update_status_watcher_for_thread(&thread_id); if emit_notification { self.status_changed_notification(thread_id, previous_status) } else { @@ -319,6 +327,7 @@ impl ThreadWatchState { fn remove_thread(&mut self, thread_id: &str) -> Option { let previous_status = self.status_for(thread_id); self.runtime_by_thread_id.remove(thread_id); + self.update_status_watcher(thread_id, &ThreadStatus::NotLoaded); if previous_status.is_some() && previous_status != 
Some(ThreadStatus::NotLoaded) { Some(ThreadStatusChangedNotification { thread_id: thread_id.to_string(), @@ -344,6 +353,7 @@ impl ThreadWatchState { .or_default(); runtime.is_loaded = true; mutate(runtime); + self.update_status_watcher_for_thread(thread_id); self.status_changed_notification(thread_id.to_string(), previous_status) } @@ -358,6 +368,40 @@ impl ThreadWatchState { .unwrap_or(ThreadStatus::NotLoaded) } + fn subscribe(&mut self, thread_id: String) -> watch::Receiver { + let status = self.loaded_status_for_thread(&thread_id); + let sender = self + .status_watcher_by_thread_id + .entry(thread_id) + .or_insert_with(|| watch::channel(status.clone()).0); + sender.subscribe() + } + + fn update_status_watcher_for_thread(&mut self, thread_id: &str) { + let status = self.loaded_status_for_thread(thread_id); + self.update_status_watcher(thread_id, &status); + } + + fn update_status_watcher(&mut self, thread_id: &str, status: &ThreadStatus) { + let remove_watcher = if let Some(sender) = self.status_watcher_by_thread_id.get(thread_id) { + let status = status.clone(); + let _ = sender.send_if_modified(|current| { + if *current == status { + false + } else { + *current = status; + true + } + }); + sender.receiver_count() == 0 + } else { + false + }; + if remove_watcher { + self.status_watcher_by_thread_id.remove(thread_id); + } + } + fn status_changed_notification( &self, thread_id: String, @@ -409,6 +453,8 @@ fn loaded_thread_status(runtime: &RuntimeFacts) -> ThreadStatus { #[cfg(test)] mod tests { use super::*; + use codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use tokio::time::Duration; use tokio::time::timeout; @@ -752,6 +798,55 @@ mod tests { ); } + #[tokio::test] + async fn status_watchers_receive_only_their_thread_updates() { + let manager = ThreadWatchManager::new(); + manager + .upsert_thread(test_thread( + INTERACTIVE_THREAD_ID, + 
codex_app_server_protocol::SessionSource::Cli, + )) + .await; + manager + .upsert_thread(test_thread( + NON_INTERACTIVE_THREAD_ID, + codex_app_server_protocol::SessionSource::AppServer, + )) + .await; + let interactive_thread_id = ThreadId::from_string(INTERACTIVE_THREAD_ID) + .expect("interactive thread id should parse"); + let non_interactive_thread_id = ThreadId::from_string(NON_INTERACTIVE_THREAD_ID) + .expect("non-interactive thread id should parse"); + let mut interactive_rx = manager + .subscribe(interactive_thread_id) + .await + .expect("interactive status watcher should subscribe"); + let mut non_interactive_rx = manager + .subscribe(non_interactive_thread_id) + .await + .expect("non-interactive status watcher should subscribe"); + + manager.note_turn_started(INTERACTIVE_THREAD_ID).await; + + timeout(Duration::from_secs(1), interactive_rx.changed()) + .await + .expect("timed out waiting for interactive status update") + .expect("interactive status watcher should remain open"); + assert_eq!( + *interactive_rx.borrow(), + ThreadStatus::Active { + active_flags: vec![], + }, + ); + assert!( + timeout(Duration::from_millis(100), non_interactive_rx.changed()) + .await + .is_err(), + "unrelated thread watcher should not receive an update" + ); + assert_eq!(*non_interactive_rx.borrow(), ThreadStatus::Idle); + } + async fn wait_for_status( manager: &ThreadWatchManager, thread_id: &str, @@ -800,7 +895,7 @@ mod tests { updated_at: 0, status: ThreadStatus::NotLoaded, path: None, - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), cli_version: "test".to_string(), agent_nickname: None, agent_role: None, diff --git a/codex-rs/app-server/src/transport/auth.rs b/codex-rs/app-server/src/transport/auth.rs index a67c692b7d..45f44a36c9 100644 --- a/codex-rs/app-server/src/transport/auth.rs +++ b/codex-rs/app-server/src/transport/auth.rs @@ -34,6 +34,10 @@ pub struct AppServerWebsocketAuthArgs { #[arg(long = "ws-token-file", value_name = "PATH")] pub 
ws_token_file: Option, + /// Hex-encoded SHA-256 digest of the capability token. + #[arg(long = "ws-token-sha256", value_name = "HEX")] + pub ws_token_sha256: Option, + /// Absolute path to the shared secret file for signed JWT bearer tokens. #[arg(long = "ws-shared-secret-file", value_name = "PATH")] pub ws_shared_secret_file: Option, @@ -65,7 +69,7 @@ pub struct AppServerWebsocketAuthSettings { #[derive(Debug, Clone, PartialEq, Eq)] pub enum AppServerWebsocketAuthConfig { CapabilityToken { - token_file: AbsolutePathBuf, + source: AppServerWebsocketCapabilityTokenSource, }, SignedBearerToken { shared_secret_file: AbsolutePathBuf, @@ -75,6 +79,12 @@ pub enum AppServerWebsocketAuthConfig { }, } +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AppServerWebsocketCapabilityTokenSource { + TokenFile { token_file: AbsolutePathBuf }, + TokenSha256 { token_sha256: [u8; 32] }, +} + #[derive(Clone, Debug, Default)] pub(crate) struct WebsocketAuthPolicy { pub(crate) mode: Option, @@ -144,17 +154,34 @@ impl AppServerWebsocketAuthArgs { "`--ws-shared-secret-file`, `--ws-issuer`, `--ws-audience`, and `--ws-max-clock-skew-seconds` require `--ws-auth signed-bearer-token`" ); } - let token_file = self.ws_token_file.context( - "`--ws-token-file` is required when `--ws-auth capability-token` is set", - )?; - Some(AppServerWebsocketAuthConfig::CapabilityToken { - token_file: absolute_path_arg("--ws-token-file", token_file)?, - }) + let source = match (self.ws_token_file, self.ws_token_sha256) { + (Some(_), Some(_)) => { + anyhow::bail!( + "`--ws-token-file` and `--ws-token-sha256` are mutually exclusive" + ); + } + (Some(token_file), None) => { + AppServerWebsocketCapabilityTokenSource::TokenFile { + token_file: absolute_path_arg("--ws-token-file", token_file)?, + } + } + (None, Some(token_sha256)) => { + AppServerWebsocketCapabilityTokenSource::TokenSha256 { + token_sha256: sha256_digest_arg("--ws-token-sha256", &token_sha256)?, + } + } + (None, None) => { + anyhow::bail!( + 
"`--ws-token-file` or `--ws-token-sha256` is required when `--ws-auth capability-token` is set" + ); + } + }; + Some(AppServerWebsocketAuthConfig::CapabilityToken { source }) } Some(WebsocketAuthCliMode::SignedBearerToken) => { - if self.ws_token_file.is_some() { + if self.ws_token_file.is_some() || self.ws_token_sha256.is_some() { anyhow::bail!( - "`--ws-token-file` requires `--ws-auth capability-token`, not `signed-bearer-token`" + "`--ws-token-file` and `--ws-token-sha256` require `--ws-auth capability-token`, not `signed-bearer-token`" ); } let shared_secret_file = self.ws_shared_secret_file.context( @@ -174,6 +201,7 @@ impl AppServerWebsocketAuthArgs { } None => { if self.ws_token_file.is_some() + || self.ws_token_sha256.is_some() || self.ws_shared_secret_file.is_some() || self.ws_issuer.is_some() || self.ws_audience.is_some() @@ -195,12 +223,19 @@ pub(crate) fn policy_from_settings( settings: &AppServerWebsocketAuthSettings, ) -> io::Result { let mode = match settings.config.as_ref() { - Some(AppServerWebsocketAuthConfig::CapabilityToken { token_file }) => { - let token = read_trimmed_secret(token_file.as_ref())?; - Some(WebsocketAuthMode::CapabilityToken { - token_sha256: sha256_digest(token.as_bytes()), - }) - } + Some(AppServerWebsocketAuthConfig::CapabilityToken { source }) => match source { + AppServerWebsocketCapabilityTokenSource::TokenFile { token_file } => { + let token = read_trimmed_secret(token_file.as_ref())?; + Some(WebsocketAuthMode::CapabilityToken { + token_sha256: sha256_digest(token.as_bytes()), + }) + } + AppServerWebsocketCapabilityTokenSource::TokenSha256 { token_sha256 } => { + Some(WebsocketAuthMode::CapabilityToken { + token_sha256: *token_sha256, + }) + } + }, Some(AppServerWebsocketAuthConfig::SignedBearerToken { shared_secret_file, issuer, @@ -387,6 +422,30 @@ fn absolute_path_arg(flag_name: &str, path: PathBuf) -> anyhow::Result anyhow::Result<[u8; 32]> { + let trimmed = value.trim(); + if trimmed.len() != 64 { + 
anyhow::bail!("{flag_name} must be a 64-character hex SHA-256 digest"); + } + + let mut digest = [0u8; 32]; + for (index, pair) in trimmed.as_bytes().chunks_exact(2).enumerate() { + let high = hex_nibble(flag_name, pair[0])?; + let low = hex_nibble(flag_name, pair[1])?; + digest[index] = (high << 4) | low; + } + Ok(digest) +} + +fn hex_nibble(flag_name: &str, byte: u8) -> anyhow::Result { + match byte { + b'0'..=b'9' => Ok(byte - b'0'), + b'a'..=b'f' => Ok(byte - b'a' + 10), + b'A'..=b'F' => Ok(byte - b'A' + 10), + _ => anyhow::bail!("{flag_name} must be a 64-character hex SHA-256 digest"), + } +} + fn sha256_digest(input: &[u8]) -> [u8; 32] { let mut digest = [0u8; 32]; digest.copy_from_slice(&Sha256::digest(input)); @@ -403,6 +462,7 @@ fn unauthorized(message: &'static str) -> WebsocketAuthError { #[cfg(test)] mod tests { use super::*; + use axum::http::HeaderValue; use base64::Engine; use base64::engine::general_purpose::URL_SAFE_NO_PAD; use hmac::Hmac; @@ -443,19 +503,98 @@ mod tests { } #[test] - fn capability_token_args_require_token_file() { + fn capability_token_args_require_token_file_or_hash() { let err = AppServerWebsocketAuthArgs { ws_auth: Some(WebsocketAuthCliMode::CapabilityToken), ..Default::default() } .try_into_settings() - .expect_err("capability-token mode should require a token file"); + .expect_err("capability-token mode should require a token source"); assert!( - err.to_string().contains("--ws-token-file"), + err.to_string().contains("--ws-token-file") + && err.to_string().contains("--ws-token-sha256"), "unexpected error: {err}" ); } + #[test] + fn capability_token_args_accept_token_hash() { + let settings = AppServerWebsocketAuthArgs { + ws_auth: Some(WebsocketAuthCliMode::CapabilityToken), + ws_token_sha256: Some("ab".repeat(32)), + ..Default::default() + } + .try_into_settings() + .expect("capability-token hash args should parse"); + + assert_eq!( + settings, + AppServerWebsocketAuthSettings { + config: 
Some(AppServerWebsocketAuthConfig::CapabilityToken { + source: AppServerWebsocketCapabilityTokenSource::TokenSha256 { + token_sha256: [0xab; 32], + }, + }), + } + ); + } + + #[test] + fn capability_token_args_reject_multiple_token_sources() { + let err = AppServerWebsocketAuthArgs { + ws_auth: Some(WebsocketAuthCliMode::CapabilityToken), + ws_token_file: Some(PathBuf::from("/tmp/token")), + ws_token_sha256: Some("ab".repeat(32)), + ..Default::default() + } + .try_into_settings() + .expect_err("capability-token mode should reject multiple token sources"); + assert!( + err.to_string().contains("mutually exclusive"), + "unexpected error: {err}" + ); + } + + #[test] + fn capability_token_args_reject_malformed_token_hash() { + let err = AppServerWebsocketAuthArgs { + ws_auth: Some(WebsocketAuthCliMode::CapabilityToken), + ws_token_sha256: Some("not-a-sha256".to_string()), + ..Default::default() + } + .try_into_settings() + .expect_err("capability-token mode should reject malformed token hashes"); + assert!( + err.to_string().contains("64-character hex"), + "unexpected error: {err}" + ); + } + + #[test] + fn capability_token_hash_policy_authorizes_matching_bearer_token() { + let settings = AppServerWebsocketAuthSettings { + config: Some(AppServerWebsocketAuthConfig::CapabilityToken { + source: AppServerWebsocketCapabilityTokenSource::TokenSha256 { + token_sha256: sha256_digest(b"super-secret-token"), + }, + }), + }; + let policy = policy_from_settings(&settings).expect("hash policy should build"); + let mut headers = HeaderMap::new(); + headers.insert( + AUTHORIZATION, + HeaderValue::from_static("Bearer super-secret-token"), + ); + authorize_upgrade(&headers, &policy).expect("matching token should authorize"); + + headers.insert( + AUTHORIZATION, + HeaderValue::from_static("Bearer wrong-token"), + ); + let err = authorize_upgrade(&headers, &policy).expect_err("wrong token should fail"); + assert_eq!(err.status_code(), StatusCode::UNAUTHORIZED); + } + #[test] fn 
signed_bearer_args_require_mode_when_mode_specific_flags_are_set() { let err = AppServerWebsocketAuthArgs { diff --git a/codex-rs/app-server/src/transport/mod.rs b/codex-rs/app-server/src/transport/mod.rs index 92383cb78f..75a4971905 100644 --- a/codex-rs/app-server/src/transport/mod.rs +++ b/codex-rs/app-server/src/transport/mod.rs @@ -121,7 +121,7 @@ pub(crate) struct ConnectionState { pub(crate) outbound_initialized: Arc, pub(crate) outbound_experimental_api_enabled: Arc, pub(crate) outbound_opted_out_notification_methods: Arc>>, - pub(crate) session: ConnectionSessionState, + pub(crate) session: Arc, } impl ConnectionState { @@ -134,7 +134,7 @@ impl ConnectionState { outbound_initialized, outbound_experimental_api_enabled, outbound_opted_out_notification_methods, - session: ConnectionSessionState::default(), + session: Arc::new(ConnectionSessionState::default()), } } } @@ -402,7 +402,6 @@ mod tests { use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use serde_json::json; - use std::path::PathBuf; use tokio::time::Duration; use tokio::time::timeout; @@ -772,7 +771,7 @@ mod tests { reason: Some("Need extra read access".to_string()), network_approval_context: None, command: Some("cat file".to_string()), - cwd: Some(PathBuf::from("/tmp")), + cwd: Some(absolute_path("/tmp")), command_actions: None, additional_permissions: Some( codex_app_server_protocol::AdditionalPermissionProfile { @@ -834,7 +833,7 @@ mod tests { reason: Some("Need extra read access".to_string()), network_approval_context: None, command: Some("cat file".to_string()), - cwd: Some(PathBuf::from("/tmp")), + cwd: Some(absolute_path("/tmp")), command_actions: None, additional_permissions: Some( codex_app_server_protocol::AdditionalPermissionProfile { diff --git a/codex-rs/app-server/src/transport/remote_control/mod.rs b/codex-rs/app-server/src/transport/remote_control/mod.rs index 1ea89bb643..c014c7a2c9 100644 --- a/codex-rs/app-server/src/transport/remote_control/mod.rs 
+++ b/codex-rs/app-server/src/transport/remote_control/mod.rs @@ -4,7 +4,6 @@ mod protocol; mod websocket; use crate::transport::remote_control::websocket::RemoteControlWebsocket; -use crate::transport::remote_control::websocket::load_remote_control_auth; pub use self::protocol::ClientId; use self::protocol::ServerEvent; @@ -59,9 +58,6 @@ pub(crate) async fn start_remote_control( } else { None }; - if initial_enabled { - validate_remote_control_auth(&auth_manager).await?; - } let (enabled_tx, enabled_rx) = watch::channel(initial_enabled); let join_handle = tokio::spawn(async move { @@ -86,15 +82,5 @@ pub(crate) async fn start_remote_control( )) } -pub(crate) async fn validate_remote_control_auth( - auth_manager: &Arc, -) -> io::Result<()> { - match load_remote_control_auth(auth_manager).await { - Ok(_) => Ok(()), - Err(err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()), - Err(err) => Err(err), - } -} - #[cfg(test)] mod tests; diff --git a/codex-rs/app-server/src/transport/remote_control/protocol.rs b/codex-rs/app-server/src/transport/remote_control/protocol.rs index 857855f2a0..dcb79e5c35 100644 --- a/codex-rs/app-server/src/transport/remote_control/protocol.rs +++ b/codex-rs/app-server/src/transport/remote_control/protocol.rs @@ -48,9 +48,8 @@ pub enum ClientEvent { message: JSONRPCMessage, }, /// Backend-generated acknowledgement for all server envelopes addressed to - /// `client_id` whose envelope `seq_id` is less than or equal to this ack's - /// `seq_id`. This cursor is client-scoped, not stream-scoped, so receivers - /// must not use `stream_id` to partition acks. + /// `client_id` and `stream_id` whose envelope `seq_id` is less than or equal + /// to this ack's `seq_id`. This cursor is stream-scoped. 
Ack, Ping, ClientClosed, @@ -65,7 +64,7 @@ pub(crate) struct ClientEnvelope { pub(crate) client_id: ClientId, #[serde(rename = "stream_id", skip_serializing_if = "Option::is_none")] pub(crate) stream_id: Option, - /// For `Ack`, this is the backend-generated per-client cursor over + /// For `Ack`, this is the backend-generated per-stream cursor over /// `ServerEnvelope.seq_id`. #[serde(rename = "seq_id", skip_serializing_if = "Option::is_none")] pub(crate) seq_id: Option, diff --git a/codex-rs/app-server/src/transport/remote_control/tests.rs b/codex-rs/app-server/src/transport/remote_control/tests.rs index 21808a3a18..d702a99c2c 100644 --- a/codex-rs/app-server/src/transport/remote_control/tests.rs +++ b/codex-rs/app-server/src/transport/remote_control/tests.rs @@ -5,6 +5,7 @@ use super::enroll::update_persisted_remote_control_enrollment; use super::protocol::ClientEnvelope; use super::protocol::ClientEvent; use super::protocol::ClientId; +use super::protocol::StreamId; use super::protocol::normalize_remote_control_url; use super::websocket::REMOTE_CONTROL_PROTOCOL_VERSION; use super::*; @@ -96,6 +97,7 @@ fn remote_control_auth_dot_json(account_id: Option<&str>) -> AuthDotJson { account_id: account_id.map(str::to_string), }), last_refresh: Some(chrono::Utc::now()), + agent_identity: None, } } @@ -162,7 +164,7 @@ async fn remote_control_transport_manages_virtual_clients_and_routes_messages() json!({ "type": "pong", "client_id": "client-1", - "seq_id": 0, + "seq_id": 1, "status": "unknown", }) ); @@ -368,7 +370,7 @@ async fn remote_control_transport_manages_virtual_clients_and_routes_messages() json!({ "type": "pong", "client_id": "client-1", - "seq_id": 3, + "seq_id": 1, "status": "unknown", }) ); @@ -478,6 +480,44 @@ async fn remote_control_start_allows_remote_control_invalid_url_when_disabled() .expect("remote control task should join"); } +#[tokio::test] +async fn remote_control_start_allows_missing_auth_when_enabled() { + let listener = 
TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let auth_manager = AuthManager::shared( + codex_home.path().to_path_buf(), + /*enable_codex_api_key_env*/ false, + AuthCredentialsStoreMode::File, + ); + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + /*state_db*/ None, + auth_manager, + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start before ChatGPT auth is available"); + + timeout(Duration::from_millis(100), listener.accept()) + .await + .expect_err("remote control should wait for auth before connecting"); + + shutdown_token.cancel(); + timeout(Duration::from_secs(1), remote_task) + .await + .expect("remote control task should stop") + .expect("remote control task should join"); +} + #[tokio::test] async fn remote_control_handle_set_enabled_stops_and_restarts_connections() { let listener = TcpListener::bind("127.0.0.1:0") @@ -617,12 +657,13 @@ async fn remote_control_transport_clears_outgoing_buffer_when_backend_acks() { )) .await .expect("remote writer should accept outgoing message"); + let (server_event, stream_id) = read_server_event_with_stream_id(&mut first_websocket).await; assert_eq!( - read_server_event(&mut first_websocket).await, + server_event, json!({ "type": "server_message", "client_id": "client-1", - "seq_id": 0, + "seq_id": 1, "message": { "method": "configWarning", "params": { @@ -638,8 +679,8 @@ async fn remote_control_transport_clears_outgoing_buffer_when_backend_acks() { ClientEnvelope { event: ClientEvent::Ack, client_id: client_id.clone(), - stream_id: None, - seq_id: Some(0), + stream_id: 
Some(stream_id), + seq_id: Some(1), cursor: None, }, ) @@ -853,7 +894,7 @@ async fn remote_control_http_mode_enrolls_before_connecting() { json!({ "type": "server_message", "client_id": backend_client_id.0.clone(), - "seq_id": 0, + "seq_id": 1, "message": { "id": 11, "result": { @@ -881,7 +922,7 @@ async fn remote_control_http_mode_enrolls_before_connecting() { json!({ "type": "server_message", "client_id": backend_client_id.0.clone(), - "seq_id": 1, + "seq_id": 2, "message": { "method": "configWarning", "params": { @@ -1355,6 +1396,12 @@ async fn send_client_event( } async fn read_server_event(websocket: &mut WebSocketStream) -> serde_json::Value { + read_server_event_with_stream_id(websocket).await.0 +} + +async fn read_server_event_with_stream_id( + websocket: &mut WebSocketStream, +) -> (serde_json::Value, StreamId) { loop { let frame = timeout(Duration::from_secs(5), websocket.next()) .await @@ -1365,13 +1412,15 @@ async fn read_server_event(websocket: &mut WebSocketStream) -> serde_ tungstenite::Message::Text(text) => { let mut event: serde_json::Value = serde_json::from_str(text.as_ref()).expect("server event should deserialize"); - if let Some(stream_id) = event + let stream_id = event .as_object_mut() .and_then(|event| event.remove("stream_id")) - { - assert!(stream_id.is_string(), "stream_id should be a string"); - } - return event; + .expect("stream_id should be present"); + let stream_id = stream_id + .as_str() + .expect("stream_id should be a string") + .to_string(); + return (event, StreamId(stream_id)); } tungstenite::Message::Ping(payload) => { websocket diff --git a/codex-rs/app-server/src/transport/remote_control/websocket.rs b/codex-rs/app-server/src/transport/remote_control/websocket.rs index 42924c2d34..d813be1f6a 100644 --- a/codex-rs/app-server/src/transport/remote_control/websocket.rs +++ b/codex-rs/app-server/src/transport/remote_control/websocket.rs @@ -14,6 +14,7 @@ use super::protocol::ClientEvent; use super::protocol::ClientId; use 
super::protocol::RemoteControlTarget; use super::protocol::ServerEnvelope; +use super::protocol::StreamId; use axum::http::HeaderValue; use base64::Engine; use codex_core::util::backoff; @@ -25,8 +26,8 @@ use futures::SinkExt; use futures::StreamExt; use futures::stream::SplitSink; use futures::stream::SplitStream; -use std::collections::BTreeMap; use std::collections::HashMap; +use std::collections::VecDeque; use std::io; use std::io::ErrorKind; use std::sync::Arc; @@ -57,10 +58,7 @@ const REMOTE_CONTROL_ACCOUNT_ID_RETRY_INTERVAL: std::time::Duration = std::time::Duration::from_secs(1); struct BoundedOutboundBuffer { - // Remote-control acks are generated by the backend at client scope, so - // retransmit retention is keyed by client_id only. stream_id stays on each - // envelope for routing, but it is not part of the ack cursor. - buffer_by_client: HashMap>, + buffer_by_stream: HashMap<(ClientId, StreamId), VecDeque>, used_tx: watch::Sender, } @@ -68,46 +66,50 @@ impl BoundedOutboundBuffer { fn new() -> (Self, watch::Receiver) { let (used_tx, used_rx) = watch::channel(0); let buffer = Self { - buffer_by_client: HashMap::new(), + buffer_by_stream: HashMap::new(), used_tx, }; (buffer, used_rx) } fn insert(&mut self, server_envelope: &ServerEnvelope) { - self.buffer_by_client - .entry(server_envelope.client_id.clone()) + self.buffer_by_stream + .entry(( + server_envelope.client_id.clone(), + server_envelope.stream_id.clone(), + )) .or_default() - .insert(server_envelope.seq_id, server_envelope.clone()); + .push_back(server_envelope.clone()); self.used_tx.send_modify(|used| *used += 1); } - fn ack(&mut self, client_id: &ClientId, acked_seq_id: u64) { - let Some(buffer) = self.buffer_by_client.get_mut(client_id) else { + fn ack(&mut self, client_id: &ClientId, stream_id: &StreamId, acked_seq_id: u64) { + let key = (client_id.clone(), stream_id.clone()); + let Some(buffer) = self.buffer_by_stream.get_mut(&key) else { return; }; - while let Some(seq_id) = 
buffer.first_key_value().map(|(seq_id, _)| seq_id) - && *seq_id <= acked_seq_id + while let Some(server_envelope) = buffer.front() + && server_envelope.seq_id <= acked_seq_id { - buffer.pop_first(); + buffer.pop_front(); self.used_tx.send_modify(|used| *used -= 1); } if buffer.is_empty() { - self.buffer_by_client.remove(client_id); + self.buffer_by_stream.remove(&key); } } fn server_envelopes(&self) -> impl Iterator { - self.buffer_by_client + self.buffer_by_stream .values() - .flat_map(|buffer| buffer.values()) + .flat_map(|buffer| buffer.iter()) } } struct WebsocketState { outbound_buffer: BoundedOutboundBuffer, subscribe_cursor: Option, - next_seq_id: u64, + next_seq_id_by_stream: HashMap<(ClientId, StreamId), u64>, } pub(crate) struct RemoteControlWebsocket { @@ -162,7 +164,7 @@ impl RemoteControlWebsocket { state: Arc::new(Mutex::new(WebsocketState { outbound_buffer, subscribe_cursor: None, - next_seq_id: 0, + next_seq_id_by_stream: HashMap::new(), })), server_event_rx: Arc::new(Mutex::new(server_event_rx)), used_rx, @@ -445,10 +447,16 @@ impl RemoteControlWebsocket { } } }; - let (server_envelope, write_complete_tx) = { + let (payload, write_complete_tx) = { let mut state = state.lock().await; - let seq_id = state.next_seq_id; - state.next_seq_id = state.next_seq_id.saturating_add(1); + let seq_key = ( + queued_server_envelope.client_id.clone(), + queued_server_envelope.stream_id.clone(), + ); + let seq_id = *state + .next_seq_id_by_stream + .entry(seq_key.clone()) + .or_insert(1); let server_envelope = ServerEnvelope { event: queued_server_envelope.event, @@ -456,17 +464,19 @@ impl RemoteControlWebsocket { seq_id, stream_id: queued_server_envelope.stream_id, }; + let payload = match serde_json::to_string(&server_envelope) { + Ok(payload) => payload, + Err(err) => { + error!("failed to serialize remote-control server event: {err}"); + continue; + } + }; + state + .next_seq_id_by_stream + .insert(seq_key, seq_id.saturating_add(1)); 
state.outbound_buffer.insert(&server_envelope); - (server_envelope, queued_server_envelope.write_complete_tx) - }; - - let payload = match serde_json::to_string(&server_envelope) { - Ok(payload) => payload, - Err(err) => { - error!("failed to serialize remote-control server event: {err}"); - continue; - } + (payload, queued_server_envelope.write_complete_tx) }; tokio::select! { @@ -590,10 +600,13 @@ impl RemoteControlWebsocket { } if let ClientEvent::Ack = &client_envelope.event && let Some(acked_seq_id) = client_envelope.seq_id + && let Some(stream_id) = client_envelope.stream_id.as_ref() { - websocket_state - .outbound_buffer - .ack(&client_envelope.client_id, acked_seq_id); + websocket_state.outbound_buffer.ack( + &client_envelope.client_id, + stream_id, + acked_seq_id, + ); } drop(websocket_state); @@ -920,6 +933,13 @@ mod tests { use tokio::time::timeout; use tokio_tungstenite::accept_async; + // Windows Bazel CI can take longer than a few seconds for the websocket + // client connection attempt to reach the local test listener. 
+ #[cfg(windows)] + const TEST_HTTP_ACCEPT_TIMEOUT: Duration = Duration::from_secs(30); + #[cfg(not(windows))] + const TEST_HTTP_ACCEPT_TIMEOUT: Duration = Duration::from_secs(5); + async fn remote_control_state_runtime(codex_home: &TempDir) -> Arc { StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string()) .await @@ -971,6 +991,7 @@ mod tests { account_id: Some("account_id".to_string()), }), last_refresh: Some(Utc::now()), + agent_identity: None, } } @@ -1219,7 +1240,7 @@ mod tests { let state = Arc::new(Mutex::new(WebsocketState { outbound_buffer, subscribe_cursor: None, - next_seq_id: 0, + next_seq_id_by_stream: HashMap::new(), })); let (_server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY); let server_event_rx = Arc::new(Mutex::new(server_event_rx)); @@ -1247,6 +1268,83 @@ mod tests { .expect("writer should stop cleanly"); } + #[tokio::test] + async fn run_server_writer_inner_assigns_contiguous_seq_ids_per_stream() { + let (client_stream, mut server_stream) = connected_websocket_pair().await; + let (websocket_writer, _websocket_reader) = client_stream.split(); + let (outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let state = Arc::new(Mutex::new(WebsocketState { + outbound_buffer, + subscribe_cursor: None, + next_seq_id_by_stream: HashMap::new(), + })); + let (server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY); + let server_event_rx = Arc::new(Mutex::new(server_event_rx)); + let shutdown_token = CancellationToken::new(); + let writer_task = tokio::spawn(RemoteControlWebsocket::run_server_writer_inner( + state, + server_event_rx, + used_rx, + websocket_writer, + Duration::from_secs(60), + shutdown_token.clone(), + )); + + let client_id = ClientId("client-1".to_string()); + let first_stream = StreamId("stream-1".to_string()); + let second_stream = StreamId("stream-2".to_string()); + for stream_id in [&first_stream, &second_stream, &first_stream] { + server_event_tx + 
.send(super::super::QueuedServerEnvelope { + event: ServerEvent::Pong { + status: crate::transport::remote_control::protocol::PongStatus::Active, + }, + client_id: client_id.clone(), + stream_id: stream_id.clone(), + write_complete_tx: None, + }) + .await + .expect("server event should queue"); + } + + assert_eq!( + read_server_text_event(&mut server_stream).await, + serde_json::json!({ + "type": "pong", + "client_id": "client-1", + "stream_id": "stream-1", + "seq_id": 1, + "status": "active", + }) + ); + assert_eq!( + read_server_text_event(&mut server_stream).await, + serde_json::json!({ + "type": "pong", + "client_id": "client-1", + "stream_id": "stream-2", + "seq_id": 1, + "status": "active", + }) + ); + assert_eq!( + read_server_text_event(&mut server_stream).await, + serde_json::json!({ + "type": "pong", + "client_id": "client-1", + "stream_id": "stream-1", + "seq_id": 2, + "status": "active", + }) + ); + + shutdown_token.cancel(); + writer_task + .await + .expect("writer task should join") + .expect("writer should stop cleanly"); + } + #[tokio::test] async fn run_websocket_reader_inner_times_out_without_pong_frames() { let (client_stream, _server_stream) = connected_websocket_pair().await; @@ -1255,7 +1353,7 @@ mod tests { let state = Arc::new(Mutex::new(WebsocketState { outbound_buffer, subscribe_cursor: None, - next_seq_id: 0, + next_seq_id_by_stream: HashMap::new(), })); let (server_event_tx, _server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY); let (transport_event_tx, _transport_event_rx) = @@ -1286,70 +1384,32 @@ mod tests { } #[test] - fn outbound_buffer_acks_by_client_id_across_stream_ids() { + fn outbound_buffer_acks_by_stream_id() { let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); let client_1 = ClientId("client-1".to_string()); let client_2 = ClientId("client-2".to_string()); + let stream_1 = StreamId("stream-1".to_string()); outbound_buffer.insert(&server_envelope( &client_1, "stream-1", - /*seq_id*/ 0, + /*seq_id*/ 
1, "first-client-old-stream", )); outbound_buffer.insert(&server_envelope( &client_2, "stream-1", - /*seq_id*/ 1, + /*seq_id*/ 2, "second-client", )); outbound_buffer.insert(&server_envelope( &client_1, "stream-2", - /*seq_id*/ 2, + /*seq_id*/ 3, "first-client-new-stream", )); - outbound_buffer.ack(&client_1, /*acked_seq_id*/ 2); - - let mut retained = outbound_buffer - .server_envelopes() - .map(|server_envelope| { - ( - server_envelope.client_id.0.as_str(), - server_envelope.stream_id.0.as_str(), - server_envelope.seq_id, - ) - }) - .collect::>(); - retained.sort_unstable(); - assert_eq!(retained, vec![("client-2", "stream-1", 1)]); - assert_eq!(*used_rx.borrow(), 1); - } - - #[test] - fn outbound_buffer_retains_unacked_messages_until_ack_advances() { - let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); - let client_1 = ClientId("client-1".to_string()); - let client_2 = ClientId("client-2".to_string()); - - outbound_buffer.insert(&server_envelope( - &client_1, - "stream-1", - /*seq_id*/ 0, - "first-old", - )); - outbound_buffer.insert(&server_envelope( - &client_1, - "stream-2", - /*seq_id*/ 1, - "first-new", - )); - outbound_buffer.insert(&server_envelope( - &client_2, "stream-1", /*seq_id*/ 2, "second", - )); - - outbound_buffer.ack(&client_1, /*acked_seq_id*/ 0); + outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 3); let mut retained = outbound_buffer .server_envelopes() @@ -1364,7 +1424,50 @@ mod tests { retained.sort_unstable(); assert_eq!( retained, - vec![("client-1", "stream-2", 1), ("client-2", "stream-1", 2)] + vec![("client-1", "stream-2", 3), ("client-2", "stream-1", 2)] + ); + assert_eq!(*used_rx.borrow(), 2); + } + + #[test] + fn outbound_buffer_retains_unacked_messages_until_ack_advances() { + let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let client_1 = ClientId("client-1".to_string()); + let client_2 = ClientId("client-2".to_string()); + let stream_1 = StreamId("stream-1".to_string()); + + 
outbound_buffer.insert(&server_envelope( + &client_1, + "stream-1", + /*seq_id*/ 1, + "first-old", + )); + outbound_buffer.insert(&server_envelope( + &client_1, + "stream-2", + /*seq_id*/ 2, + "first-new", + )); + outbound_buffer.insert(&server_envelope( + &client_2, "stream-1", /*seq_id*/ 3, "second", + )); + + outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 1); + + let mut retained = outbound_buffer + .server_envelopes() + .map(|server_envelope| { + ( + server_envelope.client_id.0.as_str(), + server_envelope.stream_id.0.as_str(), + server_envelope.seq_id, + ) + }) + .collect::>(); + retained.sort_unstable(); + assert_eq!( + retained, + vec![("client-1", "stream-2", 2), ("client-2", "stream-1", 3)] ); assert_eq!(*used_rx.borrow(), 2); } @@ -1393,7 +1496,7 @@ mod tests { } async fn accept_http_request(listener: &TcpListener) -> (TcpStream, String) { - let (stream, _) = timeout(Duration::from_secs(5), listener.accept()) + let (stream, _) = timeout(TEST_HTTP_ACCEPT_TIMEOUT, listener.accept()) .await .expect("HTTP request should arrive in time") .expect("listener accept should succeed"); @@ -1449,6 +1552,20 @@ mod tests { (client_stream, server_stream) } + async fn read_server_text_event( + server_stream: &mut WebSocketStream, + ) -> serde_json::Value { + let message = timeout(Duration::from_secs(5), server_stream.next()) + .await + .expect("server event should arrive in time") + .expect("server websocket should stay open") + .expect("server event should read"); + let tungstenite::Message::Text(text) = message else { + panic!("expected text event, got {message:?}"); + }; + serde_json::from_str(text.as_ref()).expect("server event should deserialize") + } + async fn respond_with_status_and_headers( mut stream: TcpStream, status: &str, diff --git a/codex-rs/app-server/tests/common/auth_fixtures.rs b/codex-rs/app-server/tests/common/auth_fixtures.rs index 99334f0770..86f0fb456d 100644 --- a/codex-rs/app-server/tests/common/auth_fixtures.rs +++ 
b/codex-rs/app-server/tests/common/auth_fixtures.rs @@ -163,6 +163,7 @@ pub fn write_chatgpt_auth( openai_api_key: None, tokens: Some(tokens), last_refresh, + agent_identity: None, }; save_auth(codex_home, &auth, cli_auth_credentials_store_mode).context("write auth.json") diff --git a/codex-rs/app-server/tests/common/config.rs b/codex-rs/app-server/tests/common/config.rs index deb16c6322..1ac2572fa2 100644 --- a/codex-rs/app-server/tests/common/config.rs +++ b/codex-rs/app-server/tests/common/config.rs @@ -78,3 +78,31 @@ model_provider = "{model_provider_id}" ), ) } + +pub fn write_mock_responses_config_toml_with_chatgpt_base_url( + codex_home: &Path, + server_uri: &str, + chatgpt_base_url: &str, +) -> std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write( + config_toml, + format!( + r#" +model = "mock-model" +approval_policy = "never" +sandbox_mode = "read-only" +chatgpt_base_url = "{chatgpt_base_url}" + +model_provider = "mock_provider" + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "{server_uri}/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"# + ), + ) +} diff --git a/codex-rs/app-server/tests/common/lib.rs b/codex-rs/app-server/tests/common/lib.rs index 3f89765851..6ac26d8a56 100644 --- a/codex-rs/app-server/tests/common/lib.rs +++ b/codex-rs/app-server/tests/common/lib.rs @@ -14,10 +14,13 @@ pub use auth_fixtures::encode_id_token; pub use auth_fixtures::write_chatgpt_auth; use codex_app_server_protocol::JSONRPCResponse; pub use config::write_mock_responses_config_toml; +pub use config::write_mock_responses_config_toml_with_chatgpt_base_url; +pub use core_test_support::PathBufExt; pub use core_test_support::format_with_current_shell; pub use core_test_support::format_with_current_shell_display; pub use core_test_support::format_with_current_shell_display_non_login; pub use core_test_support::format_with_current_shell_non_login; +pub use 
core_test_support::test_absolute_path; pub use core_test_support::test_path_buf_with_windows; pub use core_test_support::test_tmp_path; pub use core_test_support::test_tmp_path_buf; @@ -37,6 +40,7 @@ pub use responses::create_shell_command_sse_response; pub use rollout::create_fake_rollout; pub use rollout::create_fake_rollout_with_source; pub use rollout::create_fake_rollout_with_text_elements; +pub use rollout::create_fake_rollout_with_token_usage; pub use rollout::rollout_path; use serde::de::DeserializeOwned; diff --git a/codex-rs/app-server/tests/common/mcp_process.rs b/codex-rs/app-server/tests/common/mcp_process.rs index 76bb7bff43..22225c7c92 100644 --- a/codex-rs/app-server/tests/common/mcp_process.rs +++ b/codex-rs/app-server/tests/common/mcp_process.rs @@ -47,6 +47,7 @@ use codex_app_server_protocol::JSONRPCRequest; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::ListMcpServerStatusParams; use codex_app_server_protocol::LoginAccountParams; +use codex_app_server_protocol::MarketplaceAddParams; use codex_app_server_protocol::McpResourceReadParams; use codex_app_server_protocol::McpServerToolCallParams; use codex_app_server_protocol::MockExperimentalMethodParams; @@ -62,8 +63,10 @@ use codex_app_server_protocol::SkillsListParams; use codex_app_server_protocol::ThreadArchiveParams; use codex_app_server_protocol::ThreadCompactStartParams; use codex_app_server_protocol::ThreadForkParams; +use codex_app_server_protocol::ThreadInjectItemsParams; use codex_app_server_protocol::ThreadListParams; use codex_app_server_protocol::ThreadLoadedListParams; +use codex_app_server_protocol::ThreadMemoryModeSetParams; use codex_app_server_protocol::ThreadMetadataUpdateParams; use codex_app_server_protocol::ThreadReadParams; use codex_app_server_protocol::ThreadRealtimeAppendAudioParams; @@ -99,12 +102,17 @@ pub struct McpProcess { } pub const DEFAULT_CLIENT_NAME: &str = "codex-app-server-tests"; +const DISABLE_MANAGED_CONFIG_ENV_VAR: &str = 
"CODEX_APP_SERVER_DISABLE_MANAGED_CONFIG"; impl McpProcess { pub async fn new(codex_home: &Path) -> anyhow::Result { Self::new_with_env_and_args(codex_home, &[], &[]).await } + pub async fn new_without_managed_config(codex_home: &Path) -> anyhow::Result { + Self::new_with_env(codex_home, &[(DISABLE_MANAGED_CONFIG_ENV_VAR, Some("1"))]).await + } + pub async fn new_with_args(codex_home: &Path, args: &[&str]) -> anyhow::Result { Self::new_with_env_and_args(codex_home, &[], args).await } @@ -136,6 +144,11 @@ impl McpProcess { cmd.current_dir(codex_home); cmd.env("CODEX_HOME", codex_home); cmd.env("RUST_LOG", "info"); + // Keep integration tests isolated from host managed configuration. + cmd.env( + "CODEX_APP_SERVER_MANAGED_CONFIG_PATH", + codex_home.join("managed_config.toml"), + ); cmd.env_remove(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR); cmd.args(args); @@ -512,6 +525,15 @@ impl McpProcess { self.send_request("skills/list", params).await } + /// Send a `marketplace/add` JSON-RPC request. + pub async fn send_marketplace_add_request( + &mut self, + params: MarketplaceAddParams, + ) -> anyhow::Result { + let params = Some(serde_json::to_value(params)?); + self.send_request("marketplace/add", params).await + } + /// Send a `plugin/install` JSON-RPC request. pub async fn send_plugin_install_request( &mut self, @@ -583,6 +605,15 @@ impl McpProcess { self.send_request("mock/experimentalMethod", params).await } + /// Send a `thread/memoryMode/set` JSON-RPC request (v2, experimental). + pub async fn send_thread_memory_mode_set_request( + &mut self, + params: ThreadMemoryModeSetParams, + ) -> anyhow::Result { + let params = Some(serde_json::to_value(params)?); + self.send_request("thread/memoryMode/set", params).await + } + /// Send a `turn/start` JSON-RPC request (v2). pub async fn send_turn_start_request( &mut self, @@ -592,6 +623,15 @@ impl McpProcess { self.send_request("turn/start", params).await } + /// Send a `thread/inject_items` JSON-RPC request (v2). 
+ pub async fn send_thread_inject_items_request( + &mut self, + params: ThreadInjectItemsParams, + ) -> anyhow::Result { + let params = Some(serde_json::to_value(params)?); + self.send_request("thread/inject_items", params).await + } + /// Send a `command/exec` JSON-RPC request (v2). pub async fn send_command_exec_request( &mut self, diff --git a/codex-rs/app-server/tests/common/rollout.rs b/codex-rs/app-server/tests/common/rollout.rs index b67390154e..06b273754c 100644 --- a/codex-rs/app-server/tests/common/rollout.rs +++ b/codex-rs/app-server/tests/common/rollout.rs @@ -1,9 +1,13 @@ use anyhow::Result; use codex_protocol::ThreadId; +use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::GitInfo; use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::TokenCountEvent; +use codex_protocol::protocol::TokenUsage; +use codex_protocol::protocol::TokenUsageInfo; use serde_json::json; use std::fs; use std::fs::FileTimes; @@ -50,6 +54,61 @@ pub fn create_fake_rollout( ) } +/// Creates a minimal rollout whose history includes a persisted token usage event. +/// +/// Resume and fork tests use this fixture to verify lifecycle replay of restored +/// usage without starting a model turn. The exact token values are intentionally +/// non-zero and asymmetric so assertions catch swapped total/last fields and +/// dropped cached or reasoning counters. 
+pub fn create_fake_rollout_with_token_usage( + codex_home: &Path, + filename_ts: &str, + meta_rfc3339: &str, + preview: &str, + model_provider: Option<&str>, +) -> Result<ThreadId> { + let thread_id = create_fake_rollout( + codex_home, + filename_ts, + meta_rfc3339, + preview, + model_provider, + /*git_info*/ None, + )?; + let payload = serde_json::to_value(EventMsg::TokenCount(TokenCountEvent { + info: Some(TokenUsageInfo { + total_token_usage: TokenUsage { + input_tokens: 120, + cached_input_tokens: 20, + output_tokens: 30, + reasoning_output_tokens: 10, + total_tokens: 150, + }, + last_token_usage: TokenUsage { + input_tokens: 70, + cached_input_tokens: 10, + output_tokens: 20, + reasoning_output_tokens: 5, + total_tokens: 90, + }, + model_context_window: Some(200_000), + }), + rate_limits: None, + }))?; + let file_path = rollout_path(codex_home, filename_ts, &thread_id); + let line = json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": payload + }) + .to_string(); + fs::write( + &file_path, + format!("{}{}\n", fs::read_to_string(&file_path)?, line), + )?; + Ok(thread_id) +} + /// Create a minimal rollout file with an explicit session source. pub fn create_fake_rollout_with_source( codex_home: &Path, 
+const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); fn create_config_toml_custom_provider( codex_home: &Path, @@ -349,6 +351,8 @@ async fn get_auth_status_omits_token_after_proactive_refresh_failure() -> Result )?; let server = MockServer::start().await; + // App-server startup may proactively read stale auth before this test sends + // getAuthStatus; require the refresh path without depending on that race. Mock::given(method("POST")) .and(path("/oauth/token")) .respond_with(ResponseTemplate::new(401).set_body_json(serde_json::json!({ @@ -356,7 +360,7 @@ async fn get_auth_status_omits_token_after_proactive_refresh_failure() -> Result "code": "refresh_token_reused" } }))) - .expect(2) + .expect(1..=2) .mount(&server) .await; @@ -416,6 +420,8 @@ async fn get_auth_status_returns_token_after_proactive_refresh_recovery() -> Res )?; let server = MockServer::start().await; + // App-server startup may proactively read stale auth before this test sends + // getAuthStatus; require the refresh path without depending on that race. 
Mock::given(method("POST")) .and(path("/oauth/token")) .respond_with(ResponseTemplate::new(401).set_body_json(serde_json::json!({ @@ -423,7 +429,7 @@ async fn get_auth_status_returns_token_after_proactive_refresh_recovery() -> Res "code": "refresh_token_reused" } }))) - .expect(2) + .expect(1..=2) .mount(&server) .await; diff --git a/codex-rs/app-server/tests/suite/conversation_summary.rs b/codex-rs/app-server/tests/suite/conversation_summary.rs index 9e292d602f..4690a44ca3 100644 --- a/codex-rs/app-server/tests/suite/conversation_summary.rs +++ b/codex-rs/app-server/tests/suite/conversation_summary.rs @@ -18,6 +18,7 @@ use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const FILENAME_TS: &str = "2025-01-02T12-00-00"; const META_RFC3339: &str = "2025-01-02T12:00:00Z"; +const UPDATED_AT_RFC3339: &str = "2025-01-02T12:00:00.000Z"; const PREVIEW: &str = "Summarize this conversation"; const MODEL_PROVIDER: &str = "openai"; @@ -27,7 +28,7 @@ fn expected_summary(conversation_id: ThreadId, path: PathBuf) -> ConversationSum path, preview: PREVIEW.to_string(), timestamp: Some(META_RFC3339.to_string()), - updated_at: Some(META_RFC3339.to_string()), + updated_at: Some(UPDATED_AT_RFC3339.to_string()), model_provider: MODEL_PROVIDER.to_string(), cwd: PathBuf::from("/"), cli_version: "0.0.0".to_string(), diff --git a/codex-rs/app-server/tests/suite/fuzzy_file_search.rs b/codex-rs/app-server/tests/suite/fuzzy_file_search.rs index 1520d99e3e..f508e0c987 100644 --- a/codex-rs/app-server/tests/suite/fuzzy_file_search.rs +++ b/codex-rs/app-server/tests/suite/fuzzy_file_search.rs @@ -11,6 +11,11 @@ use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; +// macOS arm64 and Windows Bazel CI can spend tens of seconds in app-server +// startup before the initialize response or fuzzy-search notifications arrive. 
+#[cfg(any(target_os = "macos", windows))] +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); +#[cfg(not(any(target_os = "macos", windows)))] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const SHORT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(500); const STOP_GRACE_PERIOD: std::time::Duration = std::time::Duration::from_millis(250); diff --git a/codex-rs/app-server/tests/suite/v2/analytics.rs b/codex-rs/app-server/tests/suite/v2/analytics.rs index a4d7a7f349..a3ecdbc1f4 100644 --- a/codex-rs/app-server/tests/suite/v2/analytics.rs +++ b/codex-rs/app-server/tests/suite/v2/analytics.rs @@ -80,6 +80,24 @@ async fn app_server_default_analytics_enabled_with_flag() -> Result<()> { } pub(crate) async fn enable_analytics_capture(server: &MockServer, codex_home: &Path) -> Result<()> { + let config_path = codex_home.join("config.toml"); + let config_toml = std::fs::read_to_string(&config_path)?; + if !config_toml.contains("[features]") { + std::fs::write( + &config_path, + format!("{config_toml}\n[features]\ngeneral_analytics = true\n"), + )?; + } else if !config_toml.contains("general_analytics") { + std::fs::write( + &config_path, + config_toml.replace("[features]\n", "[features]\ngeneral_analytics = true\n"), + )?; + } + + mount_analytics_capture(server, codex_home).await +} + +pub(crate) async fn mount_analytics_capture(server: &MockServer, codex_home: &Path) -> Result<()> { Mock::given(method("POST")) .and(path("/codex/analytics-events/events")) .respond_with(ResponseTemplate::new(200)) @@ -120,6 +138,41 @@ pub(crate) async fn wait_for_analytics_payload( serde_json::from_slice(&body).map_err(|err| anyhow::anyhow!("invalid analytics payload: {err}")) } +pub(crate) async fn wait_for_analytics_event( + server: &MockServer, + read_timeout: Duration, + event_type: &str, +) -> Result { + timeout(read_timeout, async { + loop { + let Some(requests) = 
server.received_requests().await else { + tokio::time::sleep(Duration::from_millis(25)).await; + continue; + }; + for request in &requests { + if request.method != "POST" + || request.url.path() != "/codex/analytics-events/events" + { + continue; + } + let payload: Value = serde_json::from_slice(&request.body) + .map_err(|err| anyhow::anyhow!("invalid analytics payload: {err}"))?; + let Some(events) = payload["events"].as_array() else { + continue; + }; + if let Some(event) = events + .iter() + .find(|event| event["event_type"] == event_type) + { + return Ok::<Value, anyhow::Error>(event.clone()); + } + } + tokio::time::sleep(Duration::from_millis(25)).await; + } + }) + .await? +} + pub(crate) fn thread_initialized_event(payload: &Value) -> Result<&Value> { let events = payload["events"] .as_array() diff --git a/codex-rs/app-server/tests/suite/v2/app_list.rs b/codex-rs/app-server/tests/suite/v2/app_list.rs index 57a27961af..78a915d178 100644 --- a/codex-rs/app-server/tests/suite/v2/app_list.rs +++ b/codex-rs/app-server/tests/suite/v2/app_list.rs @@ -56,7 +56,9 @@ use tokio::net::TcpListener; use tokio::task::JoinHandle; use tokio::time::timeout; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +// Bazel CI can spend tens of seconds starting app-server subprocesses or +// processing app-list RPCs under load. 
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60); #[tokio::test] async fn list_apps_returns_empty_when_connectors_disabled() -> Result<()> { @@ -117,6 +119,7 @@ async fn list_apps_returns_empty_with_api_key_auth() -> Result<()> { openai_api_key: Some("test-api-key".to_string()), tokens: None, last_refresh: None, + agent_identity: None, }, AuthCredentialsStoreMode::File, )?; diff --git a/codex-rs/app-server/tests/suite/v2/client_metadata.rs b/codex-rs/app-server/tests/suite/v2/client_metadata.rs index c85febd7d4..8d68888e7e 100644 --- a/codex-rs/app-server/tests/suite/v2/client_metadata.rs +++ b/codex-rs/app-server/tests/suite/v2/client_metadata.rs @@ -18,7 +18,9 @@ use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; -const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); +// Bazel CI can spend tens of seconds starting app-server subprocesses or +// processing turn RPCs under load. +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); #[tokio::test] async fn turn_start_forwards_client_metadata_to_responses_request_v2() -> Result<()> { diff --git a/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs b/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs index 7c36827e6d..3c8a3e573e 100644 --- a/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs +++ b/codex-rs/app-server/tests/suite/v2/collaboration_mode_list.rs @@ -21,7 +21,9 @@ use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::timeout; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +// Bazel CI can spend tens of seconds starting app-server subprocesses or +// processing list RPCs under load. +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60); /// Confirms the server returns the default collaboration mode presets in a stable order. 
#[tokio::test] diff --git a/codex-rs/app-server/tests/suite/v2/command_exec.rs b/codex-rs/app-server/tests/suite/v2/command_exec.rs index 50e8285665..d7645f13e3 100644 --- a/codex-rs/app-server/tests/suite/v2/command_exec.rs +++ b/codex-rs/app-server/tests/suite/v2/command_exec.rs @@ -411,11 +411,14 @@ async fn command_exec_streaming_does_not_buffer_output() -> Result<()> { }) .await?; - let delta = read_command_exec_delta(&mut mcp).await?; - assert_eq!(delta.process_id, process_id.as_str()); - assert_eq!(delta.stream, CommandExecOutputStream::Stdout); - assert_eq!(STANDARD.decode(&delta.delta_base64)?, b"abcde"); - assert!(delta.cap_reached); + let output = collect_command_exec_output_until( + CommandExecDeltaReader::Mcp(&mut mcp), + process_id.as_str(), + "capped stdout", + |_output, delta| delta.stream == CommandExecOutputStream::Stdout && delta.cap_reached, + ) + .await?; + assert_eq!(output.stdout, "abcde"); let terminate_request_id = mcp .send_command_exec_terminate_request(CommandExecTerminateParams { process_id: process_id.clone(), @@ -471,21 +474,13 @@ async fn command_exec_pipe_streams_output_and_accepts_write() -> Result<()> { }) .await?; - let first_stdout = read_command_exec_delta(&mut mcp).await?; - let first_stderr = read_command_exec_delta(&mut mcp).await?; - let seen = [first_stdout, first_stderr]; - assert!( - seen.iter() - .all(|delta| delta.process_id == process_id.as_str()) - ); - assert!(seen.iter().any(|delta| { - delta.stream == CommandExecOutputStream::Stdout - && delta.delta_base64 == STANDARD.encode("out-start\n") - })); - assert!(seen.iter().any(|delta| { - delta.stream == CommandExecOutputStream::Stderr - && delta.delta_base64 == STANDARD.encode("err-start\n") - })); + wait_for_command_exec_outputs_contains( + &mut mcp, + process_id.as_str(), + "out-start\n", + "err-start\n", + ) + .await?; let write_request_id = mcp .send_command_exec_write_request(CommandExecWriteParams { @@ -499,21 +494,13 @@ async fn 
command_exec_pipe_streams_output_and_accepts_write() -> Result<()> { .await?; assert_eq!(write_response.result, serde_json::json!({})); - let next_delta = read_command_exec_delta(&mut mcp).await?; - let final_delta = read_command_exec_delta(&mut mcp).await?; - let seen = [next_delta, final_delta]; - assert!( - seen.iter() - .all(|delta| delta.process_id == process_id.as_str()) - ); - assert!(seen.iter().any(|delta| { - delta.stream == CommandExecOutputStream::Stdout - && delta.delta_base64 == STANDARD.encode("out:hello\n") - })); - assert!(seen.iter().any(|delta| { - delta.stream == CommandExecOutputStream::Stderr - && delta.delta_base64 == STANDARD.encode("err:hello\n") - })); + wait_for_command_exec_outputs_contains( + &mut mcp, + process_id.as_str(), + "out:hello\n", + "err:hello\n", + ) + .await?; let response = mcp .read_stream_until_response_message(RequestId::Integer(command_request_id)) @@ -562,17 +549,13 @@ async fn command_exec_tty_implies_streaming_and_reports_pty_output() -> Result<( }) .await?; - let started_text = read_command_exec_output_until_contains( + wait_for_command_exec_output_contains( &mut mcp, process_id.as_str(), CommandExecOutputStream::Stdout, "tty\n", ) .await?; - assert!( - started_text.contains("tty\n"), - "expected TTY startup output, got {started_text:?}" - ); let write_request_id = mcp .send_command_exec_write_request(CommandExecWriteParams { @@ -586,17 +569,13 @@ async fn command_exec_tty_implies_streaming_and_reports_pty_output() -> Result<( .await?; assert_eq!(write_response.result, serde_json::json!({})); - let echoed_text = read_command_exec_output_until_contains( + wait_for_command_exec_output_contains( &mut mcp, process_id.as_str(), CommandExecOutputStream::Stdout, "echo:world\n", ) .await?; - assert!( - echoed_text.contains("echo:world\n"), - "expected TTY echo output, got {echoed_text:?}" - ); let response = mcp .read_stream_until_response_message(RequestId::Integer(command_request_id)) @@ -643,17 +622,13 @@ async fn 
command_exec_tty_supports_initial_size_and_resize() -> Result<()> { }) .await?; - let started_text = read_command_exec_output_until_contains( + wait_for_command_exec_output_contains( &mut mcp, process_id.as_str(), CommandExecOutputStream::Stdout, "start:31 101\n", ) .await?; - assert!( - started_text.contains("start:31 101\n"), - "unexpected initial size output: {started_text:?}" - ); let resize_request_id = mcp .send_command_exec_resize_request(CommandExecResizeParams { @@ -681,17 +656,13 @@ async fn command_exec_tty_supports_initial_size_and_resize() -> Result<()> { .await?; assert_eq!(write_response.result, serde_json::json!({})); - let resized_text = read_command_exec_output_until_contains( + wait_for_command_exec_output_contains( &mut mcp, process_id.as_str(), CommandExecOutputStream::Stdout, "after:45 132\n", ) .await?; - assert!( - resized_text.contains("after:45 132\n"), - "unexpected resized output: {resized_text:?}" - ); let response = mcp .read_stream_until_response_message(RequestId::Integer(command_request_id)) @@ -744,11 +715,13 @@ async fn command_exec_process_ids_are_connection_scoped_and_disconnect_terminate ) .await?; - let delta = read_command_exec_delta_ws(&mut ws1).await?; - assert_eq!(delta.process_id, "shared-process"); - assert_eq!(delta.stream, CommandExecOutputStream::Stdout); - let delta_text = String::from_utf8(STANDARD.decode(&delta.delta_base64)?)?; - assert!(delta_text.contains("ready")); + collect_command_exec_output_until( + CommandExecDeltaReader::Websocket(&mut ws1), + "shared-process", + "websocket ready output", + |output, _delta| output.stdout.contains("ready\n"), + ) + .await?; wait_for_process_marker(&marker, /*should_exist*/ true).await?; send_request( @@ -796,31 +769,98 @@ async fn read_command_exec_delta( decode_delta_notification(notification) } -async fn read_command_exec_output_until_contains( +async fn wait_for_command_exec_output_contains( mcp: &mut McpProcess, process_id: &str, stream: CommandExecOutputStream, 
expected: &str, -) -> Result { +) -> Result<()> { + let stream_name = match stream { + CommandExecOutputStream::Stdout => "stdout", + CommandExecOutputStream::Stderr => "stderr", + }; + collect_command_exec_output_until( + CommandExecDeltaReader::Mcp(mcp), + process_id, + format!("{stream_name} containing {expected:?}"), + |output, _delta| match stream { + CommandExecOutputStream::Stdout => output.stdout.contains(expected), + CommandExecOutputStream::Stderr => output.stderr.contains(expected), + }, + ) + .await?; + Ok(()) +} + +async fn wait_for_command_exec_outputs_contains( + mcp: &mut McpProcess, + process_id: &str, + stdout_expected: &str, + stderr_expected: &str, +) -> Result<()> { + collect_command_exec_output_until( + CommandExecDeltaReader::Mcp(mcp), + process_id, + format!("stdout containing {stdout_expected:?} and stderr containing {stderr_expected:?}"), + |output, _delta| { + output.stdout.contains(stdout_expected) && output.stderr.contains(stderr_expected) + }, + ) + .await?; + Ok(()) +} + +enum CommandExecDeltaReader<'a> { + Mcp(&'a mut McpProcess), + Websocket(&'a mut super::connection_handling_websocket::WsClient), +} + +#[derive(Default)] +struct CollectedCommandExecOutput { + stdout: String, + stderr: String, +} + +async fn collect_command_exec_output_until( + mut reader: CommandExecDeltaReader<'_>, + process_id: &str, + waiting_for: impl Into, + mut should_stop: impl FnMut( + &CollectedCommandExecOutput, + &CommandExecOutputDeltaNotification, + ) -> bool, +) -> Result { + let waiting_for = waiting_for.into(); let deadline = Instant::now() + DEFAULT_READ_TIMEOUT; - let mut collected = String::new(); + let mut output = CollectedCommandExecOutput::default(); loop { let remaining = deadline.saturating_duration_since(Instant::now()); - let delta = timeout(remaining, read_command_exec_delta(mcp)) - .await - .with_context(|| { - format!( - "timed out waiting for {expected:?} in command/exec output for {process_id}; collected {collected:?}" - ) - })??; + 
let delta = timeout(remaining, async { + match &mut reader { + CommandExecDeltaReader::Mcp(mcp) => read_command_exec_delta(mcp).await, + CommandExecDeltaReader::Websocket(stream) => { + read_command_exec_delta_ws(stream).await + } + } + }) + .await + .with_context(|| { + format!( + "timed out waiting for {waiting_for} in command/exec output for {process_id}; collected stdout={:?}, stderr={:?}", + output.stdout, output.stderr + ) + })??; assert_eq!(delta.process_id, process_id); - assert_eq!(delta.stream, stream); let delta_text = String::from_utf8(STANDARD.decode(&delta.delta_base64)?)?; - collected.push_str(&delta_text.replace('\r', "")); - if collected.contains(expected) { - return Ok(collected); + let delta_text = delta_text.replace('\r', ""); + match delta.stream { + CommandExecOutputStream::Stdout => output.stdout.push_str(&delta_text), + CommandExecOutputStream::Stderr => output.stderr.push_str(&delta_text), + } + if should_stop(&output, &delta) { + return Ok(output); } } } diff --git a/codex-rs/app-server/tests/suite/v2/compaction.rs b/codex-rs/app-server/tests/suite/v2/compaction.rs index e7661546ac..44b5dd6dc6 100644 --- a/codex-rs/app-server/tests/suite/v2/compaction.rs +++ b/codex-rs/app-server/tests/suite/v2/compaction.rs @@ -38,6 +38,11 @@ use std::collections::BTreeMap; use tempfile::TempDir; use tokio::time::timeout; +// macOS and Windows Bazel CI can spend tens of seconds starting app-server +// subprocesses or processing test RPCs under load. 
+#[cfg(any(target_os = "macos", windows))] +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); +#[cfg(not(any(target_os = "macos", windows)))] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const AUTO_COMPACT_LIMIT: i64 = 1_000; const COMPACT_PROMPT: &str = "Summarize the conversation."; diff --git a/codex-rs/app-server/tests/suite/v2/config_rpc.rs b/codex-rs/app-server/tests/suite/v2/config_rpc.rs index 5c04cc3c44..b5f795740c 100644 --- a/codex-rs/app-server/tests/suite/v2/config_rpc.rs +++ b/codex-rs/app-server/tests/suite/v2/config_rpc.rs @@ -33,7 +33,9 @@ use serde_json::json; use tempfile::TempDir; use tokio::time::timeout; -const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); +// Bazel CI can spend tens of seconds starting app-server subprocesses or +// processing config RPCs under load. +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); fn write_config(codex_home: &TempDir, contents: &str) -> Result<()> { Ok(std::fs::write( diff --git a/codex-rs/app-server/tests/suite/v2/connection_handling_websocket.rs b/codex-rs/app-server/tests/suite/v2/connection_handling_websocket.rs index 30caa13761..0cc8b51e56 100644 --- a/codex-rs/app-server/tests/suite/v2/connection_handling_websocket.rs +++ b/codex-rs/app-server/tests/suite/v2/connection_handling_websocket.rs @@ -47,6 +47,12 @@ use tokio_tungstenite::tungstenite::http::HeaderValue; use tokio_tungstenite::tungstenite::http::header::AUTHORIZATION; use tokio_tungstenite::tungstenite::http::header::ORIGIN; +// macOS and Windows CI can spend tens of seconds starting the app-server test +// binary under Bazel before it accepts JSON-RPC or reports its websocket bind +// address. 
+#[cfg(any(target_os = "macos", windows))] +pub(super) const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(60); +#[cfg(not(any(target_os = "macos", windows)))] pub(super) const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10); pub(super) type WsClient = WebSocketStream>; @@ -338,7 +344,8 @@ async fn websocket_transport_allows_unauthenticated_non_loopback_startup_by_defa } #[tokio::test] -async fn websocket_disconnect_unloads_last_subscribed_thread() -> Result<()> { +async fn websocket_disconnect_keeps_last_subscribed_thread_loaded_until_idle_timeout() -> Result<()> +{ let server = create_mock_responses_server_sequence_unchecked(Vec::new()).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri(), "never")?; @@ -359,7 +366,7 @@ async fn websocket_disconnect_unloads_last_subscribed_thread() -> Result<()> { send_initialize_request(&mut ws2, /*id*/ 4, "ws_reconnect_client").await?; read_response_for_id(&mut ws2, /*id*/ 4).await?; - wait_for_loaded_threads(&mut ws2, /*first_id*/ 5, &[]).await?; + wait_for_loaded_threads(&mut ws2, /*first_id*/ 5, &[thread_id.as_str()]).await?; process .kill() @@ -398,7 +405,7 @@ pub(super) async fn spawn_websocket_server_with_args( .take() .context("failed to capture websocket app-server stderr")?; let mut stderr_reader = BufReader::new(stderr).lines(); - let deadline = Instant::now() + Duration::from_secs(10); + let deadline = Instant::now() + DEFAULT_READ_TIMEOUT; let bind_addr = loop { let line = timeout( deadline.saturating_duration_since(Instant::now()), @@ -456,7 +463,7 @@ pub(super) async fn connect_websocket_with_bearer( ) -> Result { let url = format!("ws://{}", connectable_bind_addr(bind_addr)); let request = websocket_request(url.as_str(), bearer_token, /*origin*/ None)?; - let deadline = Instant::now() + Duration::from_secs(10); + let deadline = Instant::now() + DEFAULT_READ_TIMEOUT; loop { match connect_async(request.clone()).await { Ok((stream, _response)) => return 
Ok(stream), @@ -523,7 +530,7 @@ async fn run_websocket_server_to_completion_with_args( .stderr(Stdio::piped()) .env("CODEX_HOME", codex_home) .env("RUST_LOG", "debug"); - timeout(Duration::from_secs(10), cmd.output()) + timeout(DEFAULT_READ_TIMEOUT, cmd.output()) .await .context("timed out waiting for websocket app-server to exit")? .context("failed to run websocket app-server") @@ -535,7 +542,7 @@ async fn http_get( path: &str, ) -> Result { let connectable_bind_addr = connectable_bind_addr(bind_addr); - let deadline = Instant::now() + Duration::from_secs(10); + let deadline = Instant::now() + DEFAULT_READ_TIMEOUT; loop { match client .get(format!("http://{connectable_bind_addr}{path}")) diff --git a/codex-rs/app-server/tests/suite/v2/dynamic_tools.rs b/codex-rs/app-server/tests/suite/v2/dynamic_tools.rs index 0ab3f47235..0a3315a07a 100644 --- a/codex-rs/app-server/tests/suite/v2/dynamic_tools.rs +++ b/codex-rs/app-server/tests/suite/v2/dynamic_tools.rs @@ -34,6 +34,11 @@ use tempfile::TempDir; use tokio::time::timeout; use wiremock::MockServer; +// macOS and Windows Bazel CI can spend tens of seconds starting app-server +// subprocesses or processing test RPCs under load. +#[cfg(any(target_os = "macos", windows))] +const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(60); +#[cfg(not(any(target_os = "macos", windows)))] const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10); /// Ensures dynamic tool specs are serialized into the model request payload. 
diff --git a/codex-rs/app-server/tests/suite/v2/experimental_api.rs b/codex-rs/app-server/tests/suite/v2/experimental_api.rs index 4a532aebc0..2fd457faf2 100644 --- a/codex-rs/app-server/tests/suite/v2/experimental_api.rs +++ b/codex-rs/app-server/tests/suite/v2/experimental_api.rs @@ -11,10 +11,13 @@ use codex_app_server_protocol::JSONRPCMessage; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::MockExperimentalMethodParams; use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ThreadMemoryMode; +use codex_app_server_protocol::ThreadMemoryModeSetParams; use codex_app_server_protocol::ThreadRealtimeStartParams; use codex_app_server_protocol::ThreadRealtimeStartTransport; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; +use codex_protocol::protocol::RealtimeOutputModality; use pretty_assertions::assert_eq; use std::path::Path; use std::time::Duration; @@ -74,6 +77,7 @@ async fn realtime_conversation_start_requires_experimental_api_capability() -> R let request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("hello".to_string())), session_id: None, transport: None, @@ -89,6 +93,39 @@ async fn realtime_conversation_start_requires_experimental_api_capability() -> R Ok(()) } +#[tokio::test] +async fn thread_memory_mode_set_requires_experimental_api_capability() -> Result<()> { + let codex_home = TempDir::new()?; + let mut mcp = McpProcess::new(codex_home.path()).await?; + + let init = mcp + .initialize_with_capabilities( + default_client_info(), + Some(InitializeCapabilities { + experimental_api: false, + opt_out_notification_methods: None, + }), + ) + .await?; + let JSONRPCMessage::Response(_) = init else { + anyhow::bail!("expected initialize response, got {init:?}"); + }; + + let request_id = mcp + 
.send_thread_memory_mode_set_request(ThreadMemoryModeSetParams { + thread_id: "thr_123".to_string(), + mode: ThreadMemoryMode::Disabled, + }) + .await?; + let error = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_error_message(RequestId::Integer(request_id)), + ) + .await??; + assert_experimental_capability_error(error, "thread/memoryMode/set"); + Ok(()) +} + #[tokio::test] async fn realtime_webrtc_start_requires_experimental_api_capability() -> Result<()> { let codex_home = TempDir::new()?; @@ -110,6 +147,7 @@ async fn realtime_webrtc_start_requires_experimental_api_capability() -> Result< let request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: "thr_123".to_string(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("hello".to_string())), session_id: None, transport: Some(ThreadRealtimeStartTransport::Webrtc { diff --git a/codex-rs/app-server/tests/suite/v2/fs.rs b/codex-rs/app-server/tests/suite/v2/fs.rs index 3fd5d62c89..642844eb92 100644 --- a/codex-rs/app-server/tests/suite/v2/fs.rs +++ b/codex-rs/app-server/tests/suite/v2/fs.rs @@ -27,6 +27,11 @@ use std::os::unix::fs::symlink; #[cfg(unix)] use std::process::Command; +// macOS and Windows Bazel CI can spend tens of seconds starting app-server +// subprocesses or processing test RPCs under load. 
+#[cfg(any(target_os = "macos", windows))] +const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(60); +#[cfg(not(any(target_os = "macos", windows)))] const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10); async fn initialized_mcp(codex_home: &TempDir) -> Result { @@ -89,6 +94,7 @@ async fn fs_get_metadata_returns_only_used_fields() -> Result<()> { "createdAtMs".to_string(), "isDirectory".to_string(), "isFile".to_string(), + "isSymlink".to_string(), "modifiedAtMs".to_string(), ] ); @@ -99,6 +105,7 @@ async fn fs_get_metadata_returns_only_used_fields() -> Result<()> { FsGetMetadataResponse { is_directory: false, is_file: true, + is_symlink: false, created_at_ms: stat.created_at_ms, modified_at_ms: stat.modified_at_ms, } @@ -111,6 +118,35 @@ async fn fs_get_metadata_returns_only_used_fields() -> Result<()> { Ok(()) } +#[cfg(unix)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn fs_get_metadata_reports_symlink() -> Result<()> { + let codex_home = TempDir::new()?; + let file_path = codex_home.path().join("note.txt"); + let symlink_path = codex_home.path().join("note-link.txt"); + std::fs::write(&file_path, "hello")?; + symlink(&file_path, &symlink_path)?; + + let mut mcp = initialized_mcp(&codex_home).await?; + let request_id = mcp + .send_fs_get_metadata_request(codex_app_server_protocol::FsGetMetadataParams { + path: absolute_path(symlink_path), + }) + .await?; + let response = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + + let stat: FsGetMetadataResponse = to_response(response)?; + assert_eq!(stat.is_directory, false); + assert_eq!(stat.is_file, true); + assert_eq!(stat.is_symlink, true); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn fs_methods_cover_current_fs_utils_surface() -> Result<()> { let codex_home = TempDir::new()?; diff --git a/codex-rs/app-server/tests/suite/v2/marketplace_add.rs 
b/codex-rs/app-server/tests/suite/v2/marketplace_add.rs new file mode 100644 index 0000000000..cf3c57360f --- /dev/null +++ b/codex-rs/app-server/tests/suite/v2/marketplace_add.rs @@ -0,0 +1,61 @@ +use anyhow::Result; +use app_test_support::McpProcess; +use app_test_support::to_response; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::MarketplaceAddParams; +use codex_app_server_protocol::MarketplaceAddResponse; +use codex_app_server_protocol::RequestId; +use pretty_assertions::assert_eq; +use tempfile::TempDir; +use tokio::time::Duration; +use tokio::time::timeout; + +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); + +#[tokio::test] +async fn marketplace_add_local_directory_source() -> Result<()> { + let codex_home = TempDir::new()?; + let source = codex_home.path().join("marketplace"); + std::fs::create_dir_all(source.join(".agents/plugins"))?; + std::fs::create_dir_all(source.join("plugins/sample/.codex-plugin"))?; + std::fs::write( + source.join(".agents/plugins/marketplace.json"), + r#"{"name":"debug","plugins":[]}"#, + )?; + std::fs::write( + source.join("plugins/sample/.codex-plugin/plugin.json"), + r#"{"name":"sample"}"#, + )?; + std::fs::write(source.join("plugins/sample/marker.txt"), "local ref")?; + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_marketplace_add_request(MarketplaceAddParams { + source: "./marketplace".to_string(), + ref_name: None, + sparse_paths: None, + }) + .await?; + + let response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + let MarketplaceAddResponse { + marketplace_name, + installed_root, + already_added, + } = to_response(response)?; + let expected_root = source.canonicalize()?; + + assert_eq!(marketplace_name, "debug"); + assert_eq!(installed_root.as_path(), expected_root.as_path()); + 
assert!(!already_added); + assert_eq!( + std::fs::read_to_string(installed_root.as_path().join("plugins/sample/marker.txt"))?, + "local ref" + ); + Ok(()) +} diff --git a/codex-rs/app-server/tests/suite/v2/mcp_resource.rs b/codex-rs/app-server/tests/suite/v2/mcp_resource.rs index 0a38f3b37e..b0ff8888a4 100644 --- a/codex-rs/app-server/tests/suite/v2/mcp_resource.rs +++ b/codex-rs/app-server/tests/suite/v2/mcp_resource.rs @@ -7,7 +7,11 @@ use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; use axum::Router; -use codex_app_server_protocol::JSONRPCError; +use codex_app_server::in_process; +use codex_app_server::in_process::InProcessStartArgs; +use codex_app_server_protocol::ClientInfo; +use codex_app_server_protocol::ClientRequest; +use codex_app_server_protocol::InitializeParams; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::McpResourceContent; use codex_app_server_protocol::McpResourceReadParams; @@ -15,7 +19,14 @@ use codex_app_server_protocol::McpResourceReadResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; +use codex_arg0::Arg0DispatchPaths; use codex_config::types::AuthCredentialsStoreMode; +use codex_core::config::ConfigBuilder; +use codex_core::config_loader::CloudRequirementsLoader; +use codex_core::config_loader::LoaderOverrides; +use codex_exec_server::EnvironmentManager; +use codex_feedback::CodexFeedback; +use codex_protocol::protocol::SessionSource; use core_test_support::responses; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; @@ -149,24 +160,57 @@ stream_max_retries = 0 #[tokio::test] async fn mcp_resource_read_returns_error_for_unknown_thread() -> Result<()> { let codex_home = TempDir::new()?; - let mut mcp = McpProcess::new(codex_home.path()).await?; - timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; - - let request_id = 
mcp - .send_mcp_resource_read_request(McpResourceReadParams { - thread_id: "00000000-0000-4000-8000-000000000000".to_string(), - server: "codex_apps".to_string(), - uri: TEST_RESOURCE_URI.to_string(), - }) + let loader_overrides = LoaderOverrides::without_managed_config_for_tests(); + let config = ConfigBuilder::default() + .codex_home(codex_home.path().to_path_buf()) + .fallback_cwd(Some(codex_home.path().to_path_buf())) + .loader_overrides(loader_overrides.clone()) + .build() .await?; - let error: JSONRPCError = timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_error_message(RequestId::Integer(request_id)), - ) - .await??; + // This negative-path test does not need the stdio subprocess; keeping it + // in-process avoids child-process teardown timing in nextest leak detection. + let client = in_process::start(InProcessStartArgs { + arg0_paths: Arg0DispatchPaths::default(), + config: Arc::new(config), + cli_overrides: Vec::new(), + loader_overrides, + cloud_requirements: CloudRequirementsLoader::default(), + feedback: CodexFeedback::new(), + log_db: None, + environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), + config_warnings: Vec::new(), + session_source: SessionSource::Cli, + enable_codex_api_key_env: false, + initialize: InitializeParams { + client_info: ClientInfo { + name: "codex-app-server-tests".to_string(), + title: None, + version: "0.1.0".to_string(), + }, + capabilities: None, + }, + channel_capacity: in_process::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY, + }) + .await?; + let response = client + .request(ClientRequest::McpResourceRead { + request_id: RequestId::Integer(1), + params: McpResourceReadParams { + thread_id: "00000000-0000-4000-8000-000000000000".to_string(), + server: "codex_apps".to_string(), + uri: TEST_RESOURCE_URI.to_string(), + }, + }) + .await; + client.shutdown().await?; + + let error = match response? 
{ + Ok(result) => anyhow::bail!("expected thread-not-found error, got response: {result:?}"), + Err(error) => error, + }; assert!( - error.error.message.contains("thread not found"), + error.message.contains("thread not found"), "expected thread-not-found error, got: {error:?}" ); diff --git a/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs b/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs index 6326677250..13ebe0b99c 100644 --- a/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs +++ b/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs @@ -32,6 +32,7 @@ use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput as V2UserInput; use codex_config::types::AuthCredentialsStoreMode; +use core_test_support::assert_regex_match; use core_test_support::responses; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; @@ -65,8 +66,9 @@ use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const CONNECTOR_ID: &str = "calendar"; const CONNECTOR_NAME: &str = "Calendar"; +const TOOL_NAMESPACE: &str = "mcp__codex_apps__calendar"; +const CALLABLE_TOOL_NAME: &str = "_confirm_action"; const TOOL_NAME: &str = "calendar_confirm_action"; -const QUALIFIED_TOOL_NAME: &str = "mcp__codex_apps__calendar_confirm_action"; const TOOL_CALL_ID: &str = "call-calendar-confirm"; const ELICITATION_MESSAGE: &str = "Allow this request?"; @@ -84,9 +86,10 @@ async fn mcp_server_elicitation_round_trip() -> Result<()> { ]), responses::sse(vec![ responses::ev_response_created("resp-1"), - responses::ev_function_call( + responses::ev_function_call_with_namespace( TOOL_CALL_ID, - QUALIFIED_TOOL_NAME, + TOOL_NAMESPACE, + CALLABLE_TOOL_NAME, &tool_call_arguments, ), responses::ev_completed("resp-1"), @@ -274,8 +277,15 @@ async fn mcp_server_elicitation_round_trip() -> Result<()> { .get("output") 
.and_then(Value::as_str) .expect("function_call_output output should be a JSON string"); + let payload = assert_regex_match( + r#"(?s)^Wall time: [0-9]+(?:\.[0-9]+)? seconds\nOutput:\n(.*)$"#, + output, + ) + .get(1) + .expect("wall-time wrapped output should include payload") + .as_str(); assert_eq!( - serde_json::from_str::(output)?, + serde_json::from_str::(payload)?, json!([{ "type": "text", "text": "accepted" diff --git a/codex-rs/app-server/tests/suite/v2/memory_reset.rs b/codex-rs/app-server/tests/suite/v2/memory_reset.rs new file mode 100644 index 0000000000..3c7ae38671 --- /dev/null +++ b/codex-rs/app-server/tests/suite/v2/memory_reset.rs @@ -0,0 +1,145 @@ +use anyhow::Result; +use app_test_support::McpProcess; +use app_test_support::to_response; +use chrono::Utc; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::MemoryResetResponse; +use codex_app_server_protocol::RequestId; +use codex_protocol::ThreadId; +use codex_protocol::protocol::SessionSource; +use codex_state::Stage1JobClaimOutcome; +use codex_state::StateRuntime; +use codex_state::ThreadMetadataBuilder; +use pretty_assertions::assert_eq; +use std::path::Path; +use std::sync::Arc; +use tempfile::TempDir; +use tokio::time::timeout; +use uuid::Uuid; + +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +#[tokio::test] +async fn memory_reset_clears_memory_files_and_rows_preserves_threads() -> Result<()> { + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path())?; + let state_db = init_state_db(codex_home.path()).await?; + + let memory_root = codex_home.path().join("memories"); + tokio::fs::create_dir_all(memory_root.join("rollout_summaries")).await?; + tokio::fs::write(memory_root.join("MEMORY.md"), "stale memory\n").await?; + tokio::fs::write( + memory_root.join("rollout_summaries").join("stale.md"), + "stale rollout summary\n", + ) + .await?; + + let thread_id = seed_stage1_output(&state_db, 
codex_home.path()).await?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_raw_request("memory/reset", /*params*/ None) + .await?; + let response: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + let _: MemoryResetResponse = to_response::(response)?; + + let stage1_outputs = state_db.list_stage1_outputs_for_global(/*n*/ 10).await?; + assert_eq!(stage1_outputs, Vec::new()); + assert_eq!( + state_db.get_thread_memory_mode(thread_id).await?.as_deref(), + Some("enabled") + ); + + let mut remaining_entries = tokio::fs::read_dir(&memory_root).await?; + assert!( + remaining_entries.next_entry().await?.is_none(), + "memory root should be empty after reset" + ); + + Ok(()) +} + +async fn seed_stage1_output(state_db: &Arc, codex_home: &Path) -> Result { + let now = Utc::now(); + let thread_id = ThreadId::from_string(&Uuid::new_v4().to_string())?; + let worker_id = ThreadId::from_string(&Uuid::new_v4().to_string())?; + let mut builder = ThreadMetadataBuilder::new( + thread_id, + codex_home.join("sessions").join("test.jsonl"), + now, + SessionSource::Cli, + ); + builder.updated_at = Some(now); + builder.cwd = codex_home.to_path_buf(); + let metadata = builder.build("mock_provider"); + state_db.upsert_thread(&metadata).await?; + + let claim = state_db + .try_claim_stage1_job( + thread_id, + worker_id, + now.timestamp(), + /*lease_seconds*/ 3600, + /*max_running_jobs*/ 64, + ) + .await?; + let Stage1JobClaimOutcome::Claimed { ownership_token } = claim else { + anyhow::bail!("unexpected stage1 claim outcome: {claim:?}"); + }; + assert!( + state_db + .mark_stage1_job_succeeded( + thread_id, + ownership_token.as_str(), + now.timestamp(), + "raw memory", + "rollout summary", + /*rollout_slug*/ None, + ) + .await?, + "stage1 success should be recorded" + ); + state_db + 
.enqueue_global_consolidation(now.timestamp()) + .await?; + + Ok(thread_id) +} + +async fn init_state_db(codex_home: &Path) -> Result> { + let state_db = StateRuntime::init(codex_home.to_path_buf(), "mock_provider".into()).await?; + state_db + .mark_backfill_complete(/*last_watermark*/ None) + .await?; + Ok(state_db) +} + +fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write( + config_toml, + r#" +model = "mock-model" +approval_policy = "never" +sandbox_mode = "read-only" +model_provider = "mock_provider" +suppress_unstable_features_warning = true + +[features] +sqlite = true + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "http://127.0.0.1:9/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"#, + ) +} diff --git a/codex-rs/app-server/tests/suite/v2/mod.rs b/codex-rs/app-server/tests/suite/v2/mod.rs index db82c1368f..bbf04006e6 100644 --- a/codex-rs/app-server/tests/suite/v2/mod.rs +++ b/codex-rs/app-server/tests/suite/v2/mod.rs @@ -15,10 +15,12 @@ mod experimental_api; mod experimental_feature_list; mod fs; mod initialize; +mod marketplace_add; mod mcp_resource; mod mcp_server_elicitation; mod mcp_server_status; mod mcp_tool; +mod memory_reset; mod model_list; mod output_schema; mod plan_item; @@ -35,8 +37,10 @@ mod safety_check_downgrade; mod skills_list; mod thread_archive; mod thread_fork; +mod thread_inject_items; mod thread_list; mod thread_loaded_list; +mod thread_memory_mode_set; mod thread_metadata_update; mod thread_name_websocket; mod thread_read; diff --git a/codex-rs/app-server/tests/suite/v2/plugin_install.rs b/codex-rs/app-server/tests/suite/v2/plugin_install.rs index a3bea53172..e51fac725f 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_install.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_install.rs @@ -51,7 +51,9 @@ use wiremock::matchers::header; use wiremock::matchers::method; use 
wiremock::matchers::path; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +// Plugin install tests wait on connector discovery after the install response path +// starts, which is noticeably slower on Windows CI. +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60); #[tokio::test] async fn plugin_install_rejects_relative_marketplace_paths() -> Result<()> { diff --git a/codex-rs/app-server/tests/suite/v2/plugin_list.rs b/codex-rs/app-server/tests/suite/v2/plugin_list.rs index 8bc4a8598e..5036101de4 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_list.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_list.rs @@ -30,7 +30,8 @@ use wiremock::matchers::method; use wiremock::matchers::path; use wiremock::matchers::query_param; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +// These tests start full app-server processes and can also run plugin startup warmers. +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60); const TEST_CURATED_PLUGIN_SHA: &str = "0123456789abcdef0123456789abcdef01234567"; const STARTUP_REMOTE_PLUGIN_SYNC_MARKER_FILE: &str = ".tmp/app-server-remote-plugin-sync-v1"; diff --git a/codex-rs/app-server/tests/suite/v2/realtime_conversation.rs b/codex-rs/app-server/tests/suite/v2/realtime_conversation.rs index f4c0f99ae3..c61b628a6d 100644 --- a/codex-rs/app-server/tests/suite/v2/realtime_conversation.rs +++ b/codex-rs/app-server/tests/suite/v2/realtime_conversation.rs @@ -31,7 +31,8 @@ use codex_app_server_protocol::ThreadRealtimeStartTransport; use codex_app_server_protocol::ThreadRealtimeStartedNotification; use codex_app_server_protocol::ThreadRealtimeStopParams; use codex_app_server_protocol::ThreadRealtimeStopResponse; -use codex_app_server_protocol::ThreadRealtimeTranscriptUpdatedNotification; +use codex_app_server_protocol::ThreadRealtimeTranscriptDeltaNotification; +use codex_app_server_protocol::ThreadRealtimeTranscriptDoneNotification; use codex_app_server_protocol::ThreadStartParams; use 
codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::TurnCompletedNotification; @@ -39,6 +40,7 @@ use codex_app_server_protocol::TurnStartedNotification; use codex_features::FEATURES; use codex_features::Feature; use codex_protocol::protocol::RealtimeConversationVersion; +use codex_protocol::protocol::RealtimeOutputModality; use codex_protocol::protocol::RealtimeVoice; use codex_protocol::protocol::RealtimeVoicesList; use core_test_support::responses; @@ -70,9 +72,12 @@ use wiremock::matchers::path; use wiremock::matchers::path_regex; const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +const DELEGATED_SHELL_TOOL_TIMEOUT_MS: u64 = 30_000; const STARTUP_CONTEXT_HEADER: &str = "Startup context from Codex."; const V2_STEERING_ACKNOWLEDGEMENT: &str = "This was sent to steer the previous background agent task."; +const V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT: &str = + "Background agent finished. Use the preceding [BACKEND] messages as the result."; #[derive(Debug, Clone, Copy)] enum StartupContextConfig<'a> { @@ -112,6 +117,11 @@ impl Match for RealtimeCallRequestCapture { } } +fn normalized_json_string(raw: &str) -> Result { + let value: Value = serde_json::from_str(raw).context("expected JSON fixture to parse")?; + serde_json::to_string(&value).context("expected JSON fixture to serialize") +} + struct GatedSseResponse { gate_rx: Mutex>>, response: String, @@ -301,6 +311,7 @@ impl RealtimeE2eHarness { .mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: self.thread_id.clone(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("backend prompt".to_string())), session_id: None, transport: Some(ThreadRealtimeStartTransport::Webrtc { @@ -478,6 +489,15 @@ async fn realtime_conversation_streams_v2_notifications() -> Result<()> { "type": "response.output_text.delta", "delta": "working" }), + json!({ + "type": "conversation.item.done", + "item": { + "id": "item_assistant_1", + "type": "message", + "role": 
"assistant", + "content": [{ "type": "output_text", "text": "working on it" }] + } + }), json!({ "type": "conversation.item.done", "item": { @@ -523,6 +543,7 @@ async fn realtime_conversation_streams_v2_notifications() -> Result<()> { let start_request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: thread_start.thread.id.clone(), + output_modality: RealtimeOutputModality::Audio, prompt: None, session_id: None, transport: None, @@ -554,6 +575,10 @@ async fn realtime_conversation_streams_v2_notifications() -> Result<()> { startup_context_request.body_json()["session"]["audio"]["output"]["voice"], "cedar" ); + assert_eq!( + startup_context_request.body_json()["session"]["output_modalities"], + json!(["audio"]) + ); let startup_context_instructions = startup_context_request.body_json()["session"]["instructions"] .as_str() @@ -612,24 +637,32 @@ async fn realtime_conversation_streams_v2_notifications() -> Result<()> { assert_eq!(item_added.thread_id, output_audio.thread_id); assert_eq!(item_added.item["type"], json!("message")); - let first_transcript_update = read_notification::( + let first_transcript_delta = read_notification::( &mut mcp, - "thread/realtime/transcriptUpdated", + "thread/realtime/transcript/delta", ) .await?; - assert_eq!(first_transcript_update.thread_id, output_audio.thread_id); - assert_eq!(first_transcript_update.role, "user"); - assert_eq!(first_transcript_update.text, "delegate now"); + assert_eq!(first_transcript_delta.thread_id, output_audio.thread_id); + assert_eq!(first_transcript_delta.role, "user"); + assert_eq!(first_transcript_delta.delta, "delegate now"); - let second_transcript_update = - read_notification::( - &mut mcp, - "thread/realtime/transcriptUpdated", - ) - .await?; - assert_eq!(second_transcript_update.thread_id, output_audio.thread_id); - assert_eq!(second_transcript_update.role, "assistant"); - assert_eq!(second_transcript_update.text, "working"); + let second_transcript_delta = 
read_notification::( + &mut mcp, + "thread/realtime/transcript/delta", + ) + .await?; + assert_eq!(second_transcript_delta.thread_id, output_audio.thread_id); + assert_eq!(second_transcript_delta.role, "assistant"); + assert_eq!(second_transcript_delta.delta, "working"); + + let final_transcript_done = read_notification::( + &mut mcp, + "thread/realtime/transcript/done", + ) + .await?; + assert_eq!(final_transcript_done.thread_id, output_audio.thread_id); + assert_eq!(final_transcript_done.role, "assistant"); + assert_eq!(final_transcript_done.text, "working on it"); let handoff_item_added = read_notification::( &mut mcp, @@ -693,6 +726,140 @@ async fn realtime_conversation_streams_v2_notifications() -> Result<()> { Ok(()) } +#[tokio::test] +async fn realtime_text_output_modality_requests_text_output_and_final_transcript() -> Result<()> { + skip_if_no_network!(Ok(())); + + let responses_server = create_mock_responses_server_sequence_unchecked(Vec::new()).await; + let realtime_server = start_websocket_server(vec![vec![vec![ + json!({ + "type": "session.updated", + "session": { "id": "sess_text", "instructions": "backend prompt" } + }), + json!({ + "type": "response.output_text.delta", + "delta": "hello " + }), + json!({ + "type": "response.output_text.delta", + "delta": "world" + }), + json!({ + "type": "response.output_audio_transcript.done", + "transcript": "hello world" + }), + json!({ + "type": "conversation.item.done", + "item": { + "id": "item_output_1", + "type": "message", + "role": "assistant", + "content": [{"type": "output_text", "text": "hello world"}] + } + }), + ]]]) + .await; + + let codex_home = TempDir::new()?; + create_config_toml( + codex_home.path(), + &responses_server.uri(), + realtime_server.uri(), + /*realtime_enabled*/ true, + StartupContextConfig::Generated, + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + mcp.initialize().await?; + login_with_api_key(&mut mcp, "sk-test-key").await?; + + let thread_start_request_id = mcp 
+ .send_thread_start_request(ThreadStartParams::default()) + .await?; + let thread_start_response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_start_request_id)), + ) + .await??; + let thread_start: ThreadStartResponse = to_response(thread_start_response)?; + + let start_request_id = mcp + .send_thread_realtime_start_request(ThreadRealtimeStartParams { + thread_id: thread_start.thread.id.clone(), + output_modality: RealtimeOutputModality::Text, + prompt: None, + session_id: None, + transport: None, + voice: None, + }) + .await?; + let start_response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(start_request_id)), + ) + .await??; + let _: ThreadRealtimeStartResponse = to_response(start_response)?; + + let session_update = realtime_server + .wait_for_request(/*connection_index*/ 0, /*request_index*/ 0) + .await; + assert_eq!( + session_update.body_json()["session"]["output_modalities"], + json!(["text"]) + ); + + let first_delta = read_notification::( + &mut mcp, + "thread/realtime/transcript/delta", + ) + .await?; + let second_delta = read_notification::( + &mut mcp, + "thread/realtime/transcript/delta", + ) + .await?; + let done = read_notification::( + &mut mcp, + "thread/realtime/transcript/done", + ) + .await?; + assert_eq!( + vec![first_delta, second_delta], + vec![ + ThreadRealtimeTranscriptDeltaNotification { + thread_id: thread_start.thread.id.clone(), + role: "assistant".to_string(), + delta: "hello ".to_string(), + }, + ThreadRealtimeTranscriptDeltaNotification { + thread_id: thread_start.thread.id.clone(), + role: "assistant".to_string(), + delta: "world".to_string(), + }, + ] + ); + assert_eq!( + done, + ThreadRealtimeTranscriptDoneNotification { + thread_id: thread_start.thread.id, + role: "assistant".to_string(), + text: "hello world".to_string(), + } + ); + assert!( + timeout( + Duration::from_millis(200), + 
mcp.read_stream_until_notification_message("thread/realtime/transcript/done"), + ) + .await + .is_err(), + "should not emit duplicate transcript done from audio transcript done" + ); + + realtime_server.shutdown().await; + Ok(()) +} + #[tokio::test] async fn realtime_list_voices_returns_supported_names() -> Result<()> { let codex_home = TempDir::new()?; @@ -793,6 +960,7 @@ async fn realtime_conversation_stop_emits_closed_notification() -> Result<()> { let start_request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: thread_start.thread.id.clone(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("backend prompt".to_string())), session_id: None, transport: None, @@ -889,6 +1057,7 @@ async fn realtime_webrtc_start_emits_sdp_notification() -> Result<()> { let start_request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: thread_id.clone(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("backend prompt".to_string())), session_id: None, transport: Some(ThreadRealtimeStartTransport::Webrtc { @@ -973,7 +1142,8 @@ async fn realtime_webrtc_start_emits_sdp_notification() -> Result<()> { Some("multipart/form-data; boundary=codex-realtime-call-boundary") ); let body = String::from_utf8(request.body).context("multipart body should be utf-8")?; - let session = r#"{"tool_choice":"auto","type":"realtime","model":"gpt-realtime-1.5","instructions":"backend prompt\n\nstartup context","output_modalities":["audio"],"audio":{"input":{"format":{"type":"audio/pcm","rate":24000},"noise_reduction":{"type":"near_field"},"turn_detection":{"type":"server_vad","interrupt_response":true,"create_response":true}},"output":{"format":{"type":"audio/pcm","rate":24000},"voice":"marin"}},"tools":[{"type":"function","name":"background_agent","description":"Send a user request to the background agent. Use this as the default action. 
If the background agent is idle, this starts a new task and returns the final result to the user. If the background agent is already working on a task, this sends the request as guidance to steer that previous task. If the user asks to do something next, later, after this, or once current work finishes, call this tool so the work is actually queued instead of merely promising to do it later.","parameters":{"type":"object","properties":{"prompt":{"type":"string","description":"The user request to delegate to the background agent."}},"required":["prompt"],"additionalProperties":false}}]}"#; + let session = r#"{"tool_choice":"auto","type":"realtime","model":"gpt-realtime-1.5","instructions":"backend prompt\n\nstartup context","output_modalities":["audio"],"audio":{"input":{"format":{"type":"audio/pcm","rate":24000},"noise_reduction":{"type":"near_field"},"turn_detection":{"type":"server_vad","interrupt_response":true,"create_response":true,"silence_duration_ms":500}},"output":{"format":{"type":"audio/pcm","rate":24000},"voice":"marin"}},"tools":[{"type":"function","name":"background_agent","description":"Send a user request to the background agent. Use this as the default action. Do not rephrase the user's ask or rewrite it in your own words; pass along the user's own words. If the background agent is idle, this starts a new task and returns the final result to the user. If the background agent is already working on a task, this sends the request as guidance to steer that previous task. 
If the user asks to do something next, later, after this, or once current work finishes, call this tool so the work is actually queued instead of merely promising to do it later.","parameters":{"type":"object","properties":{"prompt":{"type":"string","description":"The user request to delegate to the background agent."}},"required":["prompt"],"additionalProperties":false}}]}"#; + let session = normalized_json_string(session)?; assert_eq!( body, format!( @@ -1163,11 +1333,11 @@ async fn webrtc_v2_forwards_audio_and_text_between_client_and_sideband() -> Resu harness.append_text(thread_id, "hello").await?; let transcript = harness - .read_notification::( - "thread/realtime/transcriptUpdated", + .read_notification::( + "thread/realtime/transcript/delta", ) .await?; - assert_eq!(transcript.text, "transcribed audio"); + assert_eq!(transcript.delta, "transcribed audio"); let output_audio = harness .read_notification::( "thread/realtime/outputAudio/delta", @@ -1192,7 +1362,8 @@ async fn webrtc_v2_forwards_audio_and_text_between_client_and_sideband() -> Resu request["type"] == "conversation.item.create" && request["item"]["type"] == "message" && request["item"]["role"] == "user" - && request["item"]["content"][0]["text"] == "hello" + && request["item"]["content"][0]["type"] == "input_text" + && request["item"]["content"][0]["text"] == "[USER] hello" }), "sideband requests should include user text item: {requests:?}" ); @@ -1252,11 +1423,11 @@ async fn webrtc_v2_text_input_is_append_only_while_response_is_active() -> Resul "first", ); let transcript = harness - .read_notification::( - "thread/realtime/transcriptUpdated", + .read_notification::( + "thread/realtime/transcript/delta", ) .await?; - assert_eq!(transcript.text, "active response started"); + assert_eq!(transcript.delta, "active response started"); // Phase 3: send a second text turn while `resp_active` is still open. The // user message must reach realtime without requesting another response. 
@@ -1391,7 +1562,7 @@ async fn webrtc_v2_background_agent_tool_call_delegates_and_returns_function_out assert_v2_progress_update(&progress, "delegated from v2"); let tool_output = harness.sideband_outbound_request(/*request_index*/ 2).await; - assert_v2_function_call_output(&tool_output, "call_v2", "delegated from v2"); + assert_v2_function_call_output(&tool_output, "call_v2", V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT); assert_eq!( function_call_output_sideband_requests(&harness.realtime_server).len(), 1 @@ -1526,7 +1697,11 @@ async fn webrtc_v2_background_agent_progress_is_sent_before_function_output() -> assert_v2_progress_update(&progress, "progress before final"); let tool_output = harness.sideband_outbound_request(/*request_index*/ 2).await; - assert_v2_function_call_output(&tool_output, "call_progress_order", "progress before final"); + assert_v2_function_call_output( + &tool_output, + "call_progress_order", + V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT, + ); harness.shutdown().await; Ok(()) @@ -1543,7 +1718,9 @@ async fn webrtc_v2_tool_call_delegated_turn_can_execute_shell_tool() -> Result<( create_shell_command_sse_response( realtime_tool_ok_command(), /*workdir*/ None, - Some(5000), + // Windows CI can spend several seconds starting the nested PowerShell command. This + // test verifies delegated shell-tool plumbing, not timeout enforcement. 
+ Some(DELEGATED_SHELL_TOOL_TIMEOUT_MS), "shell_call", )?, create_final_assistant_message_sse_response("shell tool finished")?, @@ -1610,7 +1787,11 @@ async fn webrtc_v2_tool_call_delegated_turn_can_execute_shell_tool() -> Result<( assert_v2_progress_update(&progress, "shell tool finished"); let tool_output = harness.sideband_outbound_request(/*request_index*/ 2).await; - assert_v2_function_call_output(&tool_output, "call_shell", "shell tool finished"); + assert_v2_function_call_output( + &tool_output, + "call_shell", + V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT, + ); assert_eq!( function_call_output_sideband_requests(&harness.realtime_server).len(), 1 @@ -1690,7 +1871,11 @@ async fn webrtc_v2_tool_call_does_not_block_sideband_audio() -> Result<()> { assert_v2_progress_update(&progress, "late delegated result"); let tool_output = harness.sideband_outbound_request(/*request_index*/ 2).await; - assert_v2_function_call_output(&tool_output, "call_audio", "late delegated result"); + assert_v2_function_call_output( + &tool_output, + "call_audio", + V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT, + ); harness.shutdown().await; Ok(()) @@ -1736,6 +1921,7 @@ async fn realtime_webrtc_start_surfaces_backend_error() -> Result<()> { let start_request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: thread_start.thread.id, + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("backend prompt".to_string())), session_id: None, transport: Some(ThreadRealtimeStartTransport::Webrtc { @@ -1794,6 +1980,7 @@ async fn realtime_conversation_requires_feature_flag() -> Result<()> { let start_request_id = mcp .send_thread_realtime_start_request(ThreadRealtimeStartParams { thread_id: thread_start.thread.id.clone(), + output_modality: RealtimeOutputModality::Audio, prompt: Some(Some("backend prompt".to_string())), session_id: None, transport: None, @@ -1921,7 +2108,7 @@ fn assert_v2_function_call_output(request: &Value, call_id: &str, expected_outpu "item": { 
"type": "function_call_output", "call_id": call_id, - "output": format!("\"Agent Final Message\":\n\n{expected_output}"), + "output": expected_output, } }) ); @@ -1937,7 +2124,7 @@ fn assert_v2_progress_update(request: &Value, expected_text: &str) { "role": "user", "content": [{ "type": "input_text", - "text": format!("{expected_text}\n\nUpdate from background agent (task hasn't finished yet):") + "text": format!("[BACKEND] {expected_text}") }] } }) @@ -1954,7 +2141,7 @@ fn assert_v2_user_text_item(request: &Value, expected_text: &str) { "role": "user", "content": [{ "type": "input_text", - "text": expected_text + "text": format!("[USER] {expected_text}") }] } }) @@ -2018,6 +2205,7 @@ fn assert_call_create_multipart( Some("multipart/form-data; boundary=codex-realtime-call-boundary") ); let body = String::from_utf8(request.body).context("multipart body should be utf-8")?; + let session = normalized_json_string(session)?; assert_eq!( body, format!( diff --git a/codex-rs/app-server/tests/suite/v2/skills_list.rs b/codex-rs/app-server/tests/suite/v2/skills_list.rs index 0a2bbe0df8..8675b3a429 100644 --- a/codex-rs/app-server/tests/suite/v2/skills_list.rs +++ b/codex-rs/app-server/tests/suite/v2/skills_list.rs @@ -11,6 +11,7 @@ use codex_app_server_protocol::SkillsListExtraRootsForCwd; use codex_app_server_protocol::SkillsListParams; use codex_app_server_protocol::SkillsListResponse; use codex_app_server_protocol::ThreadStartParams; +use codex_exec_server::CODEX_EXEC_SERVER_URL_ENV_VAR; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::timeout; @@ -54,7 +55,7 @@ async fn skills_list_includes_skills_from_per_cwd_extra_user_roots() -> Result<( .await??; let SkillsListResponse { data } = to_response(response)?; assert_eq!(data.len(), 1); - assert_eq!(data[0].cwd, cwd.path().to_path_buf()); + assert_eq!(data[0].cwd.as_path(), cwd.path()); assert!( data[0] .skills @@ -64,6 +65,56 @@ async fn skills_list_includes_skills_from_per_cwd_extra_user_roots() 
-> Result<( Ok(()) } +#[tokio::test] +async fn skills_list_skips_cwd_roots_when_environment_disabled() -> Result<()> { + let codex_home = TempDir::new()?; + let cwd = TempDir::new()?; + let extra_root = TempDir::new()?; + write_skill(&codex_home, "home-skill")?; + write_skill(&extra_root, "extra-skill")?; + + let mut mcp = McpProcess::new_with_env( + codex_home.path(), + &[(CODEX_EXEC_SERVER_URL_ENV_VAR, Some("none"))], + ) + .await?; + timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_skills_list_request(SkillsListParams { + cwds: vec![cwd.path().to_path_buf()], + force_reload: true, + per_cwd_extra_user_roots: Some(vec![SkillsListExtraRootsForCwd { + cwd: cwd.path().to_path_buf(), + extra_user_roots: vec![extra_root.path().to_path_buf()], + }]), + }) + .await?; + + let response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + let SkillsListResponse { data } = to_response(response)?; + assert_eq!(data.len(), 1); + assert_eq!(data[0].cwd, cwd.path().to_path_buf()); + assert_eq!(data[0].errors, Vec::new()); + assert!( + data[0] + .skills + .iter() + .any(|skill| skill.name == "home-skill") + ); + assert!( + data[0] + .skills + .iter() + .all(|skill| skill.name != "extra-skill") + ); + Ok(()) +} + #[tokio::test] async fn skills_list_rejects_relative_extra_user_roots() -> Result<()> { let codex_home = TempDir::new()?; @@ -98,6 +149,35 @@ async fn skills_list_rejects_relative_extra_user_roots() -> Result<()> { Ok(()) } +#[tokio::test] +async fn skills_list_accepts_relative_cwds() -> Result<()> { + let codex_home = TempDir::new()?; + let relative_cwd = std::path::PathBuf::from("relative-cwd"); + std::fs::create_dir_all(codex_home.path().join(&relative_cwd))?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_skills_list_request(SkillsListParams { + 
cwds: vec![relative_cwd.clone()], + force_reload: true, + per_cwd_extra_user_roots: None, + }) + .await?; + + let response: JSONRPCResponse = timeout( + DEFAULT_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + let SkillsListResponse { data } = to_response(response)?; + assert_eq!(data.len(), 1); + assert_eq!(data[0].cwd, relative_cwd); + assert_eq!(data[0].errors, Vec::new()); + Ok(()) +} + #[tokio::test] async fn skills_list_ignores_per_cwd_extra_roots_for_unknown_cwd() -> Result<()> { let codex_home = TempDir::new()?; @@ -127,7 +207,7 @@ async fn skills_list_ignores_per_cwd_extra_roots_for_unknown_cwd() -> Result<()> .await??; let SkillsListResponse { data } = to_response(response)?; assert_eq!(data.len(), 1); - assert_eq!(data[0].cwd, requested_cwd.path().to_path_buf()); + assert_eq!(data[0].cwd.as_path(), requested_cwd.path()); assert!( data[0] .skills diff --git a/codex-rs/app-server/tests/suite/v2/thread_fork.rs b/codex-rs/app-server/tests/suite/v2/thread_fork.rs index 9907fc4b1d..f5bc5a73c9 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_fork.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_fork.rs @@ -2,6 +2,7 @@ use anyhow::Result; use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; use app_test_support::create_fake_rollout; +use app_test_support::create_fake_rollout_with_token_usage; use app_test_support::create_mock_responses_server_repeating_assistant; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; @@ -9,6 +10,7 @@ use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCMessage; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::SessionSource; use codex_app_server_protocol::ThreadForkParams; use codex_app_server_protocol::ThreadForkResponse; @@ -43,6 +45,9 @@ use 
super::analytics::enable_analytics_capture; use super::analytics::thread_initialized_event; use super::analytics::wait_for_analytics_payload; +#[cfg(windows)] +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25); +#[cfg(not(windows))] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] @@ -117,9 +122,9 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> { assert_eq!(thread.model_provider, "mock_provider"); assert_eq!(thread.status, ThreadStatus::Idle); let thread_path = thread.path.clone().expect("thread path"); - assert!(thread_path.is_absolute()); - assert_ne!(thread_path, original_path); - assert!(thread.cwd.is_absolute()); + assert!(thread_path.as_path().is_absolute()); + assert_ne!(thread_path.as_path(), original_path); + assert!(thread.cwd.as_path().is_absolute()); assert_eq!(thread.source, SessionSource::VsCode); assert_eq!(thread.name, None); @@ -183,6 +188,59 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> { Ok(()) } +#[tokio::test] +async fn thread_fork_emits_restored_token_usage_before_next_turn() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + + let conversation_id = create_fake_rollout_with_token_usage( + codex_home.path(), + "2025-01-05T12-00-00", + "2025-01-05T12:00:00Z", + "Saved user message", + Some("mock_provider"), + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let fork_id = mcp + .send_thread_fork_request(ThreadForkParams { + thread_id: conversation_id, + ..Default::default() + }) + .await?; + let fork_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(fork_id)), + ) + .await??; + let ThreadForkResponse { thread, .. 
} = to_response::(fork_resp)?; + + let note = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("thread/tokenUsage/updated"), + ) + .await??; + let parsed: ServerNotification = note.try_into()?; + let ServerNotification::ThreadTokenUsageUpdated(notification) = parsed else { + panic!("expected thread/tokenUsage/updated notification"); + }; + + assert_eq!(notification.thread_id, thread.id); + assert_eq!(notification.turn_id, thread.turns[0].id); + assert_eq!(notification.token_usage.total.total_tokens, 150); + assert_eq!(notification.token_usage.total.input_tokens, 120); + assert_eq!(notification.token_usage.total.cached_input_tokens, 20); + assert_eq!(notification.token_usage.total.output_tokens, 30); + assert_eq!(notification.token_usage.total.reasoning_output_tokens, 10); + assert_eq!(notification.token_usage.last.total_tokens, 90); + assert_eq!(notification.token_usage.model_context_window, Some(200_000)); + + Ok(()) +} + #[tokio::test] async fn thread_fork_tracks_thread_initialized_analytics() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; @@ -205,7 +263,7 @@ async fn thread_fork_tracks_thread_initialized_analytics() -> Result<()> { /*git_info*/ None, )?; - let mut mcp = McpProcess::new(codex_home.path()).await?; + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let fork_id = mcp @@ -565,7 +623,7 @@ fn create_config_toml_with_chatgpt_base_url( let general_analytics_toml = if general_analytics_enabled { "\ngeneral_analytics = true".to_string() } else { - String::new() + "\ngeneral_analytics = false".to_string() }; let config_toml = codex_home.join("config.toml"); std::fs::write( diff --git a/codex-rs/app-server/tests/suite/v2/thread_inject_items.rs b/codex-rs/app-server/tests/suite/v2/thread_inject_items.rs new file mode 100644 index 0000000000..56fd188c4b --- /dev/null +++ 
b/codex-rs/app-server/tests/suite/v2/thread_inject_items.rs @@ -0,0 +1,288 @@ +use anyhow::Context; +use anyhow::Result; +use app_test_support::McpProcess; +use app_test_support::to_response; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ThreadInjectItemsParams; +use codex_app_server_protocol::ThreadInjectItemsResponse; +use codex_app_server_protocol::ThreadStartParams; +use codex_app_server_protocol::ThreadStartResponse; +use codex_app_server_protocol::TurnStartParams; +use codex_app_server_protocol::UserInput as V2UserInput; +use codex_core::RolloutRecorder; +use codex_protocol::models::ContentItem; +use codex_protocol::models::ResponseItem; +use codex_protocol::protocol::InitialHistory; +use codex_protocol::protocol::RolloutItem; +use core_test_support::responses; +use serde_json::Value; +use std::path::Path; +use tempfile::TempDir; +use tokio::time::timeout; + +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +#[tokio::test] +async fn thread_inject_items_adds_raw_response_items_to_thread_history() -> Result<()> { + let server = responses::start_mock_server().await; + let body = responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_assistant_message("msg-1", "Done"), + responses::ev_completed("resp-1"), + ]); + let response_mock = responses::mount_sse_once(&server, body).await; + + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_req = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let 
ThreadStartResponse { thread, .. } = to_response::(thread_resp)?; + + let injected_text = "Injected assistant context"; + let injected_item = ResponseItem::Message { + id: None, + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: injected_text.to_string(), + }], + end_turn: None, + phase: None, + }; + + let inject_req = mcp + .send_thread_inject_items_request(ThreadInjectItemsParams { + thread_id: thread.id.clone(), + items: vec![serde_json::to_value(&injected_item)?], + }) + .await?; + let inject_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(inject_req)), + ) + .await??; + let _response: ThreadInjectItemsResponse = + to_response::(inject_resp)?; + + let rollout_path = thread.path.as_ref().context("thread path missing")?; + let history = RolloutRecorder::get_rollout_history(rollout_path).await?; + let InitialHistory::Resumed(resumed_history) = history else { + panic!("expected resumed rollout history"); + }; + assert!( + resumed_history + .history + .iter() + .any(|item| matches!(item, RolloutItem::ResponseItem(response_item) if response_item == &injected_item)), + "injected item should be persisted in rollout history" + ); + + let turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id.clone(), + input: vec![V2UserInput::Text { + text: "Hello".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), + ) + .await??; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let injected_value = serde_json::to_value(&injected_item)?; + let model_input = response_mock.single_request().input(); + let environment_context_index = + response_item_text_position(&model_input, "") + .expect("environment context should be injected before the first user 
turn"); + let injected_index = model_input + .iter() + .position(|item| item == &injected_value) + .expect("injected item should be sent in the next model request"); + let user_prompt_index = response_item_text_position(&model_input, "Hello") + .expect("user prompt should be sent in the next model request"); + assert!( + environment_context_index < injected_index, + "standard initial context should be sent before injected items" + ); + assert!( + injected_index < user_prompt_index, + "injected items should be sent before the user prompt" + ); + + Ok(()) +} + +#[tokio::test] +async fn thread_inject_items_adds_raw_response_items_after_a_turn() -> Result<()> { + let server = responses::start_mock_server().await; + let first_body = responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_assistant_message("msg-1", "First done"), + responses::ev_completed("resp-1"), + ]); + let second_body = responses::sse(vec![ + responses::ev_response_created("resp-2"), + responses::ev_assistant_message("msg-2", "Second done"), + responses::ev_completed("resp-2"), + ]); + let response_mock = responses::mount_sse_sequence(&server, vec![first_body, second_body]).await; + + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_req = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(thread_resp)?; + + let first_turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id.clone(), + input: vec![V2UserInput::Text { + text: "First turn".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(first_turn_req)), + ) + .await??; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let injected_item = ResponseItem::Message { + id: None, + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: "Injected after first turn".to_string(), + }], + end_turn: None, + phase: None, + }; + let injected_value = serde_json::to_value(&injected_item)?; + + let inject_req = mcp + .send_thread_inject_items_request(ThreadInjectItemsParams { + thread_id: thread.id.clone(), + items: vec![injected_value.clone()], + }) + .await?; + let inject_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(inject_req)), + ) + .await??; + let _response: ThreadInjectItemsResponse = + to_response::(inject_resp)?; + + let second_turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id.clone(), + input: vec![V2UserInput::Text { + text: "Second turn".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(second_turn_req)), + ) + .await??; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let requests = response_mock.requests(); + assert_eq!(requests.len(), 2); + assert!( + !requests[0].input().contains(&injected_value), + "injected item should not be sent before it is injected" + ); + assert!( + requests[1].input().contains(&injected_value), + "injected item 
should be sent after being injected into existing history" + ); + + Ok(()) +} + +fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write( + config_toml, + format!( + r#" +model = "mock-model" +approval_policy = "never" +sandbox_mode = "read-only" + +model_provider = "mock_provider" + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "{server_uri}/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"# + ), + ) +} + +fn response_item_text_position(items: &[Value], needle: &str) -> Option { + items.iter().position(|item| { + item.get("content") + .and_then(Value::as_array) + .into_iter() + .flatten() + .any(|content| { + content + .get("text") + .and_then(Value::as_str) + .is_some_and(|text| text.contains(needle)) + }) + }) +} diff --git a/codex-rs/app-server/tests/suite/v2/thread_list.rs b/codex-rs/app-server/tests/suite/v2/thread_list.rs index 62faba9c17..8fd4e47303 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_list.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_list.rs @@ -5,6 +5,7 @@ use app_test_support::create_fake_rollout_with_source; use app_test_support::create_final_assistant_message_sse_response; use app_test_support::create_mock_responses_server_sequence; use app_test_support::rollout_path; +use app_test_support::test_absolute_path; use app_test_support::to_response; use chrono::DateTime; use chrono::Utc; @@ -37,7 +38,6 @@ use std::fs; use std::fs::FileTimes; use std::fs::OpenOptions; use std::path::Path; -use std::path::PathBuf; use tempfile::TempDir; use tokio::time::timeout; use uuid::Uuid; @@ -372,7 +372,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> { assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.created_at > 0); assert_eq!(thread.updated_at, thread.created_at); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, 
test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); @@ -399,7 +399,7 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> { assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.created_at > 0); assert_eq!(thread.updated_at, thread.created_at); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); @@ -455,7 +455,7 @@ async fn thread_list_respects_provider_filter() -> Result<()> { let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp(); assert_eq!(thread.created_at, expected_ts); assert_eq!(thread.updated_at, expected_ts); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); @@ -518,7 +518,7 @@ async fn thread_list_respects_cwd_filter() -> Result<()> { assert_eq!(data.len(), 1); assert_eq!(data[0].id, filtered_id); assert_ne!(data[0].id, unfiltered_id); - assert_eq!(data[0].cwd, target_cwd); + assert_eq!(data[0].cwd.as_path(), target_cwd.as_path()); Ok(()) } @@ -563,15 +563,34 @@ sqlite = true /*git_info*/ None, )?; - // `thread/list` only applies `search_term` on the sqlite path. In this test we - // create rollouts manually, so we must also create the sqlite DB and mark backfill - // complete; otherwise app-server will permanently use filesystem fallback. + // `thread/list` applies `search_term` on the sqlite fast path. This test creates + // rollouts manually, so mark the DB backfill complete and then run an unsearched + // list large enough to repair every rollout the searched list should find. 
let state_db = codex_state::StateRuntime::init(codex_home.path().to_path_buf(), "mock_provider".into()) .await?; state_db .mark_backfill_complete(/*last_watermark*/ None) .await?; + let rollout_config = codex_rollout::RolloutConfig { + codex_home: codex_home.path().to_path_buf(), + sqlite_home: codex_home.path().to_path_buf(), + cwd: codex_home.path().to_path_buf(), + model_provider_id: "mock_provider".to_string(), + generate_memories: false, + }; + let repaired_page = codex_core::RolloutRecorder::list_threads( + &rollout_config, + /*page_size*/ 10, + /*cursor*/ None, + codex_core::ThreadSortKey::CreatedAt, + &[], + /*model_providers*/ None, + "mock_provider", + /*search_term*/ None, + ) + .await?; + assert_eq!(repaired_page.items.len(), 3); let mut mcp = init_mcp(codex_home.path()).await?; let request_id = mcp @@ -1032,7 +1051,7 @@ async fn thread_list_includes_git_info() -> Result<()> { }; assert_eq!(thread.git_info, Some(expected_git)); assert_eq!(thread.source, SessionSource::Cli); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); Ok(()) diff --git a/codex-rs/app-server/tests/suite/v2/thread_memory_mode_set.rs b/codex-rs/app-server/tests/suite/v2/thread_memory_mode_set.rs new file mode 100644 index 0000000000..bf9bba7b2f --- /dev/null +++ b/codex-rs/app-server/tests/suite/v2/thread_memory_mode_set.rs @@ -0,0 +1,138 @@ +use anyhow::Result; +use app_test_support::McpProcess; +use app_test_support::create_fake_rollout; +use app_test_support::create_mock_responses_server_repeating_assistant; +use app_test_support::to_response; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ThreadMemoryMode; +use codex_app_server_protocol::ThreadMemoryModeSetParams; +use codex_app_server_protocol::ThreadMemoryModeSetResponse; +use codex_app_server_protocol::ThreadStartParams; +use 
codex_app_server_protocol::ThreadStartResponse; +use codex_protocol::ThreadId; +use codex_state::StateRuntime; +use pretty_assertions::assert_eq; +use std::path::Path; +use std::sync::Arc; +use tempfile::TempDir; +use tokio::time::timeout; + +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +#[tokio::test] +async fn thread_memory_mode_set_updates_loaded_thread_state() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + let state_db = init_state_db(codex_home.path()).await?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let start_id = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let start_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(start_id)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(start_resp)?; + let thread_uuid = ThreadId::from_string(&thread.id)?; + + let set_id = mcp + .send_thread_memory_mode_set_request(ThreadMemoryModeSetParams { + thread_id: thread.id, + mode: ThreadMemoryMode::Disabled, + }) + .await?; + let set_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(set_id)), + ) + .await??; + let _: ThreadMemoryModeSetResponse = to_response::(set_resp)?; + + let memory_mode = state_db.get_thread_memory_mode(thread_uuid).await?; + assert_eq!(memory_mode.as_deref(), Some("disabled")); + Ok(()) +} + +#[tokio::test] +async fn thread_memory_mode_set_updates_stored_thread_state() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + let state_db = init_state_db(codex_home.path()).await?; + + let thread_id = create_fake_rollout( + codex_home.path(), + "2025-01-06T08-30-00", + "2025-01-06T08:30:00Z", + "Stored thread preview", + Some("mock_provider"), + /*git_info*/ None, + )?; + let thread_uuid = ThreadId::from_string(&thread_id)?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + for mode in [ThreadMemoryMode::Disabled, ThreadMemoryMode::Enabled] { + let set_id = mcp + .send_thread_memory_mode_set_request(ThreadMemoryModeSetParams { + thread_id: thread_id.clone(), + mode, + }) + .await?; + let set_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(set_id)), + ) + .await??; + let _: ThreadMemoryModeSetResponse = to_response::(set_resp)?; + } + + let memory_mode = state_db.get_thread_memory_mode(thread_uuid).await?; + assert_eq!(memory_mode.as_deref(), Some("enabled")); + Ok(()) +} + +async fn init_state_db(codex_home: &Path) -> Result> { + let state_db = 
StateRuntime::init(codex_home.to_path_buf(), "mock_provider".into()).await?; + state_db + .mark_backfill_complete(/*last_watermark*/ None) + .await?; + Ok(state_db) +} + +fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write( + config_toml, + format!( + r#" +model = "mock-model" +approval_policy = "never" +sandbox_mode = "read-only" + +model_provider = "mock_provider" +suppress_unstable_features_warning = true + +[features] +sqlite = true + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "{server_uri}/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"# + ), + ) +} diff --git a/codex-rs/app-server/tests/suite/v2/thread_read.rs b/codex-rs/app-server/tests/suite/v2/thread_read.rs index c5fd699855..4ab1181624 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_read.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_read.rs @@ -2,6 +2,7 @@ use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_fake_rollout_with_text_elements; use app_test_support::create_mock_responses_server_repeating_assistant; +use app_test_support::test_absolute_path; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; @@ -32,10 +33,12 @@ use core_test_support::responses; use pretty_assertions::assert_eq; use serde_json::Value; use std::path::Path; -use std::path::PathBuf; use tempfile::TempDir; use tokio::time::timeout; +#[cfg(windows)] +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25); +#[cfg(not(windows))] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] @@ -83,7 +86,7 @@ async fn thread_read_returns_summary_without_turns() -> Result<()> { assert_eq!(thread.model_provider, "mock_provider"); assert!(!thread.ephemeral, "stored rollouts should not be 
ephemeral"); assert!(thread.path.as_ref().expect("thread path").is_absolute()); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); diff --git a/codex-rs/app-server/tests/suite/v2/thread_resume.rs b/codex-rs/app-server/tests/suite/v2/thread_resume.rs index 80ae756888..edf792ca1d 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_resume.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_resume.rs @@ -3,11 +3,13 @@ use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; use app_test_support::create_apply_patch_sse_response; use app_test_support::create_fake_rollout_with_text_elements; +use app_test_support::create_fake_rollout_with_token_usage; use app_test_support::create_final_assistant_message_sse_response; use app_test_support::create_mock_responses_server_repeating_assistant; use app_test_support::create_mock_responses_server_sequence_unchecked; use app_test_support::create_shell_command_sse_response; use app_test_support::rollout_path; +use app_test_support::test_absolute_path; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; use chrono::Utc; @@ -22,6 +24,7 @@ use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::PatchApplyStatus; use codex_app_server_protocol::PatchChangeKind; use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::ServerRequest; use codex_app_server_protocol::SessionSource; use codex_app_server_protocol::ThreadItem; @@ -49,6 +52,11 @@ use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource as RolloutSessionSource; +use codex_protocol::protocol::TokenCountEvent; +use codex_protocol::protocol::TokenUsage; +use 
codex_protocol::protocol::TokenUsageInfo; +use codex_protocol::protocol::TurnAbortReason; +use codex_protocol::protocol::TurnAbortedEvent; use codex_protocol::protocol::TurnStartedEvent; use codex_protocol::user_input::ByteRange; use codex_protocol::user_input::TextElement; @@ -75,6 +83,9 @@ use super::analytics::enable_analytics_capture; use super::analytics::thread_initialized_event; use super::analytics::wait_for_analytics_payload; +#[cfg(windows)] +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25); +#[cfg(not(windows))] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT: &str = "You are Codex, a coding agent based on GPT-5. You and the user share the same workspace and collaborate to achieve the user's goals."; @@ -178,7 +189,7 @@ async fn thread_resume_tracks_thread_initialized_analytics() -> Result<()> { /*git_info*/ None, )?; - let mut mcp = McpProcess::new(codex_home.path()).await?; + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let resume_id = mcp @@ -244,7 +255,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> { assert_eq!(thread.preview, preview); assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.path.as_ref().expect("thread path").is_absolute()); - assert_eq!(thread.cwd, PathBuf::from("/")); + assert_eq!(thread.cwd, test_absolute_path("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); @@ -274,6 +285,268 @@ async fn thread_resume_returns_rollout_history() -> Result<()> { Ok(()) } +#[tokio::test] +async fn thread_resume_emits_restored_token_usage_before_next_turn() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), 
&server.uri())?; + + let conversation_id = create_fake_rollout_with_token_usage( + codex_home.path(), + "2025-01-05T12-00-00", + "2025-01-05T12:00:00Z", + "Saved user message", + Some("mock_provider"), + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let resume_id = mcp + .send_thread_resume_request(ThreadResumeParams { + thread_id: conversation_id, + ..Default::default() + }) + .await?; + let resume_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), + ) + .await??; + let ThreadResumeResponse { thread, .. } = to_response::(resume_resp)?; + + let note = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("thread/tokenUsage/updated"), + ) + .await??; + let parsed: ServerNotification = note.try_into()?; + let ServerNotification::ThreadTokenUsageUpdated(notification) = parsed else { + panic!("expected thread/tokenUsage/updated notification"); + }; + + assert_eq!(notification.thread_id, thread.id); + assert_eq!(notification.turn_id, thread.turns[0].id); + assert_eq!(notification.token_usage.total.total_tokens, 150); + assert_eq!(notification.token_usage.total.input_tokens, 120); + assert_eq!(notification.token_usage.total.cached_input_tokens, 20); + assert_eq!(notification.token_usage.total.output_tokens, 30); + assert_eq!(notification.token_usage.total.reasoning_output_tokens, 10); + assert_eq!(notification.token_usage.last.total_tokens, 90); + assert_eq!(notification.token_usage.model_context_window, Some(200_000)); + + Ok(()) +} + +#[tokio::test] +async fn thread_resume_token_usage_replay_ignores_stale_interrupted_tail_turn() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + + let filename_ts = "2025-01-05T12-00-00"; + let meta_rfc3339 = 
"2025-01-05T12:00:00Z"; + let conversation_id = create_fake_rollout_with_token_usage( + codex_home.path(), + filename_ts, + meta_rfc3339, + "Saved user message", + Some("mock_provider"), + )?; + let rollout_file_path = rollout_path(codex_home.path(), filename_ts, &conversation_id); + let persisted_rollout = std::fs::read_to_string(&rollout_file_path)?; + let stale_turn_id = "incomplete-turn-after-token-usage"; + let appended_rollout = [ + json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::TurnStarted(TurnStartedEvent { + turn_id: stale_turn_id.to_string(), + started_at: None, + model_context_window: None, + collaboration_mode_kind: Default::default(), + }))?, + }) + .to_string(), + json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::AgentMessage(AgentMessageEvent { + message: "Still running".to_string(), + phase: None, + memory_citation: None, + }))?, + }) + .to_string(), + ] + .join("\n"); + std::fs::write( + &rollout_file_path, + format!("{persisted_rollout}{appended_rollout}\n"), + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let resume_id = mcp + .send_thread_resume_request(ThreadResumeParams { + thread_id: conversation_id, + ..Default::default() + }) + .await?; + let resume_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), + ) + .await??; + let ThreadResumeResponse { thread, .. 
} = to_response::(resume_resp)?; + + assert_eq!(thread.turns.len(), 2); + assert_eq!(thread.turns[0].status, TurnStatus::Completed); + assert_eq!(thread.turns[1].id, stale_turn_id); + assert_eq!(thread.turns[1].status, TurnStatus::Interrupted); + + let note = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("thread/tokenUsage/updated"), + ) + .await??; + let parsed: ServerNotification = note.try_into()?; + let ServerNotification::ThreadTokenUsageUpdated(notification) = parsed else { + panic!("expected thread/tokenUsage/updated notification"); + }; + + assert_eq!(notification.thread_id, thread.id); + assert_eq!(notification.turn_id, thread.turns[0].id); + assert_ne!(notification.turn_id, stale_turn_id); + assert_eq!(notification.token_usage.total.total_tokens, 150); + assert_eq!(notification.token_usage.last.total_tokens, 90); + + Ok(()) +} + +#[tokio::test] +async fn thread_resume_token_usage_replay_can_belong_to_interrupted_turn() -> Result<()> { + let server = create_mock_responses_server_repeating_assistant("Done").await; + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + + let filename_ts = "2025-01-05T12-00-00"; + let meta_rfc3339 = "2025-01-05T12:00:00Z"; + let conversation_id = create_fake_rollout_with_token_usage( + codex_home.path(), + filename_ts, + meta_rfc3339, + "Saved user message", + Some("mock_provider"), + )?; + let rollout_file_path = rollout_path(codex_home.path(), filename_ts, &conversation_id); + let persisted_rollout = std::fs::read_to_string(&rollout_file_path)?; + let interrupted_turn_id = "interrupted-turn-with-token-usage"; + let appended_rollout = [ + json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::TurnStarted(TurnStartedEvent { + turn_id: interrupted_turn_id.to_string(), + started_at: None, + model_context_window: None, + collaboration_mode_kind: Default::default(), + }))?, + }) + .to_string(), + json!({ + 
"timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::AgentMessage(AgentMessageEvent { + message: "Interrupted after usage".to_string(), + phase: None, + memory_citation: None, + }))?, + }) + .to_string(), + json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::TokenCount(TokenCountEvent { + info: Some(TokenUsageInfo { + total_token_usage: TokenUsage { + input_tokens: 180, + cached_input_tokens: 40, + output_tokens: 50, + reasoning_output_tokens: 15, + total_tokens: 230, + }, + last_token_usage: TokenUsage { + input_tokens: 90, + cached_input_tokens: 30, + output_tokens: 40, + reasoning_output_tokens: 12, + total_tokens: 130, + }, + model_context_window: Some(200_000), + }), + rate_limits: None, + }))?, + }) + .to_string(), + json!({ + "timestamp": meta_rfc3339, + "type": "event_msg", + "payload": serde_json::to_value(EventMsg::TurnAborted(TurnAbortedEvent { + turn_id: Some(interrupted_turn_id.to_string()), + reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, + }))?, + }) + .to_string(), + ] + .join("\n"); + std::fs::write( + &rollout_file_path, + format!("{persisted_rollout}{appended_rollout}\n"), + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let resume_id = mcp + .send_thread_resume_request(ThreadResumeParams { + thread_id: conversation_id, + ..Default::default() + }) + .await?; + let resume_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), + ) + .await??; + let ThreadResumeResponse { thread, .. 
} = to_response::(resume_resp)?; + + assert_eq!(thread.turns.len(), 2); + assert_eq!(thread.turns[0].status, TurnStatus::Completed); + assert_eq!(thread.turns[1].id, interrupted_turn_id); + assert_eq!(thread.turns[1].status, TurnStatus::Interrupted); + + let note = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("thread/tokenUsage/updated"), + ) + .await??; + let parsed: ServerNotification = note.try_into()?; + let ServerNotification::ThreadTokenUsageUpdated(notification) = parsed else { + panic!("expected thread/tokenUsage/updated notification"); + }; + + assert_eq!(notification.thread_id, thread.id); + assert_eq!(notification.turn_id, interrupted_turn_id); + assert_eq!(notification.token_usage.total.total_tokens, 230); + assert_eq!(notification.token_usage.last.total_tokens, 130); + + Ok(()) +} + #[tokio::test] async fn thread_resume_prefers_persisted_git_metadata_for_local_threads() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; @@ -1613,7 +1886,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> { let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { thread_id: "not-a-valid-thread-id".to_string(), - path: Some(thread_path), + path: Some(thread_path.to_path_buf()), ..Default::default() }) .await?; @@ -1742,7 +2015,7 @@ async fn start_materialized_thread_and_restart( Ok(RestartedThreadFixture { mcp: second_mcp, thread_id, - rollout_file_path, + rollout_file_path: rollout_file_path.to_path_buf(), }) } @@ -1901,7 +2174,7 @@ fn create_config_toml_with_chatgpt_base_url( let general_analytics_toml = if general_analytics_enabled { "\ngeneral_analytics = true".to_string() } else { - String::new() + "\ngeneral_analytics = false".to_string() }; let config_toml = codex_home.join("config.toml"); std::fs::write( diff --git a/codex-rs/app-server/tests/suite/v2/thread_start.rs b/codex-rs/app-server/tests/suite/v2/thread_start.rs index c0df2cab07..508c77f7ff 100644 --- 
a/codex-rs/app-server/tests/suite/v2/thread_start.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_start.rs @@ -1,6 +1,7 @@ use anyhow::Result; use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; +use app_test_support::PathBufExt; use app_test_support::create_mock_responses_server_repeating_assistant; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; @@ -20,6 +21,7 @@ use codex_app_server_protocol::ThreadStatus; use codex_app_server_protocol::ThreadStatusChangedNotification; use codex_config::types::AuthCredentialsStoreMode; use codex_core::config::set_project_trust_level; +use codex_exec_server::LOCAL_FS; use codex_git_utils::resolve_root_git_project_for_trust; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; use codex_protocol::config_types::ServiceTier; @@ -40,7 +42,7 @@ use wiremock::matchers::method; use wiremock::matchers::path; use super::analytics::assert_basic_thread_initialized_event; -use super::analytics::enable_analytics_capture; +use super::analytics::mount_analytics_capture; use super::analytics::thread_initialized_event; use super::analytics::wait_for_analytics_payload; @@ -211,14 +213,15 @@ async fn thread_start_response_includes_loaded_instruction_sources() -> Result<( } #[cfg(windows)] -fn normalize_path_for_comparison(path: PathBuf) -> PathBuf { +fn normalize_path_for_comparison(path: impl AsRef) -> PathBuf { + let path = path.as_ref(); let path = path.display().to_string(); PathBuf::from(path.strip_prefix(r"\\?\").unwrap_or(&path)) } #[cfg(not(windows))] -fn normalize_path_for_comparison(path: PathBuf) -> PathBuf { - path +fn normalize_path_for_comparison(path: impl AsRef) -> PathBuf { + path.as_ref().to_path_buf() } #[tokio::test] @@ -232,9 +235,9 @@ async fn thread_start_tracks_thread_initialized_analytics() -> Result<()> { &server.uri(), /*general_analytics_enabled*/ true, )?; - enable_analytics_capture(&server, codex_home.path()).await?; + mount_analytics_capture(&server, 
codex_home.path()).await?; - let mut mcp = McpProcess::new(codex_home.path()).await?; + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let req_id = mcp @@ -265,9 +268,9 @@ async fn thread_start_does_not_track_thread_initialized_analytics_without_featur &server.uri(), /*general_analytics_enabled*/ false, )?; - enable_analytics_capture(&server, codex_home.path()).await?; + mount_analytics_capture(&server, codex_home.path()).await?; - let mut mcp = McpProcess::new(codex_home.path()).await?; + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let req_id = mcp @@ -280,11 +283,25 @@ async fn thread_start_does_not_track_thread_initialized_analytics_without_featur .await??; let _ = to_response::(resp)?; - let payload = wait_for_analytics_payload(&server, Duration::from_millis(250)).await; - assert!( - payload.is_err(), - "thread analytics should be gated off when general_analytics is disabled" - ); + assert_no_thread_initialized_analytics(&server, Duration::from_millis(250)).await?; + Ok(()) +} + +async fn assert_no_thread_initialized_analytics( + server: &MockServer, + wait_duration: Duration, +) -> Result<()> { + tokio::time::sleep(wait_duration).await; + let requests = server.received_requests().await.unwrap_or_default(); + for request in requests.iter().filter(|request| { + request.method == "POST" && request.url.path() == "/codex/analytics-events/events" + }) { + let payload: Value = serde_json::from_slice(&request.body)?; + assert!( + thread_initialized_event(&payload).is_err(), + "thread analytics should be gated off when general_analytics is disabled; payload={payload}" + ); + } Ok(()) } @@ -701,9 +718,11 @@ model_reasoning_effort = "high" assert_eq!(reasoning_effort, Some(ReasoningEffort::High)); let config_toml = std::fs::read_to_string(codex_home.path().join("config.toml"))?; - let 
trusted_root = resolve_root_git_project_for_trust(workspace.path()) - .unwrap_or_else(|| workspace.path().to_path_buf()); - assert!(config_toml.contains(&persisted_trust_path(&trusted_root))); + let workspace_abs = workspace.path().to_path_buf().abs(); + let trusted_root = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &workspace_abs) + .await + .unwrap_or(workspace_abs); + assert!(config_toml.contains(&persisted_trust_path(trusted_root.as_path()))); assert!(config_toml.contains("trust_level = \"trusted\"")); Ok(()) @@ -738,9 +757,11 @@ async fn thread_start_with_nested_git_cwd_trusts_repo_root() -> Result<()> { .await??; let config_toml = std::fs::read_to_string(codex_home.path().join("config.toml"))?; - let trusted_root = - resolve_root_git_project_for_trust(&nested).expect("git root should resolve"); - assert!(config_toml.contains(&persisted_trust_path(&trusted_root))); + let nested_abs = nested.abs(); + let trusted_root = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &nested_abs) + .await + .expect("git root should resolve"); + assert!(config_toml.contains(&persisted_trust_path(trusted_root.as_path()))); assert!(!config_toml.contains(&persisted_trust_path(&nested))); Ok(()) @@ -888,7 +909,7 @@ fn create_config_toml_with_chatgpt_base_url( let general_analytics_toml = if general_analytics_enabled { "\ngeneral_analytics = true".to_string() } else { - String::new() + "\ngeneral_analytics = false".to_string() }; let config_toml = codex_home.join("config.toml"); std::fs::write( diff --git a/codex-rs/app-server/tests/suite/v2/thread_unsubscribe.rs b/codex-rs/app-server/tests/suite/v2/thread_unsubscribe.rs index 5808f0fe79..e91a8654cb 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_unsubscribe.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_unsubscribe.rs @@ -1,16 +1,15 @@ -use anyhow::Context; use anyhow::Result; use app_test_support::McpProcess; -use app_test_support::create_final_assistant_message_sse_response; use 
app_test_support::create_mock_responses_server_repeating_assistant; -use app_test_support::create_mock_responses_server_sequence_unchecked; -use app_test_support::create_shell_command_sse_response; use app_test_support::to_response; +use codex_app_server_protocol::DynamicToolCallOutputContentItem; +use codex_app_server_protocol::DynamicToolCallParams; +use codex_app_server_protocol::DynamicToolCallResponse; +use codex_app_server_protocol::DynamicToolSpec; use codex_app_server_protocol::ItemStartedNotification; -use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; -use codex_app_server_protocol::ServerNotification; +use codex_app_server_protocol::ServerRequest; use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadLoadedListParams; use codex_app_server_protocol::ThreadLoadedListResponse; @@ -21,7 +20,6 @@ use codex_app_server_protocol::ThreadResumeResponse; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::ThreadStatus; -use codex_app_server_protocol::ThreadStatusChangedNotification; use codex_app_server_protocol::ThreadUnsubscribeParams; use codex_app_server_protocol::ThreadUnsubscribeResponse; use codex_app_server_protocol::ThreadUnsubscribeStatus; @@ -29,59 +27,16 @@ use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::UserInput as V2UserInput; use core_test_support::responses; +use core_test_support::streaming_sse::StreamingSseChunk; +use core_test_support::streaming_sse::start_streaming_sse_server; use pretty_assertions::assert_eq; +use serde_json::json; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); - -async fn wait_for_responses_request_count_to_stabilize( - server: &wiremock::MockServer, - 
expected_count: usize, - settle_duration: std::time::Duration, -) -> Result<()> { - timeout(DEFAULT_READ_TIMEOUT, async { - let mut stable_since: Option = None; - loop { - let requests = server - .received_requests() - .await - .context("failed to fetch received requests")?; - let responses_request_count = requests - .iter() - .filter(|request| { - request.method == "POST" && request.url.path().ends_with("/responses") - }) - .count(); - - if responses_request_count > expected_count { - anyhow::bail!( - "expected exactly {expected_count} /responses requests, got {responses_request_count}" - ); - } - - if responses_request_count == expected_count { - match stable_since { - Some(stable_since) if stable_since.elapsed() >= settle_duration => { - return Ok::<(), anyhow::Error>(()); - } - None => stable_since = Some(tokio::time::Instant::now()), - Some(_) => {} - } - } else { - stable_since = None; - } - - tokio::time::sleep(std::time::Duration::from_millis(10)).await; - } - }) - .await??; - - Ok(()) -} - #[tokio::test] -async fn thread_unsubscribe_unloads_thread_and_emits_thread_closed_notification() -> Result<()> { +async fn thread_unsubscribe_keeps_thread_loaded_until_idle_timeout() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; @@ -104,20 +59,14 @@ async fn thread_unsubscribe_unloads_thread_and_emits_thread_closed_notification( let unsubscribe = to_response::(unsubscribe_resp)?; assert_eq!(unsubscribe.status, ThreadUnsubscribeStatus::Unsubscribed); - let closed_notif: JSONRPCNotification = timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_notification_message("thread/closed"), - ) - .await??; - let parsed: ServerNotification = closed_notif.try_into()?; - let ServerNotification::ThreadClosed(payload) = parsed else { - anyhow::bail!("expected thread/closed notification"); - }; - assert_eq!(payload.thread_id, thread_id); - - let 
status_changed = wait_for_thread_status_not_loaded(&mut mcp, &payload.thread_id).await?; - assert_eq!(status_changed.thread_id, payload.thread_id); - assert_eq!(status_changed.status, ThreadStatus::NotLoaded); + assert!( + timeout( + std::time::Duration::from_millis(250), + mcp.read_stream_until_notification_message("thread/closed"), + ) + .await + .is_err() + ); let list_id = mcp .send_thread_loaded_list_request(ThreadLoadedListParams::default()) @@ -129,22 +78,18 @@ async fn thread_unsubscribe_unloads_thread_and_emits_thread_closed_notification( .await??; let ThreadLoadedListResponse { data, next_cursor } = to_response::(list_resp)?; - assert_eq!(data, Vec::::new()); + assert_eq!(data, vec![thread_id]); assert_eq!(next_cursor, None); Ok(()) } #[tokio::test] -async fn thread_unsubscribe_during_turn_interrupts_turn_and_emits_thread_closed() -> Result<()> { - #[cfg(target_os = "windows")] - let shell_command = vec![ - "powershell".to_string(), - "-Command".to_string(), - "Start-Sleep -Seconds 10".to_string(), - ]; - #[cfg(not(target_os = "windows"))] - let shell_command = vec!["sleep".to_string(), "10".to_string()]; +async fn thread_unsubscribe_during_turn_keeps_turn_running() -> Result<()> { + let call_id = "deterministic-wait-call"; + let tool_name = "deterministic_wait"; + let tool_args = json!({}); + let tool_call_arguments = serde_json::to_string(&tool_args)?; let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); @@ -152,28 +97,61 @@ async fn thread_unsubscribe_during_turn_interrupts_turn_and_emits_thread_closed( let working_directory = tmp.path().join("workdir"); std::fs::create_dir(&working_directory)?; - let server = create_mock_responses_server_sequence_unchecked(vec![ - create_shell_command_sse_response( - shell_command.clone(), - Some(&working_directory), - Some(10_000), - "call_sleep", - )?, - create_final_assistant_message_sse_response("Done")?, + let (server, mut completions) = start_streaming_sse_server(vec![ + 
vec![StreamingSseChunk { + gate: None, + body: responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_function_call(call_id, tool_name, &tool_call_arguments), + responses::ev_completed("resp-1"), + ]), + }], + vec![StreamingSseChunk { + gate: None, + body: responses::sse(vec![ + responses::ev_response_created("resp-2"), + responses::ev_assistant_message("msg-1", "Done"), + responses::ev_completed("resp-2"), + ]), + }], ]) .await; - create_config_toml(&codex_home, &server.uri())?; + let first_response_completed = completions.remove(0); + let final_response_completed = completions.remove(0); + create_config_toml(&codex_home, server.uri())?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; - let thread_id = start_thread(&mut mcp).await?; + let thread_req = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + dynamic_tools: Some(vec![DynamicToolSpec { + name: tool_name.to_string(), + description: "Deterministic wait tool".to_string(), + input_schema: json!({ + "type": "object", + "properties": {}, + "additionalProperties": false, + }), + defer_loading: false, + }]), + ..Default::default() + }) + .await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(thread_resp)?; + let thread_id = thread.id; let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread_id.clone(), input: vec![V2UserInput::Text { - text: "run sleep".to_string(), + text: "run deterministic tool".to_string(), text_elements: Vec::new(), }], cwd: Some(working_directory), @@ -189,9 +167,37 @@ async fn thread_unsubscribe_during_turn_interrupts_turn_and_emits_thread_closed( timeout( DEFAULT_READ_TIMEOUT, - wait_for_command_execution_item_started(&mut mcp), + server.wait_for_request_count(/*count*/ 1), + ) + .await?; + timeout(DEFAULT_READ_TIMEOUT, first_response_completed).await??; + + let started = timeout( + DEFAULT_READ_TIMEOUT, + wait_for_dynamic_tool_started(&mut mcp, call_id), ) .await??; + assert_eq!(started.thread_id, thread_id); + + let request = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_request_message(), + ) + .await??; + let (request_id, params) = match request { + ServerRequest::DynamicToolCall { request_id, params } => (request_id, params), + other => panic!("expected DynamicToolCall request, got {other:?}"), + }; + assert_eq!( + params, + DynamicToolCallParams { + thread_id: thread_id.clone(), + turn_id: started.turn_id, + call_id: call_id.to_string(), + tool: tool_name.to_string(), + arguments: tool_args, + } + ); let unsubscribe_id = mcp .send_thread_unsubscribe_request(ThreadUnsubscribeParams { @@ -206,29 +212,35 @@ async fn thread_unsubscribe_during_turn_interrupts_turn_and_emits_thread_closed( let unsubscribe = to_response::(unsubscribe_resp)?; assert_eq!(unsubscribe.status, ThreadUnsubscribeStatus::Unsubscribed); - let closed_notif: JSONRPCNotification = timeout( - DEFAULT_READ_TIMEOUT, + let closed_while_tool_call_blocked = timeout( + std::time::Duration::from_millis(250), mcp.read_stream_until_notification_message("thread/closed"), - ) - .await??; - let parsed: ServerNotification = closed_notif.try_into()?; - let ServerNotification::ThreadClosed(payload) = parsed else { - 
anyhow::bail!("expected thread/closed notification"); - }; - assert_eq!(payload.thread_id, thread_id); + ); + let closed_while_tool_call_blocked = closed_while_tool_call_blocked.await; + assert!(closed_while_tool_call_blocked.is_err()); - wait_for_responses_request_count_to_stabilize( - &server, - /*expected_count*/ 1, - std::time::Duration::from_millis(200), + let response = DynamicToolCallResponse { + content_items: vec![DynamicToolCallOutputContentItem::InputText { + text: "dynamic-ok".to_string(), + }], + success: true, + }; + mcp.send_response(request_id, serde_json::to_value(response)?) + .await?; + + timeout( + DEFAULT_READ_TIMEOUT, + server.wait_for_request_count(/*count*/ 2), ) .await?; + timeout(DEFAULT_READ_TIMEOUT, final_response_completed).await??; + server.shutdown().await; Ok(()) } #[tokio::test] -async fn thread_unsubscribe_clears_cached_status_before_resume() -> Result<()> { +async fn thread_unsubscribe_preserves_cached_status_before_idle_unload() -> Result<()> { let server = responses::start_mock_server().await; let _response_mock = responses::mount_sse_once( &server, @@ -291,11 +303,14 @@ async fn thread_unsubscribe_clears_cached_status_before_resume() -> Result<()> { .await??; let unsubscribe = to_response::(unsubscribe_resp)?; assert_eq!(unsubscribe.status, ThreadUnsubscribeStatus::Unsubscribed); - timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_notification_message("thread/closed"), - ) - .await??; + assert!( + timeout( + std::time::Duration::from_millis(250), + mcp.read_stream_until_notification_message("thread/closed"), + ) + .await + .is_err() + ); let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { @@ -309,13 +324,13 @@ async fn thread_unsubscribe_clears_cached_status_before_resume() -> Result<()> { ) .await??; let resume: ThreadResumeResponse = to_response::(resume_resp)?; - assert_eq!(resume.thread.status, ThreadStatus::Idle); + assert_eq!(resume.thread.status, ThreadStatus::SystemError); Ok(()) } #[tokio::test] 
-async fn thread_unsubscribe_reports_not_loaded_after_thread_is_unloaded() -> Result<()> { +async fn thread_unsubscribe_reports_not_subscribed_before_idle_unload() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; @@ -341,12 +356,6 @@ async fn thread_unsubscribe_reports_not_loaded_after_thread_is_unloaded() -> Res ThreadUnsubscribeStatus::Unsubscribed ); - timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_notification_message("thread/closed"), - ) - .await??; - let second_unsubscribe_id = mcp .send_thread_unsubscribe_request(ThreadUnsubscribeParams { thread_id }) .await?; @@ -358,43 +367,26 @@ async fn thread_unsubscribe_reports_not_loaded_after_thread_is_unloaded() -> Res let second_unsubscribe = to_response::(second_unsubscribe_resp)?; assert_eq!( second_unsubscribe.status, - ThreadUnsubscribeStatus::NotLoaded + ThreadUnsubscribeStatus::NotSubscribed ); Ok(()) } -async fn wait_for_command_execution_item_started(mcp: &mut McpProcess) -> Result<()> { +async fn wait_for_dynamic_tool_started( + mcp: &mut McpProcess, + call_id: &str, +) -> Result { loop { - let started_notif = mcp + let notification = mcp .read_stream_until_notification_message("item/started") .await?; - let started_params = started_notif.params.context("item/started params")?; - let started: ItemStartedNotification = serde_json::from_value(started_params)?; - if let ThreadItem::CommandExecution { .. 
} = started.item { - return Ok(()); - } - } -} - -async fn wait_for_thread_status_not_loaded( - mcp: &mut McpProcess, - thread_id: &str, -) -> Result { - loop { - let status_changed_notif: JSONRPCNotification = timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_notification_message("thread/status/changed"), - ) - .await??; - let status_changed_params = status_changed_notif - .params - .context("thread/status/changed params must be present")?; - let status_changed: ThreadStatusChangedNotification = - serde_json::from_value(status_changed_params)?; - if status_changed.thread_id == thread_id && status_changed.status == ThreadStatus::NotLoaded - { - return Ok(status_changed); + let Some(params) = notification.params else { + continue; + }; + let started: ItemStartedNotification = serde_json::from_value(params)?; + if matches!(&started.item, ThreadItem::DynamicToolCall { id, .. } if id == call_id) { + return Ok(started); } } } diff --git a/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs b/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs index 2850c7b74f..f8eaf799da 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs @@ -3,6 +3,7 @@ use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_mock_responses_server_sequence; +use app_test_support::create_mock_responses_server_sequence_unchecked; use app_test_support::create_shell_command_sse_response; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCNotification; @@ -43,14 +44,15 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { std::fs::create_dir(&working_directory)?; // Mock server: long-running shell command then (after abort) nothing else needed. 
- let server = create_mock_responses_server_sequence(vec![create_shell_command_sse_response( - shell_command.clone(), - Some(&working_directory), - Some(10_000), - "call_sleep", - )?]) - .await; - create_config_toml(&codex_home, &server.uri(), "never", "danger-full-access")?; + let server = + create_mock_responses_server_sequence_unchecked(vec![create_shell_command_sse_response( + shell_command.clone(), + Some(&working_directory), + Some(10_000), + "call_sleep", + )?]) + .await; + create_config_toml(&codex_home, &server.uri(), "never", "workspace-write")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; @@ -87,6 +89,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { ) .await??; let TurnStartResponse { turn } = to_response::(turn_resp)?; + let turn_id = turn.id.clone(); // Give the command a brief moment to start. tokio::time::sleep(std::time::Duration::from_secs(1)).await; @@ -96,7 +99,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { let interrupt_id = mcp .send_turn_interrupt_request(TurnInterruptParams { thread_id: thread_id.clone(), - turn_id: turn.id, + turn_id: turn_id.clone(), }) .await?; let interrupt_resp: JSONRPCResponse = timeout( @@ -124,10 +127,17 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { #[tokio::test] async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<()> { + #[cfg(target_os = "windows")] + let shell_command = vec![ + "powershell".to_string(), + "-Command".to_string(), + "Start-Sleep -Seconds 10".to_string(), + ]; + #[cfg(not(target_os = "windows"))] let shell_command = vec![ "python3".to_string(), "-c".to_string(), - "print(42)".to_string(), + "import time; time.sleep(10)".to_string(), ]; let tmp = TempDir::new()?; @@ -140,7 +150,7 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() shell_command.clone(), Some(&working_directory), Some(10_000), - "call_python_approval", + 
"call_sleep_approval", )?]) .await; create_config_toml(&codex_home, &server.uri(), "untrusted", "read-only")?; @@ -169,6 +179,7 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() text_elements: Vec::new(), }], cwd: Some(working_directory), + approval_policy: Some(codex_app_server_protocol::AskForApproval::UnlessTrusted), ..Default::default() }) .await?; @@ -187,7 +198,7 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() let ServerRequest::CommandExecutionRequestApproval { request_id, params } = request else { panic!("expected CommandExecutionRequestApproval request"); }; - assert_eq!(params.item_id, "call_python_approval"); + assert_eq!(params.item_id, "call_sleep_approval"); assert_eq!(params.thread_id, thread.id); assert_eq!(params.turn_id, turn.id); @@ -248,6 +259,7 @@ fn create_config_toml( r#" model = "mock-model" approval_policy = "{approval_policy}" +approvals_reviewer = "user" sandbox_mode = "{sandbox_mode}" model_provider = "mock_provider" diff --git a/codex-rs/app-server/tests/suite/v2/turn_start.rs b/codex-rs/app-server/tests/suite/v2/turn_start.rs index d81a5d27a0..e8682d7325 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_start.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_start.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use app_test_support::DEFAULT_CLIENT_NAME; use app_test_support::McpProcess; use app_test_support::create_apply_patch_sse_response; use app_test_support::create_exec_command_sse_response; @@ -9,6 +10,7 @@ use app_test_support::create_mock_responses_server_sequence_unchecked; use app_test_support::create_shell_command_sse_response; use app_test_support::format_with_current_shell_display; use app_test_support::to_response; +use app_test_support::write_mock_responses_config_toml_with_chatgpt_base_url; use codex_app_server::INPUT_TOO_LARGE_ERROR_CODE; use codex_app_server::INVALID_PARAMS_ERROR_CODE; use codex_app_server_protocol::ByteRange; @@ -64,6 +66,10 @@ use 
std::path::Path; use tempfile::TempDir; use tokio::time::timeout; +use super::analytics::enable_analytics_capture; +use super::analytics::mount_analytics_capture; +use super::analytics::wait_for_analytics_event; + #[cfg(windows)] const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(25); #[cfg(not(windows))] @@ -328,6 +334,163 @@ async fn thread_start_omits_empty_instruction_overrides_from_model_request() -> Ok(()) } +#[tokio::test] +async fn turn_start_tracks_turn_event_analytics() -> Result<()> { + let responses = vec![create_final_assistant_message_sse_response("Done")?]; + let server = create_mock_responses_server_sequence_unchecked(responses).await; + + let codex_home = TempDir::new()?; + write_mock_responses_config_toml_with_chatgpt_base_url( + codex_home.path(), + &server.uri(), + &server.uri(), + )?; + enable_analytics_capture(&server, codex_home.path()).await?; + + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_req = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(thread_resp)?; + + let turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id.clone(), + input: vec![V2UserInput::Image { + url: "https://example.com/a.png".to_string(), + }], + ..Default::default() + }) + .await?; + let turn_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), + ) + .await??; + let TurnStartResponse { turn } = to_response::(turn_resp)?; + + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let event = wait_for_analytics_event(&server, DEFAULT_READ_TIMEOUT, "codex_turn_event").await?; + assert_eq!(event["event_params"]["thread_id"], thread.id); + assert_eq!(event["event_params"]["turn_id"], turn.id); + assert_eq!( + event["event_params"]["app_server_client"]["product_client_id"], + DEFAULT_CLIENT_NAME + ); + assert_eq!(event["event_params"]["model"], "mock-model"); + assert_eq!(event["event_params"]["model_provider"], "mock_provider"); + assert_eq!(event["event_params"]["sandbox_policy"], "read_only"); + assert_eq!(event["event_params"]["ephemeral"], false); + assert_eq!(event["event_params"]["thread_source"], "user"); + assert_eq!(event["event_params"]["initialization_mode"], "new"); + assert_eq!( + event["event_params"]["subagent_source"], + serde_json::Value::Null + ); + assert_eq!( + event["event_params"]["parent_thread_id"], + serde_json::Value::Null + ); + assert_eq!(event["event_params"]["num_input_images"], 1); + assert_eq!(event["event_params"]["status"], "completed"); + assert!(event["event_params"]["started_at"].as_u64().is_some()); + assert!(event["event_params"]["completed_at"].as_u64().is_some()); + assert!(event["event_params"]["duration_ms"].as_u64().is_some()); + assert_eq!(event["event_params"]["input_tokens"], 0); + assert_eq!(event["event_params"]["cached_input_tokens"], 0); + assert_eq!(event["event_params"]["output_tokens"], 0); + 
assert_eq!(event["event_params"]["reasoning_output_tokens"], 0); + assert_eq!(event["event_params"]["total_tokens"], 0); + + Ok(()) +} + +#[tokio::test] +async fn turn_start_does_not_track_turn_event_analytics_without_feature() -> Result<()> { + let responses = vec![create_final_assistant_message_sse_response("Done")?]; + let server = create_mock_responses_server_sequence_unchecked(responses).await; + + let codex_home = TempDir::new()?; + write_mock_responses_config_toml_with_chatgpt_base_url( + codex_home.path(), + &server.uri(), + &server.uri(), + )?; + let config_path = codex_home.path().join("config.toml"); + let config_toml = std::fs::read_to_string(&config_path)?; + std::fs::write( + &config_path, + format!("{config_toml}\n[features]\ngeneral_analytics = false\n"), + )?; + mount_analytics_capture(&server, codex_home.path()).await?; + + let mut mcp = McpProcess::new_without_managed_config(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_req = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(thread_resp)?; + + let turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id, + input: vec![V2UserInput::Text { + text: "hello".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + let turn_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), + ) + .await??; + let _ = to_response::(turn_resp)?; + + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let turn_event = wait_for_analytics_event( + &server, + std::time::Duration::from_millis(250), + "codex_turn_event", + ) + .await; + assert!( + turn_event.is_err(), + "turn analytics should be gated off when general_analytics is disabled" + ); + Ok(()) +} + #[tokio::test] async fn turn_start_accepts_text_at_limit_with_mention_item() -> Result<()> { let responses = vec![create_final_assistant_message_sse_response("Done")?]; @@ -1553,7 +1716,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> { else { unreachable!("loop ensures we break on command execution items"); }; - assert_eq!(cwd, second_cwd); + assert_eq!(cwd.as_path(), second_cwd.as_path()); let expected_command = format_with_current_shell_display("echo second turn"); assert_eq!(command, expected_command); assert_eq!(status, CommandExecutionStatus::InProgress); diff --git a/codex-rs/app-server/tests/suite/v2/turn_start_zsh_fork.rs b/codex-rs/app-server/tests/suite/v2/turn_start_zsh_fork.rs index 105ae54542..eda24358ce 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_start_zsh_fork.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_start_zsh_fork.rs @@ -166,7 +166,7 @@ async fn turn_start_shell_zsh_fork_executes_command_v2() -> Result<()> { assert!(command.contains("/bin/sh -c")); assert!(command.contains("sleep 0.01")); assert!(command.contains(&release_marker.display().to_string())); - assert_eq!(cwd, 
workspace); + assert_eq!(cwd.as_path(), workspace.as_path()); mcp.interrupt_turn_and_wait_for_aborted(thread.id, turn.id, DEFAULT_READ_TIMEOUT) .await?; diff --git a/codex-rs/app-server/tests/suite/v2/turn_steer.rs b/codex-rs/app-server/tests/suite/v2/turn_steer.rs index a93bf6c6ab..16e28d6cc5 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_steer.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_steer.rs @@ -6,6 +6,7 @@ use app_test_support::create_mock_responses_server_sequence; use app_test_support::create_mock_responses_server_sequence_unchecked; use app_test_support::create_shell_command_sse_response; use app_test_support::to_response; +use app_test_support::write_mock_responses_config_toml_with_chatgpt_base_url; use codex_app_server::INPUT_TOO_LARGE_ERROR_CODE; use codex_app_server::INVALID_PARAMS_ERROR_CODE; use codex_app_server_protocol::JSONRPCError; @@ -23,6 +24,9 @@ use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS; use tempfile::TempDir; use tokio::time::timeout; +use super::analytics::enable_analytics_capture; +use super::analytics::wait_for_analytics_event; + const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] @@ -32,9 +36,14 @@ async fn turn_steer_requires_active_turn() -> Result<()> { std::fs::create_dir(&codex_home)?; let server = create_mock_responses_server_sequence(vec![]).await; - create_config_toml(&codex_home, &server.uri())?; + write_mock_responses_config_toml_with_chatgpt_base_url( + &codex_home, + &server.uri(), + &server.uri(), + )?; + enable_analytics_capture(&server, &codex_home).await?; - let mut mcp = McpProcess::new(&codex_home).await?; + let mut mcp = McpProcess::new_without_managed_config(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_req = mcp @@ -52,7 +61,7 @@ async fn turn_steer_requires_active_turn() -> Result<()> { let steer_req = mcp .send_turn_steer_request(TurnSteerParams { - thread_id: thread.id, + thread_id: 
thread.id.clone(), input: vec![V2UserInput::Text { text: "steer".to_string(), text_elements: Vec::new(), @@ -68,6 +77,21 @@ async fn turn_steer_requires_active_turn() -> Result<()> { .await??; assert_eq!(steer_err.error.code, -32600); + let event = + wait_for_analytics_event(&server, DEFAULT_READ_TIMEOUT, "codex_turn_steer_event").await?; + assert_eq!(event["event_params"]["thread_id"], thread.id); + assert_eq!(event["event_params"]["result"], "rejected"); + assert_eq!(event["event_params"]["num_input_images"], 0); + assert_eq!( + event["event_params"]["expected_turn_id"], + "turn-does-not-exist" + ); + assert_eq!( + event["event_params"]["accepted_turn_id"], + serde_json::Value::Null + ); + assert_eq!(event["event_params"]["rejection_reason"], "no_active_turn"); + Ok(()) } @@ -96,9 +120,14 @@ async fn turn_steer_rejects_oversized_text_input() -> Result<()> { "call_sleep", )?]) .await; - create_config_toml(&codex_home, &server.uri())?; + write_mock_responses_config_toml_with_chatgpt_base_url( + &codex_home, + &server.uri(), + &server.uri(), + )?; + enable_analytics_capture(&server, &codex_home).await?; - let mut mcp = McpProcess::new(&codex_home).await?; + let mut mcp = McpProcess::new_without_managed_config(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_req = mcp @@ -200,9 +229,14 @@ async fn turn_steer_returns_active_turn_id() -> Result<()> { "call_sleep", )?]) .await; - create_config_toml(&codex_home, &server.uri())?; + write_mock_responses_config_toml_with_chatgpt_base_url( + &codex_home, + &server.uri(), + &server.uri(), + )?; + enable_analytics_capture(&server, &codex_home).await?; - let mut mcp = McpProcess::new(&codex_home).await?; + let mut mcp = McpProcess::new_without_managed_config(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_req = mcp @@ -261,31 +295,20 @@ async fn turn_steer_returns_active_turn_id() -> Result<()> { let steer: TurnSteerResponse = 
to_response::(steer_resp)?; assert_eq!(steer.turn_id, turn.id); + let event = + wait_for_analytics_event(&server, DEFAULT_READ_TIMEOUT, "codex_turn_steer_event").await?; + assert_eq!(event["event_params"]["thread_id"], thread.id); + assert_eq!(event["event_params"]["result"], "accepted"); + assert_eq!(event["event_params"]["num_input_images"], 0); + assert_eq!(event["event_params"]["expected_turn_id"], turn.id); + assert_eq!(event["event_params"]["accepted_turn_id"], turn.id); + assert_eq!( + event["event_params"]["rejection_reason"], + serde_json::Value::Null + ); + mcp.interrupt_turn_and_wait_for_aborted(thread.id, steer.turn_id, DEFAULT_READ_TIMEOUT) .await?; Ok(()) } - -fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> { - let config_toml = codex_home.join("config.toml"); - std::fs::write( - config_toml, - format!( - r#" -model = "mock-model" -approval_policy = "never" -sandbox_mode = "danger-full-access" - -model_provider = "mock_provider" - -[model_providers.mock_provider] -name = "Mock provider for test" -base_url = "{server_uri}/v1" -wire_api = "responses" -request_max_retries = 0 -stream_max_retries = 0 -"# - ), - ) -} diff --git a/codex-rs/apply-patch/src/invocation.rs b/codex-rs/apply-patch/src/invocation.rs index 3b0db2fa9c..075c94c60c 100644 --- a/codex-rs/apply-patch/src/invocation.rs +++ b/codex-rs/apply-patch/src/invocation.rs @@ -135,6 +135,7 @@ pub async fn maybe_parse_apply_patch_verified( argv: &[String], cwd: &AbsolutePathBuf, fs: &dyn ExecutorFileSystem, + sandbox: Option<&codex_exec_server::FileSystemSandboxContext>, ) -> MaybeApplyPatchVerified { // Detect a raw patch body passed directly as the command or as the body of a shell // script. In these cases, report an explicit error rather than applying the patch. @@ -170,7 +171,7 @@ pub async fn maybe_parse_apply_patch_verified( ); } Hunk::DeleteFile { .. 
} => { - let content = match fs.read_file_text(&path).await { + let content = match fs.read_file_text(&path, sandbox).await { Ok(content) => content, Err(e) => { return MaybeApplyPatchVerified::CorrectnessError( @@ -192,7 +193,7 @@ pub async fn maybe_parse_apply_patch_verified( let ApplyPatchFileUpdate { unified_diff, content: contents, - } = match unified_diff_from_chunks(&path, &chunks, fs).await { + } = match unified_diff_from_chunks(&path, &chunks, fs, sandbox).await { Ok(diff) => diff, Err(e) => { return MaybeApplyPatchVerified::CorrectnessError(e); @@ -467,7 +468,8 @@ mod tests { maybe_parse_apply_patch_verified( &args, &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), - LOCAL_FS.as_ref() + LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await, MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) @@ -483,7 +485,8 @@ mod tests { maybe_parse_apply_patch_verified( &args, &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), - LOCAL_FS.as_ref() + LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await, MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) @@ -693,9 +696,10 @@ PATCH"#, }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -2,2 +2,2 @@ bar -baz @@ -731,9 +735,10 @@ PATCH"#, }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -3 +3,2 @@ baz +quux @@ -770,6 +775,7 @@ PATCH"#, &argv, &AbsolutePathBuf::from_absolute_path(session_dir.path()).unwrap(), LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await; @@ -823,6 +829,7 @@ PATCH"#, &argv, 
&AbsolutePathBuf::from_absolute_path(session_dir.path()).unwrap(), LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await; let action = match result { diff --git a/codex-rs/apply-patch/src/lib.rs b/codex-rs/apply-patch/src/lib.rs index 6b6aba2df3..09d777ef6f 100644 --- a/codex-rs/apply-patch/src/lib.rs +++ b/codex-rs/apply-patch/src/lib.rs @@ -12,13 +12,15 @@ use anyhow::Context; use anyhow::Result; use codex_exec_server::CreateDirectoryOptions; use codex_exec_server::ExecutorFileSystem; +use codex_exec_server::FileSystemSandboxContext; use codex_exec_server::RemoveOptions; use codex_utils_absolute_path::AbsolutePathBuf; pub use parser::Hunk; pub use parser::ParseError; use parser::ParseError::*; -use parser::UpdateFileChunk; +pub use parser::UpdateFileChunk; pub use parser::parse_patch; +pub use parser::parse_patch_streaming; use similar::TextDiff; use thiserror::Error; @@ -34,9 +36,9 @@ pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_too /// internal `apply_patch` path. /// /// Although this constant lives in `codex-apply-patch` (to avoid forcing -/// `codex-arg0` to depend on `codex-core`), it is part of the "codex core" -/// process-invocation contract between the apply-patch runtime and the arg0 -/// dispatcher. +/// `codex-arg0` to depend on `codex-core`), it remains part of the "codex core" +/// process-invocation contract for the standalone `apply_patch` command +/// surface. pub const CODEX_CORE_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch"; #[derive(Debug, Error, PartialEq)] @@ -133,8 +135,8 @@ pub enum MaybeApplyPatchVerified { pub struct ApplyPatchAction { changes: HashMap, - /// The raw patch argument that can be used with `apply_patch` as an exec - /// call. i.e., if the original arg was parsed in "lenient" mode with a + /// The raw patch argument that can be used to apply the patch. i.e., if the + /// original arg was parsed in "lenient" mode with a /// heredoc, this should be the value without the heredoc wrapper. 
pub patch: String, @@ -184,6 +186,7 @@ pub async fn apply_patch( stdout: &mut impl std::io::Write, stderr: &mut impl std::io::Write, fs: &dyn ExecutorFileSystem, + sandbox: Option<&FileSystemSandboxContext>, ) -> Result<(), ApplyPatchError> { let hunks = match parse_patch(patch) { Ok(source) => source.hunks, @@ -207,7 +210,7 @@ pub async fn apply_patch( } }; - apply_hunks(&hunks, cwd, stdout, stderr, fs).await?; + apply_hunks(&hunks, cwd, stdout, stderr, fs, sandbox).await?; Ok(()) } @@ -219,9 +222,10 @@ pub async fn apply_hunks( stdout: &mut impl std::io::Write, stderr: &mut impl std::io::Write, fs: &dyn ExecutorFileSystem, + sandbox: Option<&FileSystemSandboxContext>, ) -> Result<(), ApplyPatchError> { // Delegate to a helper that applies each hunk to the filesystem. - match apply_hunks_to_files(hunks, cwd, fs).await { + match apply_hunks_to_files(hunks, cwd, fs, sandbox).await { Ok(affected) => { print_summary(&affected, stdout).map_err(ApplyPatchError::from)?; Ok(()) @@ -257,6 +261,7 @@ async fn apply_hunks_to_files( hunks: &[Hunk], cwd: &AbsolutePathBuf, fs: &dyn ExecutorFileSystem, + sandbox: Option<&FileSystemSandboxContext>, ) -> anyhow::Result { if hunks.is_empty() { anyhow::bail!("No files were modified."); @@ -270,24 +275,18 @@ async fn apply_hunks_to_files( let path_abs = hunk.resolve_path(cwd); match hunk { Hunk::AddFile { contents, .. } => { - if let Some(parent_abs) = path_abs.parent() { - fs.create_directory(&parent_abs, CreateDirectoryOptions { recursive: true }) - .await - .with_context(|| { - format!( - "Failed to create parent directories for {}", - path_abs.display() - ) - })?; - } - fs.write_file(&path_abs, contents.clone().into_bytes()) - .await - .with_context(|| format!("Failed to write file {}", path_abs.display()))?; + write_file_with_missing_parent_retry( + fs, + &path_abs, + contents.clone().into_bytes(), + sandbox, + ) + .await?; added.push(affected_path); } Hunk::DeleteFile { .. 
} => { let result: io::Result<()> = async { - let metadata = fs.get_metadata(&path_abs).await?; + let metadata = fs.get_metadata(&path_abs, sandbox).await?; if metadata.is_directory { return Err(io::Error::new( io::ErrorKind::InvalidInput, @@ -300,6 +299,7 @@ async fn apply_hunks_to_files( recursive: false, force: false, }, + sandbox, ) .await } @@ -311,27 +311,18 @@ async fn apply_hunks_to_files( move_path, chunks, .. } => { let AppliedPatch { new_contents, .. } = - derive_new_contents_from_chunks(&path_abs, chunks, fs).await?; + derive_new_contents_from_chunks(&path_abs, chunks, fs, sandbox).await?; if let Some(dest) = move_path { let dest_abs = AbsolutePathBuf::resolve_path_against_base(dest, cwd); - if let Some(parent_abs) = dest_abs.parent() { - fs.create_directory( - &parent_abs, - CreateDirectoryOptions { recursive: true }, - ) - .await - .with_context(|| { - format!( - "Failed to create parent directories for {}", - dest_abs.display() - ) - })?; - } - fs.write_file(&dest_abs, new_contents.into_bytes()) - .await - .with_context(|| format!("Failed to write file {}", dest_abs.display()))?; + write_file_with_missing_parent_retry( + fs, + &dest_abs, + new_contents.into_bytes(), + sandbox, + ) + .await?; let result: io::Result<()> = async { - let metadata = fs.get_metadata(&path_abs).await?; + let metadata = fs.get_metadata(&path_abs, sandbox).await?; if metadata.is_directory { return Err(io::Error::new( io::ErrorKind::InvalidInput, @@ -344,6 +335,7 @@ async fn apply_hunks_to_files( recursive: false, force: false, }, + sandbox, ) .await } @@ -353,7 +345,7 @@ async fn apply_hunks_to_files( })?; modified.push(affected_path); } else { - fs.write_file(&path_abs, new_contents.into_bytes()) + fs.write_file(&path_abs, new_contents.into_bytes(), sandbox) .await .with_context(|| format!("Failed to write file {}", path_abs.display()))?; modified.push(affected_path); @@ -368,6 +360,40 @@ async fn apply_hunks_to_files( }) } +async fn write_file_with_missing_parent_retry( + 
fs: &dyn ExecutorFileSystem, + path_abs: &AbsolutePathBuf, + contents: Vec, + sandbox: Option<&FileSystemSandboxContext>, +) -> anyhow::Result<()> { + match fs.write_file(path_abs, contents.clone(), sandbox).await { + Ok(()) => Ok(()), + Err(err) if err.kind() == io::ErrorKind::NotFound => { + if let Some(parent_abs) = path_abs.parent() { + fs.create_directory( + &parent_abs, + CreateDirectoryOptions { recursive: true }, + sandbox, + ) + .await + .with_context(|| { + format!( + "Failed to create parent directories for {}", + path_abs.display() + ) + })?; + } + fs.write_file(path_abs, contents, sandbox) + .await + .with_context(|| format!("Failed to write file {}", path_abs.display()))?; + Ok(()) + } + Err(err) => { + Err(err).with_context(|| format!("Failed to write file {}", path_abs.display())) + } + } +} + struct AppliedPatch { original_contents: String, new_contents: String, @@ -379,8 +405,9 @@ async fn derive_new_contents_from_chunks( path_abs: &AbsolutePathBuf, chunks: &[UpdateFileChunk], fs: &dyn ExecutorFileSystem, + sandbox: Option<&FileSystemSandboxContext>, ) -> std::result::Result { - let original_contents = fs.read_file_text(path_abs).await.map_err(|err| { + let original_contents = fs.read_file_text(path_abs, sandbox).await.map_err(|err| { ApplyPatchError::IoError(IoError { context: format!("Failed to read file to update {}", path_abs.display()), source: err, @@ -540,8 +567,9 @@ pub async fn unified_diff_from_chunks( path_abs: &AbsolutePathBuf, chunks: &[UpdateFileChunk], fs: &dyn ExecutorFileSystem, + sandbox: Option<&FileSystemSandboxContext>, ) -> std::result::Result { - unified_diff_from_chunks_with_context(path_abs, chunks, /*context*/ 1, fs).await + unified_diff_from_chunks_with_context(path_abs, chunks, /*context*/ 1, fs, sandbox).await } pub async fn unified_diff_from_chunks_with_context( @@ -549,11 +577,12 @@ pub async fn unified_diff_from_chunks_with_context( chunks: &[UpdateFileChunk], context: usize, fs: &dyn ExecutorFileSystem, + sandbox: 
Option<&FileSystemSandboxContext>, ) -> std::result::Result { let AppliedPatch { original_contents, new_contents, - } = derive_new_contents_from_chunks(path_abs, chunks, fs).await?; + } = derive_new_contents_from_chunks(path_abs, chunks, fs, sandbox).await?; let text_diff = TextDiff::from_lines(&original_contents, &new_contents); let unified_diff = text_diff.unified_diff().context_radius(context).to_string(); Ok(ApplyPatchFileUpdate { @@ -614,6 +643,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -667,9 +697,16 @@ mod tests { let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &cwd, &mut stdout, &mut stderr, LOCAL_FS.as_ref()) - .await - .unwrap(); + apply_patch( + &patch, + &cwd, + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + /*sandbox*/ None, + ) + .await + .unwrap(); assert_eq!(fs::read_to_string(&relative_add).unwrap(), "relative add\n"); assert_eq!(fs::read_to_string(&absolute_add).unwrap(), "absolute add\n"); @@ -709,6 +746,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -744,6 +782,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -783,6 +822,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -831,6 +871,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -888,6 +929,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -931,6 +973,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -973,6 +1016,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -1019,9 +1063,14 @@ mod tests { _ => panic!("Expected a single UpdateFile hunk"), }; let path_abs = path.as_path().abs(); - let diff = 
unified_diff_from_chunks(&path_abs, update_file_chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = unified_diff_from_chunks( + &path_abs, + update_file_chunks, + LOCAL_FS.as_ref(), + /*sandbox*/ None, + ) + .await + .unwrap(); let expected_diff = r#"@@ -1,4 +1,4 @@ foo -bar @@ -1061,9 +1110,10 @@ mod tests { }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -1,2 +1,2 @@ -foo +FOO @@ -1101,9 +1151,10 @@ mod tests { }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -2,2 +2,2 @@ bar -baz @@ -1139,9 +1190,10 @@ mod tests { }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -3 +3,2 @@ baz +quux @@ -1188,9 +1240,10 @@ mod tests { }; let path_abs = path.as_path().abs(); - let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) - .await - .unwrap(); + let diff = + unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref(), /*sandbox*/ None) + .await + .unwrap(); let expected_diff = r#"@@ -1,6 +1,7 @@ a @@ -1219,6 +1272,7 @@ mod tests { &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await .unwrap(); @@ -1258,6 +1312,7 @@ g &mut stdout, &mut stderr, LOCAL_FS.as_ref(), + /*sandbox*/ None, ) .await; assert!(result.is_err()); diff --git a/codex-rs/apply-patch/src/parser.rs b/codex-rs/apply-patch/src/parser.rs index 24ebcb1463..64d3685d59 100644 
--- a/codex-rs/apply-patch/src/parser.rs +++ b/codex-rs/apply-patch/src/parser.rs @@ -132,6 +132,14 @@ pub fn parse_patch(patch: &str) -> Result { parse_patch_text(patch, mode) } +/// Parses streamed patch text that may not have reached `*** End Patch` yet. +/// +/// This entry point is for progress reporting only; callers must not use its +/// output to apply a patch. +pub fn parse_patch_streaming(patch: &str) -> Result { + parse_patch_text(patch, ParseMode::Streaming) +} + enum ParseMode { /// Parse the patch text argument as is. Strict, @@ -169,32 +177,33 @@ enum ParseMode { /// `<<'EOF'` and ends with `EOF\n`. If so, we strip off these markers, /// trim() the result, and treat what is left as the patch text. Lenient, + + /// Parse partial patch text for progress reporting while the model is + /// still streaming tool input. This mode requires a begin marker but does + /// not require an end marker, and its output must not be used to apply a + /// patch. + Streaming, } fn parse_patch_text(patch: &str, mode: ParseMode) -> Result { let lines: Vec<&str> = patch.trim().lines().collect(); - let lines: &[&str] = match check_patch_boundaries_strict(&lines) { - Ok(()) => &lines, - Err(e) => match mode { - ParseMode::Strict => { - return Err(e); - } - ParseMode::Lenient => check_patch_boundaries_lenient(&lines, e)?, - }, + let (patch_lines, hunk_lines) = match mode { + ParseMode::Strict => check_patch_boundaries_strict(&lines)?, + ParseMode::Lenient => check_patch_boundaries_lenient(&lines)?, + ParseMode::Streaming => check_patch_boundaries_streaming(&lines)?, }; let mut hunks: Vec = Vec::new(); - // The above checks ensure that lines.len() >= 2. 
- let last_line_index = lines.len().saturating_sub(1); - let mut remaining_lines = &lines[1..last_line_index]; + let mut remaining_lines = hunk_lines; let mut line_number = 2; + let allow_incomplete = matches!(mode, ParseMode::Streaming); while !remaining_lines.is_empty() { - let (hunk, hunk_lines) = parse_one_hunk(remaining_lines, line_number)?; + let (hunk, hunk_lines) = parse_one_hunk(remaining_lines, line_number, allow_incomplete)?; hunks.push(hunk); line_number += hunk_lines; remaining_lines = &remaining_lines[hunk_lines..] } - let patch = lines.join("\n"); + let patch = patch_lines.join("\n"); Ok(ApplyPatchArgs { hunks, patch, @@ -202,15 +211,37 @@ fn parse_patch_text(patch: &str, mode: ParseMode) -> Result( + original_lines: &'a [&'a str], +) -> Result<(&'a [&'a str], &'a [&'a str]), ParseError> { + match original_lines { + [first, ..] if first.trim() == BEGIN_PATCH_MARKER => { + let body_lines = if original_lines + .last() + .is_some_and(|line| line.trim() == END_PATCH_MARKER) + { + &original_lines[1..original_lines.len() - 1] + } else { + &original_lines[1..] + }; + Ok((original_lines, body_lines)) + } + _ => check_patch_boundaries_strict(original_lines), + } +} + /// Checks the start and end lines of the patch text for `apply_patch`, /// returning an error if they do not match the expected markers. -fn check_patch_boundaries_strict(lines: &[&str]) -> Result<(), ParseError> { +fn check_patch_boundaries_strict<'a>( + lines: &'a [&'a str], +) -> Result<(&'a [&'a str], &'a [&'a str]), ParseError> { let (first_line, last_line) = match lines { [] => (None, None), [first] => (Some(first), Some(first)), [first, .., last] => (Some(first), Some(last)), }; - check_start_and_end_lines_strict(first_line, last_line) + check_start_and_end_lines_strict(first_line, last_line)?; + Ok((lines, &lines[1..lines.len() - 1])) } /// If we are in lenient mode, we check if the first line starts with `< Result<(), ParseError> { /// contents, excluding the heredoc markers. 
fn check_patch_boundaries_lenient<'a>( original_lines: &'a [&'a str], - original_parse_error: ParseError, -) -> Result<&'a [&'a str], ParseError> { +) -> Result<(&'a [&'a str], &'a [&'a str]), ParseError> { + let original_parse_error = match check_patch_boundaries_strict(original_lines) { + Ok(lines) => return Ok(lines), + Err(e) => e, + }; + match original_lines { [first, .., last] => { if (first == &"<( && original_lines.len() >= 4 { let inner_lines = &original_lines[1..original_lines.len() - 1]; - match check_patch_boundaries_strict(inner_lines) { - Ok(()) => Ok(inner_lines), - Err(e) => Err(e), - } + check_patch_boundaries_strict(inner_lines) } else { Err(original_parse_error) } @@ -265,7 +297,11 @@ fn check_start_and_end_lines_strict( /// Attempts to parse a single hunk from the start of lines. /// Returns the parsed hunk and the number of lines parsed (or a ParseError). -fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> { +fn parse_one_hunk( + lines: &[&str], + line_number: usize, + allow_incomplete: bool, +) -> Result<(Hunk, usize), ParseError> { // Be tolerant of case mismatches and extra padding around marker strings. let first_line = lines[0].trim(); if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER) { @@ -321,15 +357,26 @@ fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), P continue; } - if remaining_lines[0].starts_with("***") { + if remaining_lines[0].starts_with('*') { break; } - let (chunk, chunk_lines) = parse_update_file_chunk( + if allow_incomplete && remaining_lines[0] == "@" { + break; + } + + let parsed_chunk = parse_update_file_chunk( remaining_lines, line_number + parsed_lines, chunks.is_empty(), - )?; + ); + let (chunk, chunk_lines) = match parsed_chunk { + Ok(parsed) => parsed, + Err(InvalidHunkError { .. 
}) if allow_incomplete && !chunks.is_empty() => { + break; + } + Err(err) => return Err(err), + }; chunks.push(chunk); parsed_lines += chunk_lines; remaining_lines = &remaining_lines[chunk_lines..] @@ -453,6 +500,166 @@ fn parse_update_file_chunk( Ok((chunk, parsed_lines + start_index)) } +#[test] +fn test_parse_patch_streaming() { + assert_eq!( + parse_patch_streaming("*** Begin Patch\n*** Add File: src/hello.txt\n+hello\n+wor"), + Ok(ApplyPatchArgs { + hunks: vec![AddFile { + path: PathBuf::from("src/hello.txt"), + contents: "hello\nwor\n".to_string(), + }], + patch: "*** Begin Patch\n*** Add File: src/hello.txt\n+hello\n+wor".to_string(), + workdir: None, + }) + ); + + assert_eq!( + parse_patch_streaming( + "*** Begin Patch\n*** Update File: src/old.rs\n*** Move to: src/new.rs\n@@\n-old\n+new", + ), + Ok(ApplyPatchArgs { + hunks: vec![UpdateFile { + path: PathBuf::from("src/old.rs"), + move_path: Some(PathBuf::from("src/new.rs")), + chunks: vec![UpdateFileChunk { + change_context: None, + old_lines: vec!["old".to_string()], + new_lines: vec!["new".to_string()], + is_end_of_file: false, + }], + }], + patch: "*** Begin Patch\n*** Update File: src/old.rs\n*** Move to: src/new.rs\n@@\n-old\n+new".to_string(), + workdir: None, + }) + ); + + assert!( + parse_patch_text( + "*** Begin Patch\n*** Delete File: gone.txt", + ParseMode::Streaming + ) + .is_ok() + ); + assert!( + parse_patch_text( + "*** Begin Patch\n*** Delete File: gone.txt", + ParseMode::Strict + ) + .is_err() + ); + + assert_eq!( + parse_patch_streaming( + "*** Begin Patch\n*** Add File: src/one.txt\n+one\n*** Delete File: src/two.txt\n", + ), + Ok(ApplyPatchArgs { + hunks: vec![ + AddFile { + path: PathBuf::from("src/one.txt"), + contents: "one\n".to_string(), + }, + DeleteFile { + path: PathBuf::from("src/two.txt"), + }, + ], + patch: "*** Begin Patch\n*** Add File: src/one.txt\n+one\n*** Delete File: src/two.txt" + .to_string(), + workdir: None, + }) + ); +} + +#[test] +fn 
test_parse_patch_streaming_large_patch_by_character() { + let patch = "\ +*** Begin Patch +*** Add File: docs/release-notes.md ++# Release notes ++ ++## CLI ++- Surface apply_patch progress while arguments stream. ++- Keep final patch application gated on the completed tool call. ++- Include file summaries in the progress event payload. +*** Update File: src/config.rs +@@ impl Config +- pub apply_patch_progress: bool, ++ pub stream_apply_patch_progress: bool, + pub include_diagnostics: bool, +@@ fn default_progress_interval() +- Duration::from_millis(500) ++ Duration::from_millis(250) +*** Delete File: src/legacy_patch_progress.rs +*** Update File: crates/cli/src/main.rs +*** Move to: crates/cli/src/bin/codex.rs +@@ fn run() +- let args = Args::parse(); +- dispatch(args) ++ let cli = Cli::parse(); ++ dispatch(cli) +*** Add File: tests/fixtures/apply_patch_progress.json ++{ ++ \"type\": \"apply_patch_progress\", ++ \"hunks\": [ ++ { \"operation\": \"add\", \"path\": \"docs/release-notes.md\" }, ++ { \"operation\": \"update\", \"path\": \"src/config.rs\" } ++ ] ++} +*** Update File: README.md +@@ Development workflow + Build the Rust workspace before opening a pull request. ++When touching streamed tool calls, include parser coverage for partial input. ++Prefer tests that exercise the exact event payload shape. 
+*** Delete File: docs/old-apply-patch-progress.md +*** End Patch"; + + let mut max_hunk_count = 0; + let mut saw_hunk_counts = Vec::new(); + for i in 1..=patch.len() { + let partial = &patch[..i]; + if let Ok(parsed) = parse_patch_streaming(partial) { + let hunk_count = parsed.hunks.len(); + assert!( + hunk_count >= max_hunk_count, + "hunk count should never decrease while streaming: {hunk_count} < {max_hunk_count} for {partial:?}", + ); + if hunk_count > max_hunk_count { + saw_hunk_counts.push(hunk_count); + max_hunk_count = hunk_count; + } + } + } + + assert_eq!(saw_hunk_counts, vec![1, 2, 3, 4, 5, 6, 7]); + let parsed = parse_patch_streaming(patch).unwrap(); + assert_eq!(parsed.hunks.len(), 7); + assert_eq!( + parsed + .hunks + .iter() + .map(|hunk| match hunk { + AddFile { .. } => "add", + DeleteFile { .. } => "delete", + UpdateFile { + move_path: Some(_), .. + } => "move-update", + UpdateFile { + move_path: None, .. + } => "update", + }) + .collect::>(), + vec![ + "add", + "update", + "delete", + "move-update", + "add", + "update", + "delete" + ] + ); +} + #[test] fn test_parse_patch() { assert_eq!( @@ -794,7 +1001,7 @@ fn test_parse_patch_lenient() { #[test] fn test_parse_one_hunk() { assert_eq!( - parse_one_hunk(&["bad"], /*line_number*/ 234), + parse_one_hunk(&["bad"], /*line_number*/ 234, /*allow_incomplete*/ false), Err(InvalidHunkError { message: "'bad' is not a valid hunk header. \ Valid hunk headers: '*** Add File: {path}', '*** Delete File: {path}', '*** Update File: {path}'".to_string(), diff --git a/codex-rs/apply-patch/src/standalone_executable.rs b/codex-rs/apply-patch/src/standalone_executable.rs index 149bfd3382..093bda543b 100644 --- a/codex-rs/apply-patch/src/standalone_executable.rs +++ b/codex-rs/apply-patch/src/standalone_executable.rs @@ -71,6 +71,7 @@ pub fn run_main() -> i32 { &mut stdout, &mut stderr, codex_exec_server::LOCAL_FS.as_ref(), + /*sandbox*/ None, )) { Ok(()) => { // Flush to ensure output ordering when used in pipelines. 
diff --git a/codex-rs/arg0/src/lib.rs b/codex-rs/arg0/src/lib.rs index f8b61796b5..38f88452af 100644 --- a/codex-rs/arg0/src/lib.rs +++ b/codex-rs/arg0/src/lib.rs @@ -4,6 +4,7 @@ use std::path::Path; use std::path::PathBuf; use codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1; +use codex_exec_server::CODEX_FS_HELPER_ARG1; use codex_sandboxing::landlock::CODEX_LINUX_SANDBOX_ARG0; use codex_utils_home_dir::find_codex_home; #[cfg(unix)] @@ -93,6 +94,9 @@ pub fn arg0_dispatch() -> Option { } let argv1 = args.next().unwrap_or_default(); + if argv1 == CODEX_FS_HELPER_ARG1 { + codex_exec_server::run_fs_helper_main(); + } if argv1 == CODEX_CORE_APPLY_PATCH_ARG1 { let patch_arg = args.next().and_then(|s| s.to_str().map(str::to_owned)); let exit_code = match patch_arg { @@ -116,6 +120,7 @@ pub fn arg0_dispatch() -> Option { &mut stdout, &mut stderr, codex_exec_server::LOCAL_FS.as_ref(), + /*sandbox*/ None, )) { Ok(()) => 0, Err(_) => 1, @@ -249,7 +254,7 @@ where /// /// - UNIX: `apply_patch` symlink to the current executable /// - WINDOWS: `apply_patch.bat` batch script to invoke the current executable -/// with the "secret" --codex-run-as-apply-patch flag. +/// with the hidden `--codex-run-as-apply-patch` flag. 
/// /// This temporary directory is prepended to the PATH environment variable so /// that `apply_patch` can be on the PATH without requiring the user to @@ -325,13 +330,13 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result, user_agent: Option, chatgpt_account_id: Option, + chatgpt_account_is_fedramp: bool, path_style: PathStyle, } @@ -129,6 +130,7 @@ impl Client { bearer_token: None, user_agent: None, chatgpt_account_id: None, + chatgpt_account_is_fedramp: false, path_style, }) } @@ -141,6 +143,9 @@ impl Client { if let Some(account_id) = auth.get_account_id() { client = client.with_chatgpt_account_id(account_id); } + if auth.is_fedramp_account() { + client = client.with_fedramp_routing_header(); + } Ok(client) } @@ -161,6 +166,11 @@ impl Client { self } + pub fn with_fedramp_routing_header(mut self) -> Self { + self.chatgpt_account_is_fedramp = true; + self + } + pub fn with_path_style(mut self, style: PathStyle) -> Self { self.path_style = style; self @@ -185,6 +195,11 @@ impl Client { { h.insert(name, hv); } + if self.chatgpt_account_is_fedramp + && let Ok(name) = HeaderName::from_bytes(b"X-OpenAI-Fedramp") + { + h.insert(name, HeaderValue::from_static("true")); + } h } diff --git a/codex-rs/chatgpt/Cargo.toml b/codex-rs/chatgpt/Cargo.toml index 381b4fc873..354449934a 100644 --- a/codex-rs/chatgpt/Cargo.toml +++ b/codex-rs/chatgpt/Cargo.toml @@ -10,6 +10,7 @@ workspace = true [dependencies] anyhow = { workspace = true } clap = { workspace = true, features = ["derive"] } +codex-app-server-protocol = { workspace = true } codex-connectors = { workspace = true } codex-config = { workspace = true } codex-core = { workspace = true } diff --git a/codex-rs/chatgpt/src/connectors.rs b/codex-rs/chatgpt/src/connectors.rs index 1ea293f974..5f6efbc124 100644 --- a/codex-rs/chatgpt/src/connectors.rs +++ b/codex-rs/chatgpt/src/connectors.rs @@ -1,7 +1,3 @@ -use codex_core::config::Config; -use codex_login::AuthManager; -use codex_login::CodexAuth; -use 
codex_login::token_data::TokenData; use std::collections::HashSet; use std::time::Duration; @@ -9,27 +5,30 @@ use crate::chatgpt_client::chatgpt_get_request_with_timeout; use crate::chatgpt_token::get_chatgpt_token_data; use crate::chatgpt_token::init_chatgpt_token_from_auth; +use codex_app_server_protocol::AppInfo; use codex_connectors::AllConnectorsCacheKey; use codex_connectors::DirectoryListResponse; - -pub use codex_core::connectors::AppInfo; -pub use codex_core::connectors::connector_display_label; -use codex_core::connectors::filter_disallowed_connectors; +use codex_connectors::filter::filter_disallowed_connectors; +use codex_connectors::merge::merge_connectors; +use codex_connectors::merge::merge_plugin_connectors; +use codex_core::config::Config; pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools; pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools_with_options; pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools_with_options_and_status; pub use codex_core::connectors::list_cached_accessible_connectors_from_mcp_tools; -use codex_core::connectors::merge_connectors; -use codex_core::connectors::merge_plugin_apps; pub use codex_core::connectors::with_app_enabled_state; use codex_core::plugins::AppConnectorId; use codex_core::plugins::PluginsManager; +use codex_login::AuthManager; +use codex_login::CodexAuth; +use codex_login::default_client::originator; +use codex_login::token_data::TokenData; const DIRECTORY_CONNECTORS_TIMEOUT: Duration = Duration::from_secs(60); async fn apps_enabled(config: &Config) -> bool { let auth_manager = AuthManager::shared( - config.codex_home.clone(), + config.codex_home.to_path_buf(), /*enable_codex_api_key_env*/ false, config.cli_auth_credentials_store_mode, ); @@ -73,10 +72,18 @@ pub async fn list_cached_all_connectors(config: &Config) -> Option> } let token_data = get_chatgpt_token_data()?; let cache_key = all_connectors_cache_key(config, &token_data); - 
codex_connectors::cached_all_connectors(&cache_key).map(|connectors| { - let connectors = merge_plugin_apps(connectors, plugin_apps_for_config(config)); - filter_disallowed_connectors(connectors) - }) + let connectors = codex_connectors::cached_all_connectors(&cache_key)?; + let connectors = merge_plugin_connectors( + connectors, + plugin_apps_for_config(config) + .await + .into_iter() + .map(|connector_id| connector_id.0), + ); + Some(filter_disallowed_connectors( + connectors, + originator().value.as_str(), + )) } pub async fn list_all_connectors_with_options( @@ -106,8 +113,17 @@ pub async fn list_all_connectors_with_options( }, ) .await?; - let connectors = merge_plugin_apps(connectors, plugin_apps_for_config(config)); - Ok(filter_disallowed_connectors(connectors)) + let connectors = merge_plugin_connectors( + connectors, + plugin_apps_for_config(config) + .await + .into_iter() + .map(|connector_id| connector_id.0), + ); + Ok(filter_disallowed_connectors( + connectors, + originator().value.as_str(), + )) } fn all_connectors_cache_key(config: &Config, token_data: &TokenData) -> AllConnectorsCacheKey { @@ -119,9 +135,10 @@ fn all_connectors_cache_key(config: &Config, token_data: &TokenData) -> AllConne ) } -fn plugin_apps_for_config(config: &Config) -> Vec { - PluginsManager::new(config.codex_home.clone()) +async fn plugin_apps_for_config(config: &Config) -> Vec { + PluginsManager::new(config.codex_home.to_path_buf()) .plugins_for_config(config) + .await .effective_apps() } @@ -134,7 +151,13 @@ pub fn connectors_for_plugin_apps( .map(|connector_id| connector_id.0.as_str()) .collect::>(); - filter_disallowed_connectors(merge_plugin_apps(connectors, plugin_apps.to_vec())) + let connectors = merge_plugin_connectors( + connectors, + plugin_apps + .iter() + .map(|connector_id| connector_id.0.clone()), + ); + filter_disallowed_connectors(connectors, originator().value.as_str()) .into_iter() .filter(|connector| plugin_app_ids.contains(connector.id.as_str())) .collect() 
@@ -158,13 +181,13 @@ pub fn merge_connectors_with_accessible( accessible_connectors }; let merged = merge_connectors(connectors, accessible_connectors); - filter_disallowed_connectors(merged) + filter_disallowed_connectors(merged, originator().value.as_str()) } #[cfg(test)] mod tests { use super::*; - use codex_core::connectors::connector_install_url; + use codex_connectors::metadata::connector_install_url; use codex_core::plugins::AppConnectorId; use pretty_assertions::assert_eq; @@ -186,46 +209,6 @@ mod tests { } } - #[test] - fn allows_asdk_connectors() { - let filtered = filter_disallowed_connectors(vec![app("asdk_app_hidden"), app("alpha")]); - assert_eq!(filtered, vec![app("asdk_app_hidden"), app("alpha")]); - } - - #[test] - fn allows_whitelisted_asdk_connectors() { - let filtered = filter_disallowed_connectors(vec![ - app("asdk_app_69781557cc1481919cf5e9824fa2e792"), - app("beta"), - ]); - assert_eq!( - filtered, - vec![ - app("asdk_app_69781557cc1481919cf5e9824fa2e792"), - app("beta") - ] - ); - } - - #[test] - fn filters_openai_prefixed_connectors() { - let filtered = filter_disallowed_connectors(vec![ - app("connector_openai_foo"), - app("connector_openai_bar"), - app("gamma"), - ]); - assert_eq!(filtered, vec![app("gamma")]); - } - - #[test] - fn filters_disallowed_connector_ids() { - let filtered = filter_disallowed_connectors(vec![ - app("asdk_app_6938a94a61d881918ef32cb999ff937c"), - app("delta"), - ]); - assert_eq!(filtered, vec![app("delta")]); - } - fn merged_app(id: &str, is_accessible: bool) -> AppInfo { AppInfo { id: id.to_string(), diff --git a/codex-rs/cli/Cargo.toml b/codex-rs/cli/Cargo.toml index ab0d86de2c..11d3eb3bbd 100644 --- a/codex-rs/cli/Cargo.toml +++ b/codex-rs/cli/Cargo.toml @@ -24,6 +24,7 @@ codex-app-server = { workspace = true } codex-app-server-protocol = { workspace = true } codex-app-server-test-client = { workspace = true } codex-arg0 = { workspace = true } +codex-api = { workspace = true } codex-chatgpt = { workspace = 
true } codex-cloud-tasks = { path = "../cloud-tasks" } codex-utils-cli = { workspace = true } @@ -44,6 +45,7 @@ codex-state = { workspace = true } codex-stdio-to-uds = { workspace = true } codex-terminal-detection = { workspace = true } codex-tui = { workspace = true } +codex-utils-absolute-path = { workspace = true } codex-utils-path = { workspace = true } libc = { workspace = true } owo-colors = { workspace = true } diff --git a/codex-rs/cli/src/debug_sandbox.rs b/codex-rs/cli/src/debug_sandbox.rs index 0ad2f36382..a2cbf61874 100644 --- a/codex-rs/cli/src/debug_sandbox.rs +++ b/codex-rs/cli/src/debug_sandbox.rs @@ -18,7 +18,10 @@ use codex_protocol::config_types::SandboxMode; use codex_protocol::permissions::NetworkSandboxPolicy; use codex_sandboxing::landlock::create_linux_sandbox_command_args_for_policies; #[cfg(target_os = "macos")] -use codex_sandboxing::seatbelt::create_seatbelt_command_args_for_policies; +use codex_sandboxing::seatbelt::CreateSeatbeltCommandArgsParams; +#[cfg(target_os = "macos")] +use codex_sandboxing::seatbelt::create_seatbelt_command_args; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_cli::CliConfigOverrides; use tokio::process::Child; use tokio::process::Command as TokioCommand; @@ -39,6 +42,7 @@ pub async fn run_command_under_seatbelt( ) -> anyhow::Result<()> { let SeatbeltCommand { full_auto, + allow_unix_sockets, log_denials, config_overrides, command, @@ -50,6 +54,7 @@ pub async fn run_command_under_seatbelt( codex_linux_sandbox_exe, SandboxType::Seatbelt, log_denials, + &allow_unix_sockets, ) .await } @@ -78,6 +83,7 @@ pub async fn run_command_under_landlock( codex_linux_sandbox_exe, SandboxType::Landlock, /*log_denials*/ false, + &[], ) .await } @@ -98,6 +104,7 @@ pub async fn run_command_under_windows( codex_linux_sandbox_exe, SandboxType::Windows, /*log_denials*/ false, + &[], ) .await } @@ -116,6 +123,8 @@ async fn run_command_under_sandbox( codex_linux_sandbox_exe: Option, sandbox_type: SandboxType, 
log_denials: bool, + #[cfg_attr(not(target_os = "macos"), allow(unused_variables))] + allow_unix_sockets: &[AbsolutePathBuf], ) -> anyhow::Result<()> { let config = load_debug_sandbox_config( config_overrides @@ -252,14 +261,15 @@ async fn run_command_under_sandbox( let mut child = match sandbox_type { #[cfg(target_os = "macos")] SandboxType::Seatbelt => { - let args = create_seatbelt_command_args_for_policies( + let args = create_seatbelt_command_args(CreateSeatbeltCommandArgsParams { command, - &config.permissions.file_system_sandbox_policy, - config.permissions.network_sandbox_policy, - sandbox_policy_cwd.as_path(), - /*enforce_managed_network*/ false, - network.as_ref(), - ); + file_system_sandbox_policy: &config.permissions.file_system_sandbox_policy, + network_sandbox_policy: config.permissions.network_sandbox_policy, + sandbox_policy_cwd: sandbox_policy_cwd.as_path(), + enforce_managed_network: false, + network: network.as_ref(), + extra_allow_unix_sockets: allow_unix_sockets, + }); let network_policy = config.permissions.network_sandbox_policy; spawn_debug_sandbox_child( PathBuf::from("/usr/bin/sandbox-exec"), diff --git a/codex-rs/cli/src/lib.rs b/codex-rs/cli/src/lib.rs index 848391293c..cac34b3b61 100644 --- a/codex-rs/cli/src/lib.rs +++ b/codex-rs/cli/src/lib.rs @@ -3,6 +3,7 @@ mod exit_status; pub(crate) mod login; use clap::Parser; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_cli::CliConfigOverrides; pub use debug_sandbox::run_command_under_landlock; @@ -22,6 +23,10 @@ pub struct SeatbeltCommand { #[arg(long = "full-auto", default_value_t = false)] pub full_auto: bool, + /// Allow the sandboxed command to bind/connect AF_UNIX sockets rooted at this path. Relative paths are resolved against the current directory. Repeat to allow multiple paths. 
+ #[arg(long = "allow-unix-socket", value_parser = parse_allow_unix_socket_path)] + pub allow_unix_sockets: Vec, + /// While the command runs, capture macOS sandbox denials via `log stream` and print them after exit #[arg(long = "log-denials", default_value_t = false)] pub log_denials: bool, @@ -34,6 +39,11 @@ pub struct SeatbeltCommand { pub command: Vec, } +fn parse_allow_unix_socket_path(raw: &str) -> Result { + AbsolutePathBuf::relative_to_current_dir(raw) + .map_err(|err| format!("invalid path {raw}: {err}")) +} + #[derive(Debug, Parser)] pub struct LandlockCommand { /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs index 9fa7dc4508..bd17a546a1 100644 --- a/codex-rs/cli/src/login.rs +++ b/codex-rs/cli/src/login.rs @@ -141,7 +141,7 @@ pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); match login_with_chatgpt( - config.codex_home, + config.codex_home.to_path_buf(), forced_chatgpt_workspace_id, config.cli_auth_credentials_store_mode, ) @@ -229,7 +229,7 @@ pub async fn run_login_with_device_code( } let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); let mut opts = ServerOptions::new( - config.codex_home, + config.codex_home.to_path_buf(), client_id.unwrap_or(CLIENT_ID.to_string()), forced_chatgpt_workspace_id, config.cli_auth_credentials_store_mode, @@ -268,7 +268,7 @@ pub async fn run_login_with_device_code_fallback_to_browser( let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); let mut opts = ServerOptions::new( - config.codex_home, + config.codex_home.to_path_buf(), client_id.unwrap_or(CLIENT_ID.to_string()), forced_chatgpt_workspace_id, config.cli_auth_credentials_store_mode, diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 
b581898606..32fb25ad3d 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -40,12 +40,16 @@ mod app_cmd; mod desktop_app; mod marketplace_cmd; mod mcp_cmd; +mod responses_cmd; #[cfg(not(windows))] mod wsl_paths; use crate::marketplace_cmd::MarketplaceCli; use crate::mcp_cmd::McpCli; +use crate::responses_cmd::ResponsesCommand; +use crate::responses_cmd::run_responses_command; +use codex_core::clear_memory_roots_contents; use codex_core::config::Config; use codex_core::config::ConfigOverrides; use codex_core::config::edit::ConfigEditsBuilder; @@ -107,8 +111,8 @@ enum Subcommand { /// Manage external MCP servers for Codex. Mcp(McpCli), - /// Manage plugin marketplaces for Codex. - Marketplace(MarketplaceCli), + /// Manage Codex plugins. + Plugin(PluginCli), /// Start Codex as an MCP server (stdio). McpServer, @@ -151,17 +155,36 @@ enum Subcommand { #[clap(hide = true)] ResponsesApiProxy(ResponsesApiProxyArgs), + /// Internal: send one raw Responses API payload through Codex auth. + #[clap(hide = true)] + Responses(ResponsesCommand), + /// Internal: relay stdio to a Unix domain socket. #[clap(hide = true, name = "stdio-to-uds")] StdioToUds(StdioToUdsCommand), - /// [EXPERIMENTAL] Run the standalone exec-server binary. + /// [EXPERIMENTAL] Run the standalone exec-server service. ExecServer(ExecServerCommand), /// Inspect feature flags. Features(FeaturesCli), } +#[derive(Debug, Parser)] +struct PluginCli { + #[clap(flatten)] + pub config_overrides: CliConfigOverrides, + + #[command(subcommand)] + subcommand: PluginSubcommand, +} + +#[derive(Debug, clap::Subcommand)] +enum PluginSubcommand { + /// Manage plugin marketplaces for Codex. 
+ Marketplace(MarketplaceCli), +} + #[derive(Debug, Parser)] struct CompletionCommand { /// Shell to generate completions for @@ -509,10 +532,19 @@ fn run_update_action(action: UpdateAction) -> anyhow::Result<()> { let status = { #[cfg(windows)] { - // On Windows, run via cmd.exe so .CMD/.BAT are correctly resolved (PATHEXT semantics). - std::process::Command::new("cmd") - .args(["/C", &cmd_str]) - .status()? + if action == UpdateAction::StandaloneWindows { + let (cmd, args) = action.command_args(); + // Run the standalone PowerShell installer with PowerShell + // itself. Routing this through `cmd.exe /C` would parse + // PowerShell metacharacters like `|` before PowerShell sees + // the installer command. + std::process::Command::new(cmd).args(args).status()? + } else { + // On Windows, run via cmd.exe so .CMD/.BAT are correctly resolved (PATHEXT semantics). + std::process::Command::new("cmd") + .args(["/C", &cmd_str]) + .status()? + } } #[cfg(not(windows))] { @@ -709,17 +741,23 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone()); mcp_cli.run().await?; } - Some(Subcommand::Marketplace(mut marketplace_cli)) => { + Some(Subcommand::Plugin(plugin_cli)) => { reject_remote_mode_for_subcommand( root_remote.as_deref(), root_remote_auth_token_env.as_deref(), - "marketplace", + "plugin", )?; - prepend_config_flags( - &mut marketplace_cli.config_overrides, - root_config_overrides.clone(), - ); - marketplace_cli.run().await?; + let PluginCli { + mut config_overrides, + subcommand, + } = plugin_cli; + prepend_config_flags(&mut config_overrides, root_config_overrides.clone()); + match subcommand { + PluginSubcommand::Marketplace(mut marketplace_cli) => { + prepend_config_flags(&mut marketplace_cli.config_overrides, config_overrides); + marketplace_cli.run().await?; + } + } } Some(Subcommand::AppServer(app_server_cli)) => { let AppServerCommand { @@ -1015,6 +1053,14 @@ 
async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { tokio::task::spawn_blocking(move || codex_responses_api_proxy::run_main(args)) .await??; } + Some(Subcommand::Responses(ResponsesCommand {})) => { + reject_remote_mode_for_subcommand( + root_remote.as_deref(), + root_remote_auth_token_env.as_deref(), + "responses", + )?; + run_responses_command(root_config_overrides).await?; + } Some(Subcommand::StdioToUds(cmd)) => { reject_remote_mode_for_subcommand( root_remote.as_deref(), @@ -1031,7 +1077,7 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { root_remote_auth_token_env.as_deref(), "exec-server", )?; - run_exec_server_command(cmd).await?; + run_exec_server_command(cmd, &arg0_paths).await?; } Some(Subcommand::Features(FeaturesCli { sub })) => match sub { FeaturesSubcommand::List => { @@ -1103,8 +1149,19 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { Ok(()) } -async fn run_exec_server_command(cmd: ExecServerCommand) -> anyhow::Result<()> { - codex_exec_server::run_main_with_listen_url(&cmd.listen) +async fn run_exec_server_command( + cmd: ExecServerCommand, + arg0_paths: &Arg0DispatchPaths, +) -> anyhow::Result<()> { + let codex_self_exe = arg0_paths + .codex_self_exe + .clone() + .ok_or_else(|| anyhow::anyhow!("Codex executable path is not configured"))?; + let runtime_paths = codex_exec_server::ExecServerRuntimePaths::new( + codex_self_exe, + arg0_paths.codex_linux_sandbox_exe.clone(), + )?; + codex_exec_server::run_main(&cmd.listen, runtime_paths) .await .map_err(anyhow::Error::from_boxed) } @@ -1243,31 +1300,21 @@ async fn run_debug_clear_memories_command( let state_db = StateRuntime::init(config.sqlite_home.clone(), config.model_provider_id.clone()) .await?; - state_db.reset_memory_data_for_fresh_start().await?; + state_db.clear_memory_data().await?; cleared_state_db = true; } - let memory_root = config.codex_home.join("memories"); - let removed_memory_root = match 
tokio::fs::remove_dir_all(&memory_root).await { - Ok(()) => true, - Err(err) if err.kind() == std::io::ErrorKind::NotFound => false, - Err(err) => return Err(err.into()), - }; + clear_memory_roots_contents(&config.codex_home).await?; let mut message = if cleared_state_db { format!("Cleared memory state from {}.", state_path.display()) } else { format!("No state db found at {}.", state_path.display()) }; - - if removed_memory_root { - message.push_str(&format!(" Removed {}.", memory_root.display())); - } else { - message.push_str(&format!( - " No memory directory found at {}.", - memory_root.display() - )); - } + message.push_str(&format!( + " Cleared memory directories under {}.", + config.codex_home.display() + )); println!("{message}"); @@ -1655,6 +1702,44 @@ mod tests { ); } + #[test] + fn responses_subcommand_is_hidden_from_help_but_parses() { + let help = MultitoolCli::command().render_help().to_string(); + assert!(!help.contains("responses")); + + let cli = MultitoolCli::try_parse_from(["codex", "responses"]).expect("parse"); + assert!(matches!(cli.subcommand, Some(Subcommand::Responses(_)))); + } + + #[test] + fn plugin_marketplace_add_parses_under_plugin() { + let cli = + MultitoolCli::try_parse_from(["codex", "plugin", "marketplace", "add", "owner/repo"]) + .expect("parse"); + + assert!(matches!(cli.subcommand, Some(Subcommand::Plugin(_)))); + } + + #[test] + fn plugin_marketplace_upgrade_parses_under_plugin() { + let cli = + MultitoolCli::try_parse_from(["codex", "plugin", "marketplace", "upgrade", "debug"]) + .expect("parse"); + + assert!(matches!(cli.subcommand, Some(Subcommand::Plugin(_)))); + } + + #[test] + fn marketplace_no_longer_parses_at_top_level() { + let add_result = + MultitoolCli::try_parse_from(["codex", "marketplace", "add", "owner/repo"]); + assert!(add_result.is_err()); + + let upgrade_result = + MultitoolCli::try_parse_from(["codex", "marketplace", "upgrade", "debug"]); + assert!(upgrade_result.is_err()); + } + fn 
sample_exit_info(conversation_id: Option<&str>, thread_name: Option<&str>) -> AppExitInfo { let token_usage = TokenUsage { output_tokens: 2, @@ -2170,6 +2255,19 @@ mod tests { ); } + #[test] + fn feature_toggles_accept_removed_image_detail_original_flag() { + let toggles = FeatureToggles { + enable: vec!["image_detail_original".to_string()], + disable: Vec::new(), + }; + let overrides = toggles.to_overrides().expect("valid features"); + assert_eq!( + overrides, + vec!["features.image_detail_original=true".to_string(),] + ); + } + #[test] fn feature_toggles_unknown_feature_errors() { let toggles = FeatureToggles { diff --git a/codex-rs/cli/src/marketplace_cmd.rs b/codex-rs/cli/src/marketplace_cmd.rs index 6a898c9ad6..ce1f99390a 100644 --- a/codex-rs/cli/src/marketplace_cmd.rs +++ b/codex-rs/cli/src/marketplace_cmd.rs @@ -2,21 +2,13 @@ use anyhow::Context; use anyhow::Result; use anyhow::bail; use clap::Parser; -use codex_config::MarketplaceConfigUpdate; -use codex_config::record_user_marketplace; +use codex_core::config::Config; use codex_core::config::find_codex_home; -use codex_core::plugins::OPENAI_CURATED_MARKETPLACE_NAME; -use codex_core::plugins::marketplace_install_root; -use codex_core::plugins::validate_marketplace_root; -use codex_core::plugins::validate_plugin_segment; +use codex_core::plugins::MarketplaceAddRequest; +use codex_core::plugins::PluginMarketplaceUpgradeOutcome; +use codex_core::plugins::PluginsManager; +use codex_core::plugins::add_marketplace; use codex_utils_cli::CliConfigOverrides; -use std::fs; -use std::path::Path; -use std::time::SystemTime; -use std::time::UNIX_EPOCH; - -mod metadata; -mod ops; #[derive(Debug, Parser)] pub struct MarketplaceCli { @@ -29,20 +21,19 @@ pub struct MarketplaceCli { #[derive(Debug, clap::Subcommand)] enum MarketplaceSubcommand { - /// Add a remote marketplace repository. 
Add(AddMarketplaceArgs), + Upgrade(UpgradeMarketplaceArgs), } #[derive(Debug, Parser)] struct AddMarketplaceArgs { - /// Marketplace source. Supports owner/repo[@ref], HTTP(S) Git URLs, or SSH URLs. + /// Marketplace source. Supports owner/repo[@ref], HTTP(S) Git URLs, SSH URLs, + /// or local marketplace root directories. source: String, - /// Git ref to check out. Overrides any @ref or #ref suffix in SOURCE. #[arg(long = "ref", value_name = "REF")] ref_name: Option, - /// Sparse-checkout path to use while cloning git sources. Repeat to include multiple paths. #[arg( long = "sparse", value_name = "PATH", @@ -51,12 +42,9 @@ struct AddMarketplaceArgs { sparse_paths: Vec, } -#[derive(Debug, PartialEq, Eq)] -pub(super) enum MarketplaceSource { - Git { - url: String, - ref_name: Option, - }, +#[derive(Debug, Parser)] +struct UpgradeMarketplaceArgs { + marketplace_name: Option, } impl MarketplaceCli { @@ -66,14 +54,13 @@ impl MarketplaceCli { subcommand, } = self; - // Validate overrides now. This command writes to CODEX_HOME only; marketplace discovery - // happens from that cache root after the next plugin/list or app-server start. 
- config_overrides + let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; match subcommand { MarketplaceSubcommand::Add(args) => run_add(args).await?, + MarketplaceSubcommand::Upgrade(args) => run_upgrade(overrides, args).await?, } Ok(()) @@ -87,307 +74,88 @@ async fn run_add(args: AddMarketplaceArgs) -> Result<()> { sparse_paths, } = args; - let source = parse_marketplace_source(&source, ref_name)?; - let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; - let install_root = marketplace_install_root(&codex_home); - fs::create_dir_all(&install_root).with_context(|| { - format!( - "failed to create marketplace install directory {}", - install_root.display() - ) - })?; - let install_metadata = - metadata::MarketplaceInstallMetadata::from_source(&source, &sparse_paths); - if let Some(existing_root) = metadata::installed_marketplace_root_for_source( - &codex_home, - &install_root, - &install_metadata, - )? { - let marketplace_name = validate_marketplace_root(&existing_root).with_context(|| { - format!( - "failed to validate installed marketplace at {}", - existing_root.display() - ) - })?; - record_added_marketplace(&codex_home, &marketplace_name, &install_metadata)?; + let outcome = add_marketplace( + codex_home.to_path_buf(), + MarketplaceAddRequest { + source, + ref_name, + sparse_paths, + }, + ) + .await?; + + if outcome.already_added { println!( - "Marketplace `{marketplace_name}` is already added from {}.", - source.display() + "Marketplace `{}` is already added from {}.", + outcome.marketplace_name, outcome.source_display ); - println!("Installed marketplace root: {}", existing_root.display()); - return Ok(()); - } - - let staging_root = ops::marketplace_staging_root(&install_root); - fs::create_dir_all(&staging_root).with_context(|| { - format!( - "failed to create marketplace staging directory {}", - staging_root.display() - ) - })?; - let staged_dir = tempfile::Builder::new() - .prefix("marketplace-add-") - 
.tempdir_in(&staging_root) - .with_context(|| { - format!( - "failed to create temporary marketplace directory in {}", - staging_root.display() - ) - })?; - let staged_root = staged_dir.path().to_path_buf(); - - let MarketplaceSource::Git { url, ref_name } = &source; - ops::clone_git_source(url, ref_name.as_deref(), &sparse_paths, &staged_root)?; - - let marketplace_name = validate_marketplace_source_root(&staged_root) - .with_context(|| format!("failed to validate marketplace from {}", source.display()))?; - if marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME { - bail!( - "marketplace `{OPENAI_CURATED_MARKETPLACE_NAME}` is reserved and cannot be added from {}", - source.display() - ); - } - let destination = install_root.join(safe_marketplace_dir_name(&marketplace_name)?); - ensure_marketplace_destination_is_inside_install_root(&install_root, &destination)?; - if destination.exists() { - bail!( - "marketplace `{marketplace_name}` is already added from a different source; remove it before adding {}", - source.display() - ); - } - ops::replace_marketplace_root(&staged_root, &destination) - .with_context(|| format!("failed to install marketplace at {}", destination.display()))?; - if let Err(err) = record_added_marketplace(&codex_home, &marketplace_name, &install_metadata) { - if let Err(rollback_err) = fs::rename(&destination, &staged_root) { - bail!( - "{err}; additionally failed to roll back installed marketplace at {}: {rollback_err}", - destination.display() - ); - } - return Err(err); - } - - println!( - "Added marketplace `{marketplace_name}` from {}.", - source.display() - ); - println!("Installed marketplace root: {}", destination.display()); - - Ok(()) -} - -fn record_added_marketplace( - codex_home: &Path, - marketplace_name: &str, - install_metadata: &metadata::MarketplaceInstallMetadata, -) -> Result<()> { - let source = install_metadata.config_source(); - let last_updated = utc_timestamp_now()?; - let update = MarketplaceConfigUpdate { - 
last_updated: &last_updated, - source_type: install_metadata.config_source_type(), - source: &source, - ref_name: install_metadata.ref_name(), - sparse_paths: install_metadata.sparse_paths(), - }; - record_user_marketplace(codex_home, marketplace_name, &update).with_context(|| { - format!("failed to add marketplace `{marketplace_name}` to user config.toml") - })?; - Ok(()) -} - -fn validate_marketplace_source_root(root: &Path) -> Result { - let marketplace_name = validate_marketplace_root(root)?; - validate_plugin_segment(&marketplace_name, "marketplace name").map_err(anyhow::Error::msg)?; - Ok(marketplace_name) -} - -fn parse_marketplace_source( - source: &str, - explicit_ref: Option, -) -> Result { - let source = source.trim(); - if source.is_empty() { - bail!("marketplace source must not be empty"); - } - - let (base_source, parsed_ref) = split_source_ref(source); - let ref_name = explicit_ref.or(parsed_ref); - - if looks_like_local_path(&base_source) { - bail!( - "local marketplace sources are not supported yet; use an HTTP(S) Git URL, SSH Git URL, or GitHub owner/repo" - ); - } - - if is_ssh_git_url(&base_source) || is_git_url(&base_source) { - let url = normalize_git_url(&base_source); - return Ok(MarketplaceSource::Git { url, ref_name }); - } - - if looks_like_github_shorthand(&base_source) { - let url = format!("https://github.com/{base_source}.git"); - return Ok(MarketplaceSource::Git { url, ref_name }); - } - - bail!("invalid marketplace source format: {source}"); -} - -fn split_source_ref(source: &str) -> (String, Option) { - if let Some((base, ref_name)) = source.rsplit_once('#') { - return (base.to_string(), non_empty_ref(ref_name)); - } - if !source.contains("://") - && !is_ssh_git_url(source) - && let Some((base, ref_name)) = source.rsplit_once('@') - { - return (base.to_string(), non_empty_ref(ref_name)); - } - (source.to_string(), None) -} - -fn non_empty_ref(ref_name: &str) -> Option { - let ref_name = ref_name.trim(); - 
(!ref_name.is_empty()).then(|| ref_name.to_string()) -} - -fn normalize_git_url(url: &str) -> String { - let url = url.trim_end_matches('/'); - if url.starts_with("https://github.com/") && !url.ends_with(".git") { - format!("{url}.git") } else { - url.to_string() - } -} - -fn looks_like_local_path(source: &str) -> bool { - source.starts_with("./") - || source.starts_with("../") - || source.starts_with('/') - || source.starts_with("~/") - || source == "." - || source == ".." -} - -fn is_ssh_git_url(source: &str) -> bool { - source.starts_with("ssh://") || source.starts_with("git@") && source.contains(':') -} - -fn is_git_url(source: &str) -> bool { - source.starts_with("http://") || source.starts_with("https://") -} - -fn looks_like_github_shorthand(source: &str) -> bool { - let mut segments = source.split('/'); - let owner = segments.next(); - let repo = segments.next(); - let extra = segments.next(); - owner.is_some_and(is_github_shorthand_segment) - && repo.is_some_and(is_github_shorthand_segment) - && extra.is_none() -} - -fn is_github_shorthand_segment(segment: &str) -> bool { - !segment.is_empty() - && segment - .chars() - .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.')) -} - -fn safe_marketplace_dir_name(marketplace_name: &str) -> Result { - let safe = marketplace_name - .chars() - .map(|ch| { - if ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.') { - ch - } else { - '-' - } - }) - .collect::(); - let safe = safe.trim_matches('.').to_string(); - if safe.is_empty() || safe == ".." 
{ - bail!("marketplace name `{marketplace_name}` cannot be used as an install directory"); - } - Ok(safe) -} - -fn ensure_marketplace_destination_is_inside_install_root( - install_root: &Path, - destination: &Path, -) -> Result<()> { - let install_root = install_root.canonicalize().with_context(|| { - format!( - "failed to resolve marketplace install root {}", - install_root.display() - ) - })?; - let destination_parent = destination - .parent() - .context("marketplace destination has no parent")? - .canonicalize() - .with_context(|| { - format!( - "failed to resolve marketplace destination parent {}", - destination.display() - ) - })?; - if !destination_parent.starts_with(&install_root) { - bail!( - "marketplace destination {} is outside install root {}", - destination.display(), - install_root.display() + println!( + "Added marketplace `{}` from {}.", + outcome.marketplace_name, outcome.source_display ); } + println!( + "Installed marketplace root: {}", + outcome.installed_root.as_path().display() + ); + Ok(()) } -fn utc_timestamp_now() -> Result { - let duration = SystemTime::now() - .duration_since(UNIX_EPOCH) - .context("system clock is before Unix epoch")?; - Ok(format_utc_timestamp(duration.as_secs() as i64)) +async fn run_upgrade( + overrides: Vec<(String, toml::Value)>, + args: UpgradeMarketplaceArgs, +) -> Result<()> { + let UpgradeMarketplaceArgs { marketplace_name } = args; + let config = Config::load_with_cli_overrides(overrides) + .await + .context("failed to load configuration")?; + let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; + let manager = PluginsManager::new(codex_home.to_path_buf()); + let outcome = manager + .upgrade_configured_marketplaces_for_config(&config, marketplace_name.as_deref()) + .map_err(anyhow::Error::msg)?; + print_upgrade_outcome(&outcome, marketplace_name.as_deref()) } -fn format_utc_timestamp(seconds_since_epoch: i64) -> String { - const SECONDS_PER_DAY: i64 = 86_400; - let days = 
seconds_since_epoch.div_euclid(SECONDS_PER_DAY); - let seconds_of_day = seconds_since_epoch.rem_euclid(SECONDS_PER_DAY); - let (year, month, day) = civil_from_days(days); - let hour = seconds_of_day / 3_600; - let minute = (seconds_of_day % 3_600) / 60; - let second = seconds_of_day % 60; - format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z") -} +fn print_upgrade_outcome( + outcome: &PluginMarketplaceUpgradeOutcome, + marketplace_name: Option<&str>, +) -> Result<()> { + for error in &outcome.errors { + eprintln!( + "Failed to upgrade marketplace `{}`: {}", + error.marketplace_name, error.message + ); + } + if !outcome.all_succeeded() { + bail!("{} upgrade failure(s) occurred.", outcome.errors.len()); + } -fn civil_from_days(days_since_epoch: i64) -> (i64, i64, i64) { - let days = days_since_epoch + 719_468; - let era = if days >= 0 { days } else { days - 146_096 } / 146_097; - let day_of_era = days - era * 146_097; - let year_of_era = - (day_of_era - day_of_era / 1_460 + day_of_era / 36_524 - day_of_era / 146_096) / 365; - let mut year = year_of_era + era * 400; - let day_of_year = day_of_era - (365 * year_of_era + year_of_era / 4 - year_of_era / 100); - let month_prime = (5 * day_of_year + 2) / 153; - let day = day_of_year - (153 * month_prime + 2) / 5 + 1; - let month = month_prime + if month_prime < 10 { 3 } else { -9 }; - year += if month <= 2 { 1 } else { 0 }; - (year, month, day) -} - -impl MarketplaceSource { - fn display(&self) -> String { - match self { - Self::Git { url, ref_name } => { - if let Some(ref_name) = ref_name { - format!("{url}#{ref_name}") - } else { - url.clone() - } - } + let selection_label = marketplace_name.unwrap_or("all configured Git marketplaces"); + if outcome.selected_marketplaces.is_empty() { + println!("No configured Git marketplaces to upgrade."); + } else if outcome.upgraded_roots.is_empty() { + if marketplace_name.is_some() { + println!("Marketplace `{selection_label}` is already up to date."); + } else 
{ + println!("All configured Git marketplaces are already up to date."); + } + } else if marketplace_name.is_some() { + println!("Upgraded marketplace `{selection_label}` to the latest configured revision."); + for root in &outcome.upgraded_roots { + println!("Installed marketplace root: {}", root.display()); + } + } else { + println!("Upgraded {} marketplace(s).", outcome.upgraded_roots.len()); + for root in &outcome.upgraded_roots { + println!("Installed marketplace root: {}", root.display()); } } + + Ok(()) } #[cfg(test)] @@ -395,141 +163,6 @@ mod tests { use super::*; use pretty_assertions::assert_eq; - #[test] - fn github_shorthand_parses_ref_suffix() { - assert_eq!( - parse_marketplace_source("owner/repo@main", /*explicit_ref*/ None).unwrap(), - MarketplaceSource::Git { - url: "https://github.com/owner/repo.git".to_string(), - ref_name: Some("main".to_string()), - } - ); - } - - #[test] - fn git_url_parses_fragment_ref() { - assert_eq!( - parse_marketplace_source( - "https://example.com/team/repo.git#v1", - /*explicit_ref*/ None, - ) - .unwrap(), - MarketplaceSource::Git { - url: "https://example.com/team/repo.git".to_string(), - ref_name: Some("v1".to_string()), - } - ); - } - - #[test] - fn explicit_ref_overrides_source_ref() { - assert_eq!( - parse_marketplace_source( - "owner/repo@main", - /*explicit_ref*/ Some("release".to_string()), - ) - .unwrap(), - MarketplaceSource::Git { - url: "https://github.com/owner/repo.git".to_string(), - ref_name: Some("release".to_string()), - } - ); - } - - #[test] - fn github_shorthand_and_git_url_normalize_to_same_source() { - let shorthand = parse_marketplace_source("owner/repo", /*explicit_ref*/ None).unwrap(); - let git_url = parse_marketplace_source( - "https://github.com/owner/repo.git", - /*explicit_ref*/ None, - ) - .unwrap(); - - assert_eq!(shorthand, git_url); - assert_eq!( - shorthand, - MarketplaceSource::Git { - url: "https://github.com/owner/repo.git".to_string(), - ref_name: None, - } - ); - } - - #[test] - 
fn github_url_with_trailing_slash_normalizes_without_extra_path_segment() { - assert_eq!( - parse_marketplace_source("https://github.com/owner/repo/", /*explicit_ref*/ None) - .unwrap(), - MarketplaceSource::Git { - url: "https://github.com/owner/repo.git".to_string(), - ref_name: None, - } - ); - } - - #[test] - fn non_github_https_source_parses_as_git_url() { - assert_eq!( - parse_marketplace_source("https://gitlab.com/owner/repo", /*explicit_ref*/ None) - .unwrap(), - MarketplaceSource::Git { - url: "https://gitlab.com/owner/repo".to_string(), - ref_name: None, - } - ); - } - - #[test] - fn file_url_source_is_rejected() { - let err = - parse_marketplace_source("file:///tmp/marketplace.git", /*explicit_ref*/ None) - .unwrap_err(); - - assert!( - err.to_string() - .contains("invalid marketplace source format"), - "unexpected error: {err}" - ); - } - - #[test] - fn local_path_source_is_rejected() { - let err = parse_marketplace_source("./marketplace", /*explicit_ref*/ None).unwrap_err(); - - assert!( - err.to_string() - .contains("local marketplace sources are not supported yet"), - "unexpected error: {err}" - ); - } - - #[test] - fn ssh_url_parses_as_git_url() { - assert_eq!( - parse_marketplace_source( - "ssh://git@github.com/owner/repo.git#main", - /*explicit_ref*/ None, - ) - .unwrap(), - MarketplaceSource::Git { - url: "ssh://git@github.com/owner/repo.git".to_string(), - ref_name: Some("main".to_string()), - } - ); - } - - #[test] - fn utc_timestamp_formats_unix_epoch_as_rfc3339_utc() { - assert_eq!( - format_utc_timestamp(/*seconds_since_epoch*/ 0), - "1970-01-01T00:00:00Z" - ); - assert_eq!( - format_utc_timestamp(/*seconds_since_epoch*/ 1_775_779_200), - "2026-04-10T00:00:00Z" - ); - } - #[test] fn sparse_paths_parse_before_or_after_source() { let sparse_before_source = @@ -559,4 +192,13 @@ mod tests { vec!["plugins/foo", "skills/bar"] ); } + + #[test] + fn upgrade_subcommand_parses_optional_marketplace_name() { + let upgrade_all = 
UpgradeMarketplaceArgs::try_parse_from(["upgrade"]).unwrap(); + assert_eq!(upgrade_all.marketplace_name, None); + + let upgrade_one = UpgradeMarketplaceArgs::try_parse_from(["upgrade", "debug"]).unwrap(); + assert_eq!(upgrade_one.marketplace_name.as_deref(), Some("debug")); + } } diff --git a/codex-rs/cli/src/marketplace_cmd/metadata.rs b/codex-rs/cli/src/marketplace_cmd/metadata.rs deleted file mode 100644 index db268840bb..0000000000 --- a/codex-rs/cli/src/marketplace_cmd/metadata.rs +++ /dev/null @@ -1,150 +0,0 @@ -use super::MarketplaceSource; -use anyhow::Context; -use anyhow::Result; -use codex_config::CONFIG_TOML_FILE; -use codex_core::plugins::validate_marketplace_root; -use std::io::ErrorKind; -use std::path::Path; -use std::path::PathBuf; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub(super) struct MarketplaceInstallMetadata { - source: InstalledMarketplaceSource, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum InstalledMarketplaceSource { - Git { - url: String, - ref_name: Option, - sparse_paths: Vec, - }, -} - -pub(super) fn installed_marketplace_root_for_source( - codex_home: &Path, - install_root: &Path, - install_metadata: &MarketplaceInstallMetadata, -) -> Result> { - let config_path = codex_home.join(CONFIG_TOML_FILE); - let config = match std::fs::read_to_string(&config_path) { - Ok(config) => config, - Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None), - Err(err) => { - return Err(err) - .with_context(|| format!("failed to read user config {}", config_path.display())); - } - }; - let config: toml::Value = toml::from_str(&config) - .with_context(|| format!("failed to parse user config {}", config_path.display()))?; - let Some(marketplaces) = config.get("marketplaces").and_then(toml::Value::as_table) else { - return Ok(None); - }; - - for (marketplace_name, marketplace) in marketplaces { - if !install_metadata.matches_config(marketplace) { - continue; - } - let root = install_root.join(marketplace_name); - if 
validate_marketplace_root(&root).is_ok() { - return Ok(Some(root)); - } - } - - Ok(None) -} - -impl MarketplaceInstallMetadata { - pub(super) fn from_source(source: &MarketplaceSource, sparse_paths: &[String]) -> Self { - let source = match source { - MarketplaceSource::Git { url, ref_name } => InstalledMarketplaceSource::Git { - url: url.clone(), - ref_name: ref_name.clone(), - sparse_paths: sparse_paths.to_vec(), - }, - }; - Self { source } - } - - pub(super) fn config_source_type(&self) -> &'static str { - match &self.source { - InstalledMarketplaceSource::Git { .. } => "git", - } - } - - pub(super) fn config_source(&self) -> String { - match &self.source { - InstalledMarketplaceSource::Git { url, .. } => url.clone(), - } - } - - pub(super) fn ref_name(&self) -> Option<&str> { - match &self.source { - InstalledMarketplaceSource::Git { ref_name, .. } => ref_name.as_deref(), - } - } - - pub(super) fn sparse_paths(&self) -> &[String] { - match &self.source { - InstalledMarketplaceSource::Git { sparse_paths, .. 
} => sparse_paths, - } - } - - fn matches_config(&self, marketplace: &toml::Value) -> bool { - marketplace.get("source_type").and_then(toml::Value::as_str) - == Some(self.config_source_type()) - && marketplace.get("source").and_then(toml::Value::as_str) - == Some(self.config_source().as_str()) - && marketplace.get("ref").and_then(toml::Value::as_str) == self.ref_name() - && config_sparse_paths(marketplace) == self.sparse_paths() - } -} - -fn config_sparse_paths(marketplace: &toml::Value) -> Vec { - marketplace - .get("sparse_paths") - .and_then(toml::Value::as_array) - .map(|paths| { - paths - .iter() - .filter_map(toml::Value::as_str) - .map(str::to_string) - .collect() - }) - .unwrap_or_default() -} - -#[cfg(test)] -mod tests { - use super::*; - use pretty_assertions::assert_eq; - use tempfile::TempDir; - - #[test] - fn installed_marketplace_root_for_source_propagates_config_read_errors() -> Result<()> { - let codex_home = TempDir::new()?; - let config_path = codex_home.path().join(CONFIG_TOML_FILE); - std::fs::create_dir(&config_path)?; - - let install_root = codex_home.path().join("marketplaces"); - let source = MarketplaceSource::Git { - url: "https://github.com/owner/repo.git".to_string(), - ref_name: None, - }; - let install_metadata = MarketplaceInstallMetadata::from_source(&source, &[]); - - let err = installed_marketplace_root_for_source( - codex_home.path(), - &install_root, - &install_metadata, - ) - .unwrap_err(); - - assert_eq!( - err.to_string(), - format!("failed to read user config {}", config_path.display()) - ); - - Ok(()) - } -} diff --git a/codex-rs/cli/src/marketplace_cmd/ops.rs b/codex-rs/cli/src/marketplace_cmd/ops.rs deleted file mode 100644 index ffb777fdbd..0000000000 --- a/codex-rs/cli/src/marketplace_cmd/ops.rs +++ /dev/null @@ -1,118 +0,0 @@ -use anyhow::Context; -use anyhow::Result; -use anyhow::bail; -use std::fs; -use std::path::Path; -use std::path::PathBuf; -use std::process::Command; - -pub(super) fn clone_git_source( - url: 
&str, - ref_name: Option<&str>, - sparse_paths: &[String], - destination: &Path, -) -> Result<()> { - let destination = destination.to_string_lossy().to_string(); - if sparse_paths.is_empty() { - run_git(&["clone", url, destination.as_str()], /*cwd*/ None)?; - if let Some(ref_name) = ref_name { - run_git(&["checkout", ref_name], Some(Path::new(&destination)))?; - } - return Ok(()); - } - - run_git( - &[ - "clone", - "--filter=blob:none", - "--no-checkout", - url, - destination.as_str(), - ], - /*cwd*/ None, - )?; - let mut sparse_args = vec!["sparse-checkout", "set"]; - sparse_args.extend(sparse_paths.iter().map(String::as_str)); - let destination = Path::new(&destination); - run_git(&sparse_args, Some(destination))?; - run_git(&["checkout", ref_name.unwrap_or("HEAD")], Some(destination))?; - Ok(()) -} - -fn run_git(args: &[&str], cwd: Option<&Path>) -> Result<()> { - let mut command = Command::new("git"); - command.args(args); - command.env("GIT_TERMINAL_PROMPT", "0"); - if let Some(cwd) = cwd { - command.current_dir(cwd); - } - - let output = command - .output() - .with_context(|| format!("failed to run git {}", args.join(" ")))?; - if output.status.success() { - return Ok(()); - } - - let stderr = String::from_utf8_lossy(&output.stderr); - let stdout = String::from_utf8_lossy(&output.stdout); - bail!( - "git {} failed with status {}\nstdout:\n{}\nstderr:\n{}", - args.join(" "), - output.status, - stdout.trim(), - stderr.trim() - ); -} - -pub(super) fn replace_marketplace_root(staged_root: &Path, destination: &Path) -> Result<()> { - if let Some(parent) = destination.parent() { - fs::create_dir_all(parent)?; - } - if destination.exists() { - bail!( - "marketplace destination already exists: {}", - destination.display() - ); - } - - fs::rename(staged_root, destination).map_err(Into::into) -} - -pub(super) fn marketplace_staging_root(install_root: &Path) -> PathBuf { - install_root.join(".staging") -} - -#[cfg(test)] -mod tests { - use super::*; - use 
pretty_assertions::assert_eq; - use tempfile::TempDir; - - #[test] - fn replace_marketplace_root_rejects_existing_destination() { - let temp_dir = TempDir::new().unwrap(); - let staged_root = temp_dir.path().join("staged"); - let destination = temp_dir.path().join("destination"); - fs::create_dir_all(&staged_root).unwrap(); - fs::write(staged_root.join("marker.txt"), "staged").unwrap(); - fs::create_dir_all(&destination).unwrap(); - fs::write(destination.join("marker.txt"), "installed").unwrap(); - - let err = replace_marketplace_root(&staged_root, &destination).unwrap_err(); - - assert!( - err.to_string() - .contains("marketplace destination already exists"), - "unexpected error: {err}" - ); - assert_eq!( - fs::read_to_string(staged_root.join("marker.txt")).unwrap(), - "staged" - ); - assert_eq!( - fs::read_to_string(destination.join("marker.txt")).unwrap(), - "installed" - ); - } -} diff --git a/codex-rs/cli/src/mcp_cmd.rs b/codex-rs/cli/src/mcp_cmd.rs index f544ca82b7..d413f72ddc 100644 --- a/codex-rs/cli/src/mcp_cmd.rs +++ b/codex-rs/cli/src/mcp_cmd.rs @@ -6,6 +6,7 @@ use anyhow::Result; use anyhow::anyhow; use anyhow::bail; use clap::ArgGroup; +use codex_config::types::AppToolApproval; use codex_config::types::McpServerConfig; use codex_config::types::McpServerTransportConfig; use codex_core::McpManager; @@ -297,11 +298,14 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re let new_entry = McpServerConfig { transport: transport.clone(), + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -390,8 +394,10 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; - let mcp_manager = 
McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone()))); - let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None); + let mcp_manager = McpManager::new(Arc::new(PluginsManager::new( + config.codex_home.to_path_buf(), + ))); + let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None).await; let LoginArgs { name, scopes } = login_args; @@ -441,8 +447,10 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; - let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone()))); - let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None); + let mcp_manager = McpManager::new(Arc::new(PluginsManager::new( + config.codex_home.to_path_buf(), + ))); + let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None).await; let LogoutArgs { name } = logout_args; @@ -471,8 +479,10 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; - let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone()))); - let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None); + let mcp_manager = McpManager::new(Arc::new(PluginsManager::new( + config.codex_home.to_path_buf(), + ))); + let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None).await; let mut entries: Vec<_> = mcp_servers.iter().collect(); entries.sort_by(|(a, _), (b, _)| a.cmp(b)); @@ -720,8 +730,10 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; - let mcp_manager = McpManager::new(Arc::new(PluginsManager::new(config.codex_home.clone()))); - let mcp_servers = 
mcp_manager.effective_servers(&config, /*auth*/ None); + let mcp_manager = McpManager::new(Arc::new(PluginsManager::new( + config.codex_home.to_path_buf(), + ))); + let mcp_servers = mcp_manager.effective_servers(&config, /*auth*/ None).await; let Some(server) = mcp_servers.get(&get_args.name) else { bail!("No MCP server named '{name}' found.", name = get_args.name); @@ -869,6 +881,14 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re if let Some(timeout) = server.tool_timeout_sec { println!(" tool_timeout_sec: {}", timeout.as_secs_f64()); } + if let Some(approval_mode) = server.default_tools_approval_mode { + let approval_mode = match approval_mode { + AppToolApproval::Auto => "auto", + AppToolApproval::Prompt => "prompt", + AppToolApproval::Approve => "approve", + }; + println!(" default_tools_approval_mode: {approval_mode}"); + } println!(" remove: codex mcp remove {}", get_args.name); Ok(()) diff --git a/codex-rs/cli/src/responses_cmd.rs b/codex-rs/cli/src/responses_cmd.rs new file mode 100644 index 0000000000..2dc7a67146 --- /dev/null +++ b/codex-rs/cli/src/responses_cmd.rs @@ -0,0 +1,249 @@ +use clap::Parser; +use codex_core::config::Config; +use codex_utils_cli::CliConfigOverrides; +use serde_json::json; +use tokio::io::AsyncReadExt; + +#[derive(Debug, Parser)] +pub(crate) struct ResponsesCommand {} + +pub(crate) async fn run_responses_command( + root_config_overrides: CliConfigOverrides, +) -> anyhow::Result<()> { + let mut payload_text = String::new(); + tokio::io::stdin().read_to_string(&mut payload_text).await?; + if payload_text.trim().is_empty() { + anyhow::bail!("expected Responses API JSON payload on stdin"); + } + + let payload: serde_json::Value = serde_json::from_str(&payload_text) + .map_err(|err| anyhow::anyhow!("failed to parse Responses API JSON payload: {err}"))?; + if payload.get("stream").and_then(serde_json::Value::as_bool) != Some(true) { + anyhow::bail!("codex responses expects a streaming payload with 
`\"stream\": true`"); + } + + let cli_overrides = root_config_overrides + .parse_overrides() + .map_err(anyhow::Error::msg)?; + let config = Config::load_with_cli_overrides(cli_overrides).await?; + let base_auth_manager = codex_login::AuthManager::shared_from_config( + &config, /*enable_codex_api_key_env*/ true, + ); + let auth_manager = + codex_login::auth_manager_for_provider(Some(base_auth_manager), &config.model_provider); + let auth = match auth_manager { + Some(auth_manager) => auth_manager.auth().await, + None => None, + }; + let api_provider = config + .model_provider + .to_api_provider(auth.as_ref().map(codex_login::CodexAuth::auth_mode))?; + let api_auth = codex_login::auth_provider_from_auth(auth, &config.model_provider)?; + let client = codex_api::ResponsesClient::new( + codex_api::ReqwestTransport::new(codex_login::default_client::build_reqwest_client()), + api_provider, + api_auth, + ); + + let mut stream = client + .stream( + payload, + Default::default(), + codex_api::Compression::None, + /*turn_state*/ None, + ) + .await?; + while let Some(event) = stream.rx_event.recv().await { + let event = event?; + println!("{}", serde_json::to_string(&response_event_to_json(event))?); + } + + Ok(()) +} + +fn response_event_to_json(event: codex_api::ResponseEvent) -> serde_json::Value { + match event { + codex_api::ResponseEvent::Created => { + json!({ "type": "response.created", "response": {} }) + } + codex_api::ResponseEvent::OutputItemDone(item) => { + json!({ "type": "response.output_item.done", "item": item }) + } + codex_api::ResponseEvent::OutputItemAdded(item) => { + json!({ "type": "response.output_item.added", "item": item }) + } + codex_api::ResponseEvent::ServerModel(model) => { + json!({ "type": "response.server_model", "model": model }) + } + codex_api::ResponseEvent::ServerReasoningIncluded(included) => { + json!({ "type": "response.server_reasoning_included", "included": included }) + } + codex_api::ResponseEvent::Completed { + response_id, + 
token_usage, + } => { + let response = match token_usage { + Some(token_usage) => json!({ + "id": response_id, + "usage": { + "input_tokens": token_usage.input_tokens, + "input_tokens_details": { + "cached_tokens": token_usage.cached_input_tokens, + }, + "output_tokens": token_usage.output_tokens, + "output_tokens_details": { + "reasoning_tokens": token_usage.reasoning_output_tokens, + }, + "total_tokens": token_usage.total_tokens, + }, + }), + None => json!({ "id": response_id }), + }; + json!({ "type": "response.completed", "response": response }) + } + codex_api::ResponseEvent::OutputTextDelta(delta) => { + json!({ "type": "response.output_text.delta", "delta": delta }) + } + codex_api::ResponseEvent::ToolCallInputDelta { + item_id, + call_id, + delta, + } => { + json!({ + "type": "response.tool_call_input.delta", + "item_id": item_id, + "call_id": call_id, + "delta": delta, + }) + } + codex_api::ResponseEvent::ReasoningSummaryDelta { + delta, + summary_index, + } => json!({ + "type": "response.reasoning_summary_text.delta", + "delta": delta, + "summary_index": summary_index, + }), + codex_api::ResponseEvent::ReasoningContentDelta { + delta, + content_index, + } => json!({ + "type": "response.reasoning_text.delta", + "delta": delta, + "content_index": content_index, + }), + codex_api::ResponseEvent::ReasoningSummaryPartAdded { summary_index } => { + json!({ + "type": "response.reasoning_summary_part.added", + "summary_index": summary_index, + }) + } + codex_api::ResponseEvent::RateLimits(rate_limits) => { + json!({ "type": "response.rate_limits", "rate_limits": rate_limits }) + } + codex_api::ResponseEvent::ModelsEtag(etag) => { + json!({ "type": "response.models_etag", "etag": etag }) + } + } +} + +#[cfg(test)] +mod tests { + use super::response_event_to_json; + use codex_protocol::protocol::TokenUsage; + use pretty_assertions::assert_eq; + use serde_json::json; + + #[test] + fn response_events_keep_replayable_response_envelopes() { + let created = 
response_event_to_json(codex_api::ResponseEvent::Created); + assert_eq!(created, json!({"type": "response.created", "response": {}})); + + let completed = response_event_to_json(codex_api::ResponseEvent::Completed { + response_id: "resp-1".to_string(), + token_usage: Some(TokenUsage { + input_tokens: 10, + cached_input_tokens: 4, + output_tokens: 7, + reasoning_output_tokens: 3, + total_tokens: 17, + }), + }); + assert_eq!( + completed, + json!({ + "type": "response.completed", + "response": { + "id": "resp-1", + "usage": { + "input_tokens": 10, + "input_tokens_details": { + "cached_tokens": 4, + }, + "output_tokens": 7, + "output_tokens_details": { + "reasoning_tokens": 3, + }, + "total_tokens": 17, + }, + }, + }) + ); + + let completed_without_usage = response_event_to_json(codex_api::ResponseEvent::Completed { + response_id: "resp-2".to_string(), + token_usage: None, + }); + assert_eq!( + completed_without_usage, + json!({"type": "response.completed", "response": {"id": "resp-2"}}) + ); + } + + #[test] + fn reasoning_deltas_use_responses_event_names() { + let summary = response_event_to_json(codex_api::ResponseEvent::ReasoningSummaryDelta { + delta: "plan".to_string(), + summary_index: 1, + }); + assert_eq!( + summary, + json!({ + "type": "response.reasoning_summary_text.delta", + "delta": "plan", + "summary_index": 1, + }) + ); + + let content = response_event_to_json(codex_api::ResponseEvent::ReasoningContentDelta { + delta: "detail".to_string(), + content_index: 2, + }); + assert_eq!( + content, + json!({ + "type": "response.reasoning_text.delta", + "delta": "detail", + "content_index": 2, + }) + ); + } + + #[test] + fn tool_call_input_delta_uses_responses_event_name() { + let delta = response_event_to_json(codex_api::ResponseEvent::ToolCallInputDelta { + item_id: "item-1".to_string(), + call_id: Some("call-1".to_string()), + delta: "patch".to_string(), + }); + assert_eq!( + delta, + json!({ + "type": "response.tool_call_input.delta", + "item_id": "item-1", + 
"call_id": "call-1", + "delta": "patch", + }) + ); + } +} diff --git a/codex-rs/cli/tests/debug_clear_memories.rs b/codex-rs/cli/tests/debug_clear_memories.rs index c68172ba35..a24f7ebdde 100644 --- a/codex-rs/cli/tests/debug_clear_memories.rs +++ b/codex-rs/cli/tests/debug_clear_memories.rs @@ -125,13 +125,8 @@ INSERT INTO jobs ( .fetch_one(&pool) .await?; assert_eq!(memory_jobs_count, 0); - - let memory_mode: String = sqlx::query_scalar("SELECT memory_mode FROM threads WHERE id = ?") - .bind(thread_id) - .fetch_one(&pool) - .await?; - assert_eq!(memory_mode, "disabled"); - assert!(!memory_root.exists()); + assert!(memory_root.exists()); + assert_eq!(std::fs::read_dir(memory_root)?.count(), 0); Ok(()) } diff --git a/codex-rs/cli/tests/marketplace_add.rs b/codex-rs/cli/tests/marketplace_add.rs index 9cc5c65a5c..e4256f52b2 100644 --- a/codex-rs/cli/tests/marketplace_add.rs +++ b/codex-rs/cli/tests/marketplace_add.rs @@ -1,6 +1,8 @@ use anyhow::Result; +use codex_config::CONFIG_TOML_FILE; use codex_core::plugins::marketplace_install_root; use predicates::str::contains; +use pretty_assertions::assert_eq; use std::path::Path; use tempfile::TempDir; @@ -37,7 +39,7 @@ fn write_marketplace_source(source: &Path, marker: &str) -> Result<()> { } #[tokio::test] -async fn marketplace_add_rejects_local_directory_source() -> Result<()> { +async fn marketplace_add_local_directory_source() -> Result<()> { let codex_home = TempDir::new()?; let source = TempDir::new()?; write_marketplace_source(source.path(), "local ref")?; @@ -46,18 +48,71 @@ async fn marketplace_add_rejects_local_directory_source() -> Result<()> { codex_command(codex_home.path())? 
.current_dir(source_parent) - .args(["marketplace", "add", source_arg.as_str()]) + .args(["plugin", "marketplace", "add", source_arg.as_str()]) .assert() - .failure() - .stderr(contains( - "local marketplace sources are not supported yet; use an HTTP(S) Git URL, SSH Git URL, or GitHub owner/repo", - )); + .success(); - assert!( - !marketplace_install_root(codex_home.path()) - .join("debug") - .exists() + let installed_root = marketplace_install_root(codex_home.path()).join("debug"); + assert!(!installed_root.exists()); + + let config = std::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE))?; + let config: toml::Value = toml::from_str(&config)?; + let expected_source = source.path().canonicalize()?.display().to_string(); + assert_eq!( + config["marketplaces"]["debug"]["source_type"].as_str(), + Some("local") + ); + assert_eq!( + config["marketplaces"]["debug"]["source"].as_str(), + Some(expected_source.as_str()) ); Ok(()) } + +#[tokio::test] +async fn marketplace_add_rejects_local_manifest_file_source() -> Result<()> { + let codex_home = TempDir::new()?; + let source = TempDir::new()?; + write_marketplace_source(source.path(), "local ref")?; + let manifest_path = source.path().join(".agents/plugins/marketplace.json"); + + codex_command(codex_home.path())? + .args([ + "plugin", + "marketplace", + "add", + manifest_path.to_str().unwrap(), + ]) + .assert() + .failure() + .stderr(contains( + "local marketplace source must be a directory, not a file", + )); + + Ok(()) +} + +#[tokio::test] +async fn marketplace_add_rejects_sparse_for_local_directory_source() -> Result<()> { + let codex_home = TempDir::new()?; + let source = TempDir::new()?; + write_marketplace_source(source.path(), "local ref")?; + + codex_command(codex_home.path())? 
+ .args([ + "plugin", + "marketplace", + "add", + "--sparse", + ".agents", + source.path().to_str().unwrap(), + ]) + .assert() + .failure() + .stderr(contains( + "--sparse is only supported for git marketplace sources", + )); + + Ok(()) +} diff --git a/codex-rs/cli/tests/marketplace_upgrade.rs b/codex-rs/cli/tests/marketplace_upgrade.rs new file mode 100644 index 0000000000..081203ebef --- /dev/null +++ b/codex-rs/cli/tests/marketplace_upgrade.rs @@ -0,0 +1,36 @@ +use anyhow::Result; +use predicates::str::contains; +use std::path::Path; +use tempfile::TempDir; + +fn codex_command(codex_home: &Path) -> Result { + let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?); + cmd.env("CODEX_HOME", codex_home); + Ok(cmd) +} + +#[tokio::test] +async fn marketplace_upgrade_runs_under_plugin() -> Result<()> { + let codex_home = TempDir::new()?; + + codex_command(codex_home.path())? + .args(["plugin", "marketplace", "upgrade"]) + .assert() + .success() + .stdout(contains("No configured Git marketplaces to upgrade.")); + + Ok(()) +} + +#[tokio::test] +async fn marketplace_upgrade_no_longer_runs_at_top_level() -> Result<()> { + let codex_home = TempDir::new()?; + + codex_command(codex_home.path())? + .args(["marketplace", "upgrade"]) + .assert() + .failure() + .stderr(contains("unexpected argument 'upgrade' found")); + + Ok(()) +} diff --git a/codex-rs/cloud-tasks/src/util.rs b/codex-rs/cloud-tasks/src/util.rs index 090eec227e..cbaed17bea 100644 --- a/codex-rs/cloud-tasks/src/util.rs +++ b/codex-rs/cloud-tasks/src/util.rs @@ -63,7 +63,7 @@ pub async fn load_auth_manager() -> Option { // TODO: pass in cli overrides once cloud tasks properly support them. 
let config = Config::load_with_cli_overrides(Vec::new()).await.ok()?; Some(AuthManager::new( - config.codex_home, + config.codex_home.to_path_buf(), /*enable_codex_api_key_env*/ false, config.cli_auth_credentials_store_mode, )) diff --git a/codex-rs/code-mode/Cargo.toml b/codex-rs/code-mode/Cargo.toml index e821ca0e4d..d2f42359d4 100644 --- a/codex-rs/code-mode/Cargo.toml +++ b/codex-rs/code-mode/Cargo.toml @@ -14,6 +14,8 @@ workspace = true [dependencies] async-trait = { workspace = true } +codex-protocol = { workspace = true } +deno_core_icudata = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } tokio = { workspace = true, features = ["macros", "rt", "sync", "time"] } diff --git a/codex-rs/code-mode/src/description.rs b/codex-rs/code-mode/src/description.rs index ac18fafd89..7f5e47fa25 100644 --- a/codex-rs/code-mode/src/description.rs +++ b/codex-rs/code-mode/src/description.rs @@ -1,3 +1,4 @@ +use codex_protocol::ToolName; use serde::Deserialize; use serde::Serialize; use serde_json::Value as JsonValue; @@ -8,6 +9,8 @@ use crate::PUBLIC_TOOL_NAME; const MAX_JS_SAFE_INTEGER: u64 = (1_u64 << 53) - 1; const CODE_MODE_ONLY_PREFACE: &str = "Use `exec/wait` tool to run all other tools, do not attempt to use any other tools directly"; +const DEFERRED_NESTED_TOOLS_GUIDANCE: &str = r#"Some nested MCP/app tools may be omitted from this description. They are still available on the global `tools` object and listed in `ALL_TOOLS`. +To find one, filter `ALL_TOOLS` by `name` and `description`; do not print the full `ALL_TOOLS` array. Print only a small set of relevant matches if you need to inspect them."#; const EXEC_DESCRIPTION_TEMPLATE: &str = r#"Run JavaScript code to orchestrate/compose tool calls - Evaluates the provided JavaScript code in a fresh V8 isolate as an async module. - All nested tools are available on the global `tools` object, for example `await tools.exec_command(...)`. 
Tool names are exposed as normalized JavaScript identifiers, for example `await tools.mcp__ologs__get_profile(...)`. @@ -23,7 +26,7 @@ const EXEC_DESCRIPTION_TEMPLATE: &str = r#"Run JavaScript code to orchestrate/co - Global helpers: - `exit()`: Immediately ends the current script successfully (like an early return from the top level). - `text(value: string | number | boolean | undefined | null)`: Appends a text item. Non-string values are stringified with `JSON.stringify(...)` when possible. -- `image(imageUrlOrItem: string | { image_url: string; detail?: "auto" | "low" | "high" | "original" | null })`: Appends an image item. `image_url` can be an HTTPS URL or a base64-encoded `data:` URL. +- `image(imageUrlOrItem: string | { image_url: string; detail?: "auto" | "low" | "high" | "original" | null } | ImageContent, detail?: "auto" | "low" | "high" | "original" | null)`: Appends an image item. `image_url` can be an HTTPS URL or a base64-encoded `data:` URL. To forward an MCP tool image, pass an individual `ImageContent` block from `result.content`, for example `image(result.content[0])`. MCP image blocks may request original detail with `_meta: { "codex/imageDetail": "original" }`. When provided, the second `detail` argument overrides any detail embedded in the first argument. - `store(key: string, value: any)`: stores a serializable value under a string key for later `exec` calls in the same session. - `load(key: string)`: returns the stored value for a string key, or `undefined` if it is missing. - `notify(value: string | number | boolean | undefined | null)`: immediately injects an extra `custom_tool_call_output` for the current `exec` call. Values are stringified like `text(...)`. 
@@ -129,6 +132,7 @@ pub enum CodeModeToolKind { #[derive(Clone, Debug, PartialEq)] pub struct ToolDefinition { pub name: String, + pub tool_name: ToolName, pub description: String, pub kind: CodeModeToolKind, pub input_schema: Option, @@ -249,15 +253,19 @@ pub fn build_exec_tool_description( enabled_tools: &[ToolDefinition], namespace_descriptions: &BTreeMap, code_mode_only: bool, + deferred_tools_available: bool, ) -> String { - if !code_mode_only { - return EXEC_DESCRIPTION_TEMPLATE.to_string(); + let mut sections = Vec::new(); + if code_mode_only { + sections.push(CODE_MODE_ONLY_PREFACE.to_string()); + } + sections.push(EXEC_DESCRIPTION_TEMPLATE.to_string()); + if deferred_tools_available { + sections.push(DEFERRED_NESTED_TOOLS_GUIDANCE.to_string()); + } + if !code_mode_only { + return sections.join("\n\n"); } - - let mut sections = vec![ - CODE_MODE_ONLY_PREFACE.to_string(), - EXEC_DESCRIPTION_TEMPLATE.to_string(), - ]; if !enabled_tools.is_empty() { let mut current_namespace: Option<&str> = None; @@ -269,11 +277,15 @@ pub fn build_exec_tool_description( for tool in enabled_tools { let name = tool.name.as_str(); let nested_description = render_code_mode_sample_for_definition(tool); - let next_namespace = namespace_descriptions - .get(name) + let namespace_description = tool + .tool_name + .namespace + .as_ref() + .and_then(|namespace| namespace_descriptions.get(namespace)); + let next_namespace = namespace_description .map(|namespace_description| namespace_description.name.as_str()); if next_namespace != current_namespace { - if let Some(namespace_description) = namespace_descriptions.get(name) { + if let Some(namespace_description) = namespace_description { let namespace_description_text = namespace_description.description.trim(); if !namespace_description_text.is_empty() { nested_tool_sections.push(format!( @@ -346,7 +358,7 @@ pub fn augment_tool_definition(mut definition: ToolDefinition) -> ToolDefinition pub fn enabled_tool_metadata(definition: 
&ToolDefinition) -> EnabledToolMetadata { EnabledToolMetadata { - tool_name: definition.name.clone(), + tool_name: definition.tool_name.clone(), global_name: normalize_code_mode_identifier(&definition.name), description: definition.description.clone(), kind: definition.kind, @@ -355,7 +367,7 @@ pub fn enabled_tool_metadata(definition: &ToolDefinition) -> EnabledToolMetadata #[derive(Clone, Debug, Eq, PartialEq, Serialize)] pub struct EnabledToolMetadata { - pub tool_name: String, + pub tool_name: ToolName, pub global_name: String, pub description: String, pub kind: CodeModeToolKind, @@ -706,6 +718,7 @@ mod tests { use super::build_exec_tool_description; use super::normalize_code_mode_identifier; use super::parse_exec_source; + use codex_protocol::ToolName; use pretty_assertions::assert_eq; use serde_json::Value as JsonValue; use serde_json::json; @@ -770,6 +783,7 @@ mod tests { fn augment_tool_definition_appends_typed_declaration() { let definition = ToolDefinition { name: "hidden_dynamic_tool".to_string(), + tool_name: ToolName::plain("hidden_dynamic_tool"), description: "Test tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -798,6 +812,7 @@ mod tests { fn augment_tool_definition_includes_property_descriptions_as_comments() { let definition = ToolDefinition { name: "weather_tool".to_string(), + tool_name: ToolName::plain("weather_tool"), description: "Weather tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -846,6 +861,7 @@ mod tests { let description = build_exec_tool_description( &[ToolDefinition { name: "foo".to_string(), + tool_name: ToolName::plain("foo"), description: "bar".to_string(), kind: CodeModeToolKind::Function, input_schema: None, @@ -853,6 +869,7 @@ mod tests { }], &BTreeMap::new(), /*code_mode_only*/ true, + /*deferred_tools_available*/ false, ); assert!(description.contains( "### `foo` @@ -862,34 +879,30 @@ bar" #[test] fn exec_description_mentions_timeout_helpers() { - let 
description = - build_exec_tool_description(&[], &BTreeMap::new(), /*code_mode_only*/ false); + let description = build_exec_tool_description( + &[], + &BTreeMap::new(), + /*code_mode_only*/ false, + /*deferred_tools_available*/ false, + ); assert!(description.contains("`setTimeout(callback: () => void, delayMs?: number)`")); assert!(description.contains("`clearTimeout(timeoutId?: number)`")); } #[test] fn code_mode_only_description_groups_namespace_instructions_once() { - let namespace_descriptions = BTreeMap::from([ - ( - "mcp__sample__alpha".to_string(), - ToolNamespaceDescription { - name: "mcp__sample".to_string(), - description: "Shared namespace guidance.".to_string(), - }, - ), - ( - "mcp__sample__beta".to_string(), - ToolNamespaceDescription { - name: "mcp__sample".to_string(), - description: "Shared namespace guidance.".to_string(), - }, - ), - ]); + let namespace_descriptions = BTreeMap::from([( + "mcp__sample__".to_string(), + ToolNamespaceDescription { + name: "mcp__sample".to_string(), + description: "Shared namespace guidance.".to_string(), + }, + )]); let description = build_exec_tool_description( &[ ToolDefinition { name: "mcp__sample__alpha".to_string(), + tool_name: ToolName::namespaced("mcp__sample__", "alpha"), description: "First tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -905,6 +918,7 @@ bar" }, ToolDefinition { name: "mcp__sample__beta".to_string(), + tool_name: ToolName::namespaced("mcp__sample__", "beta"), description: "Second tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -921,6 +935,7 @@ bar" ], &namespace_descriptions, /*code_mode_only*/ true, + /*deferred_tools_available*/ false, ); assert_eq!(description.matches("## mcp__sample").count(), 1); assert!(description.contains("## mcp__sample\nShared namespace guidance.")); @@ -935,7 +950,7 @@ bar" #[test] fn code_mode_only_description_omits_empty_namespace_sections() { let namespace_descriptions = BTreeMap::from([( 
- "mcp__sample__alpha".to_string(), + "mcp__sample__".to_string(), ToolNamespaceDescription { name: "mcp__sample".to_string(), description: String::new(), @@ -944,6 +959,7 @@ bar" let description = build_exec_tool_description( &[ToolDefinition { name: "mcp__sample__alpha".to_string(), + tool_name: ToolName::namespaced("mcp__sample__", "alpha"), description: "First tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -959,6 +975,7 @@ bar" }], &namespace_descriptions, /*code_mode_only*/ true, + /*deferred_tools_available*/ false, ); assert!(!description.contains("## mcp__sample")); @@ -969,6 +986,7 @@ bar" fn code_mode_only_description_renders_shared_mcp_types_once() { let first_tool = augment_tool_definition(ToolDefinition { name: "mcp__sample__alpha".to_string(), + tool_name: ToolName::namespaced("mcp__sample__", "alpha"), description: "First tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -1002,6 +1020,7 @@ bar" }); let second_tool = augment_tool_definition(ToolDefinition { name: "mcp__sample__beta".to_string(), + tool_name: ToolName::namespaced("mcp__sample__", "beta"), description: "Second tool".to_string(), kind: CodeModeToolKind::Function, input_schema: Some(json!({ @@ -1038,6 +1057,7 @@ bar" &[ ToolDefinition { name: first_tool.name, + tool_name: first_tool.tool_name, description: "First tool".to_string(), kind: first_tool.kind, input_schema: first_tool.input_schema, @@ -1045,6 +1065,7 @@ bar" }, ToolDefinition { name: second_tool.name, + tool_name: second_tool.tool_name, description: "Second tool".to_string(), kind: second_tool.kind, input_schema: second_tool.input_schema, @@ -1053,6 +1074,7 @@ bar" ], &BTreeMap::new(), /*code_mode_only*/ true, + /*deferred_tools_available*/ false, ); assert_eq!( @@ -1063,4 +1085,17 @@ bar" ); assert_eq!(description.matches("Shared MCP Types:").count(), 1); } + + #[test] + fn exec_description_mentions_deferred_nested_tools_when_available() { + let description = 
build_exec_tool_description( + &[], + &BTreeMap::new(), + /*code_mode_only*/ false, + /*deferred_tools_available*/ true, + ); + + assert!(description.contains("Some nested MCP/app tools may be omitted")); + assert!(description.contains("filter `ALL_TOOLS` by `name` and `description`")); + } } diff --git a/codex-rs/code-mode/src/runtime/callbacks.rs b/codex-rs/code-mode/src/runtime/callbacks.rs index 5511baca23..a9755f6eb0 100644 --- a/codex-rs/code-mode/src/runtime/callbacks.rs +++ b/codex-rs/code-mode/src/runtime/callbacks.rs @@ -15,7 +15,13 @@ pub(super) fn tool_callback( args: v8::FunctionCallbackArguments, mut retval: v8::ReturnValue, ) { - let tool_name = args.data().to_rust_string_lossy(scope); + let tool_index = match args.data().to_rust_string_lossy(scope).parse::() { + Ok(tool_index) => tool_index, + Err(_) => { + throw_type_error(scope, "invalid tool callback data"); + return; + } + }; let input = if args.length() == 0 { Ok(None) } else { @@ -36,6 +42,22 @@ pub(super) fn tool_callback( let promise = resolver.get_promise(scope); let resolver = v8::Global::new(scope, resolver); + let tool_name = { + let Some(state) = scope.get_slot::() else { + throw_type_error(scope, "runtime state unavailable"); + return; + }; + let Some(tool_name) = state + .enabled_tools + .get(tool_index) + .map(|tool| tool.tool_name.clone()) + else { + throw_type_error(scope, "tool callback data is out of range"); + return; + }; + tool_name + }; + let Some(state) = scope.get_slot_mut::() else { throw_type_error(scope, "runtime state unavailable"); return; @@ -87,7 +109,20 @@ pub(super) fn image_callback( } else { args.get(0) }; - let image_item = match normalize_output_image(scope, value) { + let detail_override = if args.length() < 2 { + None + } else { + let detail = args.get(1); + if detail.is_string() { + Some(detail.to_rust_string_lossy(scope)) + } else if detail.is_null() || detail.is_undefined() { + None + } else { + throw_type_error(scope, "image detail must be a string when 
provided"); + return; + } + }; + let image_item = match normalize_output_image(scope, value, detail_override) { Ok(image_item) => image_item, Err(()) => return, }; diff --git a/codex-rs/code-mode/src/runtime/globals.rs b/codex-rs/code-mode/src/runtime/globals.rs index 2d419db908..b40136c44c 100644 --- a/codex-rs/code-mode/src/runtime/globals.rs +++ b/codex-rs/code-mode/src/runtime/globals.rs @@ -53,10 +53,10 @@ fn build_tools_object<'s>( .map(|state| state.enabled_tools.clone()) .unwrap_or_default(); - for tool in enabled_tools { + for (tool_index, tool) in enabled_tools.iter().enumerate() { let name = v8::String::new(scope, &tool.global_name) .ok_or_else(|| "failed to allocate tool name".to_string())?; - let function = tool_function(scope, &tool.tool_name)?; + let function = tool_function(scope, tool_index)?; tools.set(scope, name.into(), function.into()); } Ok(tools) @@ -116,9 +116,9 @@ where fn tool_function<'s>( scope: &mut v8::PinScope<'s, '_>, - tool_name: &str, + tool_index: usize, ) -> Result, String> { - let data = v8::String::new(scope, tool_name) + let data = v8::String::new(scope, &tool_index.to_string()) .ok_or_else(|| "failed to allocate tool callback data".to_string())?; let template = v8::FunctionTemplate::builder(tool_callback) .data(data.into()) diff --git a/codex-rs/code-mode/src/runtime/mod.rs b/codex-rs/code-mode/src/runtime/mod.rs index 411f81bddc..0f50edd329 100644 --- a/codex-rs/code-mode/src/runtime/mod.rs +++ b/codex-rs/code-mode/src/runtime/mod.rs @@ -9,6 +9,7 @@ use std::sync::OnceLock; use std::sync::mpsc as std_mpsc; use std::thread; +use codex_protocol::ToolName; use serde_json::Value as JsonValue; use tokio::sync::mpsc; @@ -62,7 +63,7 @@ pub(crate) enum TurnMessage { ToolCall { cell_id: String, id: String, - name: String, + name: ToolName, input: Option, }, Notify { @@ -87,7 +88,7 @@ pub(crate) enum RuntimeEvent { YieldRequested, ToolCall { id: String, - name: String, + name: ToolName, input: Option, }, Notify { @@ -104,6 +105,8 @@ 
pub(crate) fn spawn_runtime( request: ExecuteRequest, event_tx: mpsc::UnboundedSender, ) -> Result<(std_mpsc::Sender, v8::IsolateHandle), String> { + initialize_v8()?; + let (command_tx, command_rx) = std_mpsc::channel(); let runtime_command_tx = command_tx.clone(); let (isolate_handle_tx, isolate_handle_rx) = std_mpsc::sync_channel(1); @@ -164,15 +167,20 @@ pub(super) enum CompletionState { }, } -fn initialize_v8() { - static PLATFORM: OnceLock> = OnceLock::new(); +fn initialize_v8() -> Result<(), String> { + static PLATFORM: OnceLock, String>> = OnceLock::new(); - let _ = PLATFORM.get_or_init(|| { + match PLATFORM.get_or_init(|| { + v8::icu::set_common_data_77(deno_core_icudata::ICU_DATA) + .map_err(|error_code| format!("failed to initialize ICU data: {error_code}"))?; let platform = v8::new_default_platform(0, false).make_shared(); v8::V8::initialize_platform(platform.clone()); v8::V8::initialize(); - platform - }); + Ok(platform) + }) { + Ok(_) => Ok(()), + Err(error_text) => Err(error_text.clone()), + } } fn run_runtime( @@ -182,8 +190,6 @@ fn run_runtime( isolate_handle_tx: std_mpsc::SyncSender, runtime_command_tx: std_mpsc::Sender, ) { - initialize_v8(); - let isolate = &mut v8::Isolate::new(v8::CreateParams::default()); let isolate_handle = isolate.thread_safe_handle(); if isolate_handle_tx.send(isolate_handle).is_err() { diff --git a/codex-rs/code-mode/src/runtime/value.rs b/codex-rs/code-mode/src/runtime/value.rs index eb0280142c..5c63434f4f 100644 --- a/codex-rs/code-mode/src/runtime/value.rs +++ b/codex-rs/code-mode/src/runtime/value.rs @@ -3,6 +3,9 @@ use serde_json::Value as JsonValue; use crate::response::FunctionCallOutputContentItem; use crate::response::ImageDetail; +const IMAGE_HELPER_EXPECTS_MESSAGE: &str = "image expects a non-empty image URL string, an object with image_url and optional detail, or a raw MCP image block"; +const CODEX_IMAGE_DETAIL_META_KEY: &str = "codex/imageDetail"; + pub(super) fn serialize_output_text( scope: &mut 
v8::PinScope<'_, '_>, value: v8::Local<'_, v8::Value>, @@ -34,45 +37,25 @@ pub(super) fn serialize_output_text( pub(super) fn normalize_output_image( scope: &mut v8::PinScope<'_, '_>, value: v8::Local<'_, v8::Value>, + detail_override: Option, ) -> Result { let result = (|| -> Result { let (image_url, detail) = if value.is_string() { (value.to_rust_string_lossy(scope), None) } else if value.is_object() && !value.is_array() { - let object = v8::Local::::try_from(value).map_err(|_| { - "image expects a non-empty image URL string or an object with image_url and optional detail".to_string() - })?; - let image_url_key = v8::String::new(scope, "image_url") - .ok_or_else(|| "failed to allocate image helper keys".to_string())?; - let detail_key = v8::String::new(scope, "detail") - .ok_or_else(|| "failed to allocate image helper keys".to_string())?; - let image_url = object - .get(scope, image_url_key.into()) - .filter(|value| value.is_string()) - .map(|value| value.to_rust_string_lossy(scope)) - .ok_or_else(|| { - "image expects a non-empty image URL string or an object with image_url and optional detail" - .to_string() - })?; - let detail = match object.get(scope, detail_key.into()) { - Some(value) if value.is_string() => Some(value.to_rust_string_lossy(scope)), - Some(value) if value.is_null() || value.is_undefined() => None, - Some(_) => return Err("image detail must be a string when provided".to_string()), - None => None, - }; - (image_url, detail) + let object = v8::Local::::try_from(value) + .map_err(|_| IMAGE_HELPER_EXPECTS_MESSAGE.to_string())?; + if let Some(image) = parse_non_mcp_output_image(scope, object)? { + image + } else { + parse_mcp_output_image(scope, value)? 
+ } } else { - return Err( - "image expects a non-empty image URL string or an object with image_url and optional detail" - .to_string(), - ); + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); }; if image_url.is_empty() { - return Err( - "image expects a non-empty image URL string or an object with image_url and optional detail" - .to_string(), - ); + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); } let lower = image_url.to_ascii_lowercase(); if !(lower.starts_with("http://") @@ -82,6 +65,7 @@ pub(super) fn normalize_output_image( return Err("image expects an http(s) or data URL".to_string()); } + let detail = detail_override.or(detail); let detail = match detail { Some(detail) => { let normalized = detail.to_ascii_lowercase(); @@ -112,6 +96,86 @@ pub(super) fn normalize_output_image( } } +fn parse_non_mcp_output_image( + scope: &mut v8::PinScope<'_, '_>, + object: v8::Local<'_, v8::Object>, +) -> Result)>, String> { + let image_url_key = v8::String::new(scope, "image_url") + .ok_or_else(|| "failed to allocate image helper keys".to_string())?; + let Some(image_url) = object.get(scope, image_url_key.into()) else { + return Ok(None); + }; + if image_url.is_undefined() { + return Ok(None); + } + if !image_url.is_string() { + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); + } + let detail_key = v8::String::new(scope, "detail") + .ok_or_else(|| "failed to allocate image helper keys".to_string())?; + let detail = parse_image_detail_value(scope, object.get(scope, detail_key.into()))?; + Ok(Some((image_url.to_rust_string_lossy(scope), detail))) +} + +fn parse_mcp_output_image( + scope: &mut v8::PinScope<'_, '_>, + value: v8::Local<'_, v8::Value>, +) -> Result<(String, Option), String> { + let Some(result) = v8_value_to_json(scope, value)? 
else { + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); + }; + let JsonValue::Object(result) = result else { + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); + }; + let Some(item_type) = result.get("type").and_then(JsonValue::as_str) else { + return Err(IMAGE_HELPER_EXPECTS_MESSAGE.to_string()); + }; + if item_type != "image" { + return Err(format!( + "image only accepts MCP image blocks, got \"{item_type}\"" + )); + } + let data = result + .get("data") + .and_then(JsonValue::as_str) + .ok_or_else(|| "image expected MCP image data".to_string())?; + if data.is_empty() { + return Err("image expected MCP image data".to_string()); + } + + let image_url = if data.to_ascii_lowercase().starts_with("data:") { + data.to_string() + } else { + let mime_type = result + .get("mimeType") + .or_else(|| result.get("mime_type")) + .and_then(JsonValue::as_str) + .filter(|mime_type| !mime_type.is_empty()) + .unwrap_or("application/octet-stream"); + format!("data:{mime_type};base64,{data}") + }; + let detail = result + .get("_meta") + .and_then(JsonValue::as_object) + .and_then(|meta| meta.get(CODEX_IMAGE_DETAIL_META_KEY)) + .and_then(JsonValue::as_str) + .filter(|detail| *detail == "original") + .map(str::to_string); + Ok((image_url, detail)) +} + +fn parse_image_detail_value<'s>( + scope: &mut v8::PinScope<'s, '_>, + value: Option>, +) -> Result, String> { + match value { + Some(value) if value.is_string() => Ok(Some(value.to_rust_string_lossy(scope))), + Some(value) if value.is_null() || value.is_undefined() => Ok(None), + Some(_) => Err("image detail must be a string when provided".to_string()), + None => Ok(None), + } +} + pub(super) fn v8_value_to_json( scope: &mut v8::PinScope<'_, '_>, value: v8::Local<'_, v8::Value>, diff --git a/codex-rs/code-mode/src/service.rs b/codex-rs/code-mode/src/service.rs index 5b67dd17b8..23ca7a7460 100644 --- a/codex-rs/code-mode/src/service.rs +++ b/codex-rs/code-mode/src/service.rs @@ -5,6 +5,7 @@ use std::sync::atomic::Ordering; 
use std::time::Duration; use async_trait::async_trait; +use codex_protocol::ToolName; use serde_json::Value as JsonValue; use tokio::sync::Mutex; use tokio::sync::mpsc; @@ -26,7 +27,7 @@ use crate::runtime::spawn_runtime; pub trait CodeModeTurnHost: Send + Sync { async fn invoke_tool( &self, - tool_name: String, + tool_name: ToolName, input: Option, cancellation_token: CancellationToken, ) -> Result; @@ -561,6 +562,85 @@ mod tests { ); } + #[tokio::test] + async fn date_locale_string_formats_with_icu_data() { + let service = CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +const value = new Date("2025-01-02T03:04:05Z") + .toLocaleString("fr-FR", { + weekday: "long", + month: "long", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + hour12: false, + timeZone: "UTC", + }); +text(value); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: vec![FunctionCallOutputContentItem::InputText { + text: "jeudi 2 janvier \u{e0} 03:04:05".to_string(), + }], + stored_values: HashMap::new(), + error_text: None, + } + ); + } + + #[tokio::test] + async fn intl_date_time_format_formats_with_icu_data() { + let service = CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +const formatter = new Intl.DateTimeFormat("fr-FR", { + weekday: "long", + month: "long", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + hour12: false, + timeZone: "UTC", +}); +text(formatter.format(new Date("2025-01-02T03:04:05Z"))); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: vec![FunctionCallOutputContentItem::InputText { + text: "jeudi 2 janvier \u{e0} 03:04:05".to_string(), + 
}], + stored_values: HashMap::new(), + error_text: None, + } + ); + } + #[tokio::test] async fn output_helpers_return_undefined() { let service = CodeModeService::new(); @@ -604,6 +684,154 @@ text(JSON.stringify(returnsUndefined)); ); } + #[tokio::test] + async fn image_helper_accepts_raw_mcp_image_block_with_original_detail() { + let service = CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +image({ + type: "image", + data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGP4z8DwHwAFAAH/iZk9HQAAAABJRU5ErkJggg==", + mimeType: "image/png", + _meta: { "codex/imageDetail": "original" }, +}); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: vec![FunctionCallOutputContentItem::InputImage { + image_url: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGP4z8DwHwAFAAH/iZk9HQAAAABJRU5ErkJggg==".to_string(), + detail: Some(crate::ImageDetail::Original), + }], + stored_values: HashMap::new(), + error_text: None, + } + ); + } + + #[tokio::test] + async fn image_helper_second_arg_overrides_explicit_object_detail() { + let service = CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +image( + { + image_url: "https://example.com/image.jpg", + detail: "low", + }, + "original", +); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: vec![FunctionCallOutputContentItem::InputImage { + image_url: "https://example.com/image.jpg".to_string(), + detail: Some(crate::ImageDetail::Original), + }], + stored_values: HashMap::new(), + error_text: None, + } + ); + } + + #[tokio::test] + async fn image_helper_second_arg_overrides_raw_mcp_image_detail() { + let service = 
CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +image( + { + type: "image", + data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGP4z8DwHwAFAAH/iZk9HQAAAABJRU5ErkJggg==", + mimeType: "image/png", + _meta: { "codex/imageDetail": "original" }, + }, + "low", +); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: vec![FunctionCallOutputContentItem::InputImage { + image_url: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGP4z8DwHwAFAAH/iZk9HQAAAABJRU5ErkJggg==".to_string(), + detail: Some(crate::ImageDetail::Low), + }], + stored_values: HashMap::new(), + error_text: None, + } + ); + } + + #[tokio::test] + async fn image_helper_rejects_raw_mcp_result_container() { + let service = CodeModeService::new(); + + let response = service + .execute(ExecuteRequest { + source: r#" +image({ + content: [ + { + type: "image", + data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR4nGP4z8DwHwAFAAH/iZk9HQAAAABJRU5ErkJggg==", + mimeType: "image/png", + _meta: { "codex/imageDetail": "original" }, + }, + ], + isError: false, +}); +"# + .to_string(), + yield_time_ms: None, + ..execute_request("") + }) + .await + .unwrap(); + + assert_eq!( + response, + RuntimeResponse::Result { + cell_id: "1".to_string(), + content_items: Vec::new(), + stored_values: HashMap::new(), + error_text: Some( + "image expects a non-empty image URL string, an object with image_url and optional detail, or a raw MCP image block".to_string(), + ), + } + ); + } + #[tokio::test] async fn terminate_waits_for_runtime_shutdown_before_responding() { let inner = test_inner(); diff --git a/codex-rs/codex-api/src/api_bridge.rs b/codex-rs/codex-api/src/api_bridge.rs index 0ad2b13979..2677ff3e55 100644 --- a/codex-rs/codex-api/src/api_bridge.rs +++ b/codex-rs/codex-api/src/api_bridge.rs 
@@ -12,6 +12,7 @@ use codex_protocol::error::RetryLimitReachedError; use codex_protocol::error::UnexpectedResponseError; use codex_protocol::error::UsageLimitReachedError; use http::HeaderMap; +use http::HeaderValue; use serde::Deserialize; use serde_json::Value; @@ -178,6 +179,7 @@ struct UsageErrorBody { pub struct CoreAuthProvider { pub token: Option, pub account_id: Option, + pub is_fedramp_account: bool, } impl CoreAuthProvider { @@ -195,16 +197,25 @@ impl CoreAuthProvider { Self { token: token.map(str::to_string), account_id: account_id.map(str::to_string), + is_fedramp_account: false, } } } impl ApiAuthProvider for CoreAuthProvider { - fn bearer_token(&self) -> Option { - self.token.clone() - } - - fn account_id(&self) -> Option { - self.account_id.clone() + fn add_auth_headers(&self, headers: &mut HeaderMap) { + if let Some(token) = self.token.as_ref() + && let Ok(header) = HeaderValue::from_str(&format!("Bearer {token}")) + { + let _ = headers.insert(http::header::AUTHORIZATION, header); + } + if let Some(account_id) = self.account_id.as_ref() + && let Ok(header) = HeaderValue::from_str(account_id) + { + let _ = headers.insert("ChatGPT-Account-ID", header); + } + if self.is_fedramp_account { + crate::auth::add_fedramp_routing_header(headers); + } } } diff --git a/codex-rs/codex-api/src/api_bridge_tests.rs b/codex-rs/codex-api/src/api_bridge_tests.rs index 71d3889915..c7d4bbbdab 100644 --- a/codex-rs/codex-api/src/api_bridge_tests.rs +++ b/codex-rs/codex-api/src/api_bridge_tests.rs @@ -136,8 +136,49 @@ fn core_auth_provider_reports_when_auth_header_will_attach() { let auth = CoreAuthProvider { token: Some("access-token".to_string()), account_id: None, + is_fedramp_account: false, }; assert!(auth.auth_header_attached()); assert_eq!(auth.auth_header_name(), Some("authorization")); } + +#[test] +fn core_auth_provider_adds_auth_headers() { + let auth = CoreAuthProvider::for_test(Some("access-token"), Some("workspace-123")); + let mut headers = HeaderMap::new(); 
+ + crate::AuthProvider::add_auth_headers(&auth, &mut headers); + + assert_eq!( + headers + .get(http::header::AUTHORIZATION) + .and_then(|value| value.to_str().ok()), + Some("Bearer access-token") + ); + assert_eq!( + headers + .get("ChatGPT-Account-ID") + .and_then(|value| value.to_str().ok()), + Some("workspace-123") + ); +} + +#[test] +fn core_auth_provider_adds_fedramp_routing_header_for_fedramp_accounts() { + let auth = CoreAuthProvider { + token: Some("access-token".to_string()), + account_id: Some("workspace-123".to_string()), + is_fedramp_account: true, + }; + let mut headers = HeaderMap::new(); + + crate::AuthProvider::add_auth_headers(&auth, &mut headers); + + assert_eq!( + headers + .get("X-OpenAI-Fedramp") + .and_then(|value| value.to_str().ok()), + Some("true") + ); +} diff --git a/codex-rs/codex-api/src/auth.rs b/codex-rs/codex-api/src/auth.rs index f649062db1..efa5fb3288 100644 --- a/codex-rs/codex-api/src/auth.rs +++ b/codex-rs/codex-api/src/auth.rs @@ -1,33 +1,34 @@ -use codex_client::Request; use http::HeaderMap; use http::HeaderValue; -/// Provides bearer and account identity information for API requests. +/// Adds authentication headers to API requests. /// /// Implementations should be cheap and non-blocking; any asynchronous /// refresh or I/O should be handled by higher layers before requests /// reach this interface. 
pub trait AuthProvider: Send + Sync { - fn bearer_token(&self) -> Option; - fn account_id(&self) -> Option { - None - } + fn add_auth_headers(&self, headers: &mut HeaderMap); } -pub(crate) fn add_auth_headers_to_header_map(auth: &A, headers: &mut HeaderMap) { - if let Some(token) = auth.bearer_token() - && let Ok(header) = HeaderValue::from_str(&format!("Bearer {token}")) - { - let _ = headers.insert(http::header::AUTHORIZATION, header); - } - if let Some(account_id) = auth.account_id() - && let Ok(header) = HeaderValue::from_str(&account_id) - { - let _ = headers.insert("ChatGPT-Account-ID", header); - } +pub(crate) fn add_fedramp_routing_header(headers: &mut HeaderMap) { + headers.insert("X-OpenAI-Fedramp", HeaderValue::from_static("true")); } -pub(crate) fn add_auth_headers(auth: &A, mut req: Request) -> Request { - add_auth_headers_to_header_map(auth, &mut req.headers); - req +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn add_fedramp_routing_header_sets_header() { + let mut headers = HeaderMap::new(); + + add_fedramp_routing_header(&mut headers); + + assert_eq!( + headers + .get("X-OpenAI-Fedramp") + .and_then(|v| v.to_str().ok()), + Some("true") + ); + } } diff --git a/codex-rs/codex-api/src/common.rs b/codex-rs/codex-api/src/common.rs index ef4dd5fb83..d088206496 100644 --- a/codex-rs/codex-api/src/common.rs +++ b/codex-rs/codex-api/src/common.rs @@ -80,6 +80,11 @@ pub enum ResponseEvent { token_usage: Option, }, OutputTextDelta(String), + ToolCallInputDelta { + item_id: String, + call_id: Option, + delta: String, + }, ReasoningSummaryDelta { delta: String, summary_index: i64, diff --git a/codex-rs/codex-api/src/endpoint/compact.rs b/codex-rs/codex-api/src/endpoint/compact.rs index 44a56a11a7..748ac35558 100644 --- a/codex-rs/codex-api/src/endpoint/compact.rs +++ b/codex-rs/codex-api/src/endpoint/compact.rs @@ -90,9 +90,7 @@ mod tests { struct DummyAuth; impl AuthProvider for DummyAuth { - fn bearer_token(&self) -> Option { - None - } + fn 
add_auth_headers(&self, _headers: &mut HeaderMap) {} } #[test] diff --git a/codex-rs/codex-api/src/endpoint/memories.rs b/codex-rs/codex-api/src/endpoint/memories.rs index 5cb2a65b1f..3047c859db 100644 --- a/codex-rs/codex-api/src/endpoint/memories.rs +++ b/codex-rs/codex-api/src/endpoint/memories.rs @@ -103,9 +103,7 @@ mod tests { struct DummyAuth; impl AuthProvider for DummyAuth { - fn bearer_token(&self) -> Option { - None - } + fn add_auth_headers(&self, _headers: &mut HeaderMap) {} } #[derive(Clone)] diff --git a/codex-rs/codex-api/src/endpoint/mod.rs b/codex-rs/codex-api/src/endpoint/mod.rs index 4a208317a9..c16687ff28 100644 --- a/codex-rs/codex-api/src/endpoint/mod.rs +++ b/codex-rs/codex-api/src/endpoint/mod.rs @@ -13,6 +13,7 @@ pub use models::ModelsClient; pub use realtime_call::RealtimeCallClient; pub use realtime_call::RealtimeCallResponse; pub use realtime_websocket::RealtimeEventParser; +pub use realtime_websocket::RealtimeOutputModality; pub use realtime_websocket::RealtimeSessionConfig; pub use realtime_websocket::RealtimeSessionMode; pub use realtime_websocket::RealtimeWebsocketClient; diff --git a/codex-rs/codex-api/src/endpoint/models.rs b/codex-rs/codex-api/src/endpoint/models.rs index 97781ac419..17342d6f99 100644 --- a/codex-rs/codex-api/src/endpoint/models.rs +++ b/codex-rs/codex-api/src/endpoint/models.rs @@ -132,9 +132,7 @@ mod tests { struct DummyAuth; impl AuthProvider for DummyAuth { - fn bearer_token(&self) -> Option { - None - } + fn add_auth_headers(&self, _headers: &mut HeaderMap) {} } fn provider(base_url: &str) -> Provider { diff --git a/codex-rs/codex-api/src/endpoint/realtime_call.rs b/codex-rs/codex-api/src/endpoint/realtime_call.rs index 8a68d088c7..a9a8b963cf 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_call.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_call.rs @@ -19,6 +19,7 @@ use serde_json::to_string; use serde_json::to_value; use std::sync::Arc; use tracing::instrument; +use tracing::trace; const 
MULTIPART_BOUNDARY: &str = "codex-realtime-call-boundary"; const MULTIPART_CONTENT_TYPE: &str = "multipart/form-data; boundary=codex-realtime-call-boundary"; @@ -118,6 +119,7 @@ impl RealtimeCallClient { session_config: RealtimeSessionConfig, extra_headers: HeaderMap, ) -> Result { + trace!(target: "codex_api::realtime_websocket::wire", "realtime call request SDP: {sdp}"); // WebRTC can begin inference as soon as the peer connection comes up, so the initial // session payload is sent with call creation. The sideband WebSocket still sends its normal // session.update after it joins. @@ -200,6 +202,7 @@ fn decode_call_id_from_location(headers: &HeaderMap) -> Result .ok_or_else(|| ApiError::Stream("realtime call response missing Location".to_string()))? .to_str() .map_err(|err| ApiError::Stream(format!("invalid realtime call Location: {err}")))?; + trace!("realtime call Location: {location}"); location .split('?') @@ -219,6 +222,7 @@ fn decode_call_id_from_location(headers: &HeaderMap) -> Result mod tests { use super::*; use crate::endpoint::realtime_websocket::RealtimeEventParser; + use crate::endpoint::realtime_websocket::RealtimeOutputModality; use crate::endpoint::realtime_websocket::RealtimeSessionMode; use crate::provider::RetryConfig; use async_trait::async_trait; @@ -280,8 +284,11 @@ mod tests { struct DummyAuth; impl AuthProvider for DummyAuth { - fn bearer_token(&self) -> Option { - Some("test-token".to_string()) + fn add_auth_headers(&self, headers: &mut HeaderMap) { + headers.insert( + http::header::AUTHORIZATION, + HeaderValue::from_static("Bearer test-token"), + ); } } @@ -309,6 +316,7 @@ mod tests { session_id: Some(session_id.to_string()), event_parser: RealtimeEventParser::RealtimeV2, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Marin, } } diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs 
b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs index a2681f4969..29de90d403 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs @@ -7,9 +7,9 @@ use crate::endpoint::realtime_websocket::protocol::RealtimeAudioFrame; use crate::endpoint::realtime_websocket::protocol::RealtimeEvent; use crate::endpoint::realtime_websocket::protocol::RealtimeEventParser; use crate::endpoint::realtime_websocket::protocol::RealtimeOutboundMessage; +use crate::endpoint::realtime_websocket::protocol::RealtimeOutputModality; use crate::endpoint::realtime_websocket::protocol::RealtimeSessionConfig; use crate::endpoint::realtime_websocket::protocol::RealtimeSessionMode; -use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptDelta; use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptEntry; use crate::endpoint::realtime_websocket::protocol::RealtimeVoice; use crate::endpoint::realtime_websocket::protocol::parse_realtime_event; @@ -17,6 +17,7 @@ use crate::error::ApiError; use crate::provider::Provider; use codex_client::backoff; use codex_client::maybe_build_rustls_client_config_with_custom_ca; +use codex_protocol::protocol::RealtimeTranscriptDelta; use codex_utils_rustls_provider::ensure_rustls_crypto_provider; use futures::SinkExt; use futures::StreamExt; @@ -44,6 +45,8 @@ use tracing::warn; use tungstenite::protocol::WebSocketConfig; use url::Url; +const REALTIME_WIRE_LOG_TARGET: &str = "codex_api::realtime_websocket::wire"; + struct WsStream { tx_command: mpsc::Sender, pump_task: tokio::task::JoinHandle<()>, @@ -307,10 +310,17 @@ impl RealtimeWebsocketWriter { &self, instructions: String, session_mode: RealtimeSessionMode, + output_modality: RealtimeOutputModality, voice: RealtimeVoice, ) -> Result<(), ApiError> { let session_mode = normalized_session_mode(self.event_parser, session_mode); - let session = session_update_session(self.event_parser, 
instructions, session_mode, voice); + let session = session_update_session( + self.event_parser, + instructions, + session_mode, + output_modality, + voice, + ); self.send_json(&RealtimeOutboundMessage::SessionUpdate { session }) .await } @@ -343,6 +353,7 @@ impl RealtimeWebsocketWriter { )); } + trace!(target: REALTIME_WIRE_LOG_TARGET, "realtime websocket request: {payload}"); self.stream .send(Message::Text(payload.into())) .await @@ -376,6 +387,7 @@ impl RealtimeWebsocketEvents { match msg { Message::Text(text) => { + trace!(target: REALTIME_WIRE_LOG_TARGET, "realtime websocket event: {text}"); if let Some(mut event) = parse_realtime_event(&text, self.event_parser) { self.update_active_transcript(&mut event).await; debug!(?event, "realtime websocket parsed event"); @@ -406,10 +418,10 @@ impl RealtimeWebsocketEvents { let mut active_transcript = self.active_transcript.lock().await; match event { RealtimeEvent::InputAudioSpeechStarted(_) => {} - RealtimeEvent::InputTranscriptDelta(RealtimeTranscriptDelta { delta }) => { + RealtimeEvent::InputTranscriptDelta(RealtimeTranscriptDelta { delta, .. }) => { append_transcript_delta(&mut active_transcript.entries, "user", delta); } - RealtimeEvent::OutputTranscriptDelta(RealtimeTranscriptDelta { delta }) => { + RealtimeEvent::OutputTranscriptDelta(RealtimeTranscriptDelta { delta, .. }) => { append_transcript_delta(&mut active_transcript.entries, "assistant", delta); } RealtimeEvent::HandoffRequested(handoff) => { @@ -418,6 +430,8 @@ impl RealtimeWebsocketEvents { } } RealtimeEvent::SessionUpdated { .. 
} + | RealtimeEvent::InputTranscriptDone(_) + | RealtimeEvent::OutputTranscriptDone(_) | RealtimeEvent::AudioOut(_) | RealtimeEvent::ResponseCreated(_) | RealtimeEvent::ResponseCancelled(_) @@ -581,7 +595,12 @@ impl RealtimeWebsocketClient { ); connection .writer - .send_session_update(config.instructions, config.session_mode, config.voice) + .send_session_update( + config.instructions, + config.session_mode, + config.output_modality, + config.voice, + ) .await?; Ok(connection) } @@ -721,13 +740,14 @@ fn normalize_realtime_path(url: &mut Url) { #[cfg(test)] mod tests { use super::*; - use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptDelta; use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptEntry; use codex_protocol::protocol::RealtimeHandoffRequested; use codex_protocol::protocol::RealtimeInputAudioSpeechStarted; use codex_protocol::protocol::RealtimeResponseCancelled; use codex_protocol::protocol::RealtimeResponseCreated; use codex_protocol::protocol::RealtimeResponseDone; + use codex_protocol::protocol::RealtimeTranscriptDelta; + use codex_protocol::protocol::RealtimeTranscriptDone; use codex_protocol::protocol::RealtimeVoice; use http::HeaderValue; use pretty_assertions::assert_eq; @@ -894,6 +914,8 @@ mod tests { fn parse_realtime_v2_input_audio_transcription_delta_event() { let payload = json!({ "type": "conversation.item.input_audio_transcription.delta", + "item_id": "item_input_1", + "content_index": 0, "delta": "hello" }) .to_string(); @@ -908,6 +930,32 @@ mod tests { ); } + #[test] + fn parse_realtime_v2_item_done_output_text_event() { + let payload = json!({ + "type": "conversation.item.done", + "item": { + "id": "item_output_1", + "type": "message", + "role": "assistant", + "content": [ + {"type": "output_text", "text": "hello"}, + {"type": "output_text", "text": " world"} + ] + } + }) + .to_string(); + + assert_eq!( + parse_realtime_event(payload.as_str(), RealtimeEventParser::RealtimeV2), + 
Some(RealtimeEvent::OutputTranscriptDone( + RealtimeTranscriptDone { + text: "hello world".to_string(), + } + )) + ); + } + #[test] fn parse_realtime_v2_output_audio_delta_defaults_audio_shape() { let payload = json!({ @@ -1374,6 +1422,7 @@ mod tests { session_id: Some("conv_1".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Breeze, }, HeaderMap::new(), @@ -1539,6 +1588,7 @@ mod tests { "type": "server_vad", "interrupt_response": true, "create_response": true, + "silence_duration_ms": 500, }) ); assert_eq!( @@ -1621,7 +1671,7 @@ mod tests { ); assert_eq!( third_json["item"]["output"], - Value::String("\"Agent Final Message\":\n\ndelegated result".to_string()) + Value::String("delegated result".to_string()) ); }); @@ -1648,6 +1698,7 @@ mod tests { session_id: Some("conv_1".to_string()), event_parser: RealtimeEventParser::RealtimeV2, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cedar, }, HeaderMap::new(), @@ -1753,6 +1804,7 @@ mod tests { session_id: Some("conv_1".to_string()), event_parser: RealtimeEventParser::RealtimeV2, session_mode: RealtimeSessionMode::Transcription, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Marin, }, HeaderMap::new(), @@ -1856,6 +1908,7 @@ mod tests { session_id: Some("conv_1".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Transcription, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), @@ -1945,6 +1998,7 @@ mod tests { session_id: Some("conv_1".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), diff --git 
a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_common.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_common.rs index 8eb079fe83..c0efcce165 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_common.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_common.rs @@ -8,6 +8,7 @@ use crate::endpoint::realtime_websocket::methods_v2::session_update_session as v use crate::endpoint::realtime_websocket::methods_v2::websocket_intent as v2_websocket_intent; use crate::endpoint::realtime_websocket::protocol::RealtimeEventParser; use crate::endpoint::realtime_websocket::protocol::RealtimeOutboundMessage; +use crate::endpoint::realtime_websocket::protocol::RealtimeOutputModality; use crate::endpoint::realtime_websocket::protocol::RealtimeSessionConfig; use crate::endpoint::realtime_websocket::protocol::RealtimeSessionMode; use crate::endpoint::realtime_websocket::protocol::RealtimeVoice; @@ -44,9 +45,11 @@ pub(super) fn conversation_handoff_append_message( handoff_id: String, output_text: String, ) -> RealtimeOutboundMessage { - let output_text = format!("{AGENT_FINAL_MESSAGE_PREFIX}{output_text}"); match event_parser { - RealtimeEventParser::V1 => v1_conversation_handoff_append_message(handoff_id, output_text), + RealtimeEventParser::V1 => v1_conversation_handoff_append_message( + handoff_id, + format!("{AGENT_FINAL_MESSAGE_PREFIX}{output_text}"), + ), RealtimeEventParser::RealtimeV2 => { v2_conversation_handoff_append_message(handoff_id, output_text) } @@ -57,13 +60,14 @@ pub(super) fn session_update_session( event_parser: RealtimeEventParser, instructions: String, session_mode: RealtimeSessionMode, + output_modality: RealtimeOutputModality, voice: RealtimeVoice, ) -> SessionUpdateSession { let session_mode = normalized_session_mode(event_parser, session_mode); match event_parser { RealtimeEventParser::V1 => v1_session_update_session(instructions, voice), RealtimeEventParser::RealtimeV2 => { - 
v2_session_update_session(instructions, session_mode, voice) + v2_session_update_session(instructions, session_mode, output_modality, voice) } } } @@ -73,6 +77,7 @@ pub fn session_update_session_json(config: RealtimeSessionConfig) -> JsonResult< config.event_parser, config.instructions, config.session_mode, + config.output_modality, config.voice, ); session.id = config.session_id; diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_v2.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_v2.rs index cfca6fce61..32f23a970b 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_v2.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods_v2.rs @@ -9,6 +9,7 @@ use crate::endpoint::realtime_websocket::protocol::ConversationMessageItem; use crate::endpoint::realtime_websocket::protocol::ConversationRole; use crate::endpoint::realtime_websocket::protocol::NoiseReductionType; use crate::endpoint::realtime_websocket::protocol::RealtimeOutboundMessage; +use crate::endpoint::realtime_websocket::protocol::RealtimeOutputModality; use crate::endpoint::realtime_websocket::protocol::RealtimeSessionMode; use crate::endpoint::realtime_websocket::protocol::RealtimeVoice; use crate::endpoint::realtime_websocket::protocol::SessionAudio; @@ -26,9 +27,10 @@ use crate::endpoint::realtime_websocket::protocol::TurnDetectionType; use serde_json::json; const REALTIME_V2_OUTPUT_MODALITY_AUDIO: &str = "audio"; +const REALTIME_V2_OUTPUT_MODALITY_TEXT: &str = "text"; const REALTIME_V2_TOOL_CHOICE: &str = "auto"; const REALTIME_V2_BACKGROUND_AGENT_TOOL_NAME: &str = "background_agent"; -const REALTIME_V2_BACKGROUND_AGENT_TOOL_DESCRIPTION: &str = "Send a user request to the background agent. Use this as the default action. If the background agent is idle, this starts a new task and returns the final result to the user. If the background agent is already working on a task, this sends the request as guidance to steer that previous task. 
If the user asks to do something next, later, after this, or once current work finishes, call this tool so the work is actually queued instead of merely promising to do it later."; +const REALTIME_V2_BACKGROUND_AGENT_TOOL_DESCRIPTION: &str = "Send a user request to the background agent. Use this as the default action. Do not rephrase the user's ask or rewrite it in your own words; pass along the user's own words. If the background agent is idle, this starts a new task and returns the final result to the user. If the background agent is already working on a task, this sends the request as guidance to steer that previous task. If the user asks to do something next, later, after this, or once current work finishes, call this tool so the work is actually queued instead of merely promising to do it later."; pub(super) fn conversation_item_create_message(text: String) -> RealtimeOutboundMessage { RealtimeOutboundMessage::ConversationItemCreate { @@ -59,6 +61,7 @@ pub(super) fn conversation_handoff_append_message( pub(super) fn session_update_session( instructions: String, session_mode: RealtimeSessionMode, + output_modality: RealtimeOutputModality, voice: RealtimeVoice, ) -> SessionUpdateSession { match session_mode { @@ -67,7 +70,7 @@ pub(super) fn session_update_session( r#type: SessionType::Realtime, model: None, instructions: Some(instructions), - output_modalities: Some(vec![REALTIME_V2_OUTPUT_MODALITY_AUDIO.to_string()]), + output_modalities: Some(vec![output_modality_value(output_modality).to_string()]), audio: SessionAudio { input: SessionAudioInput { format: SessionAudioFormat { @@ -81,6 +84,7 @@ pub(super) fn session_update_session( r#type: TurnDetectionType::ServerVad, interrupt_response: true, create_response: true, + silence_duration_ms: 500, }), }, output: Some(SessionAudioOutput { @@ -132,6 +136,13 @@ pub(super) fn session_update_session( } } +fn output_modality_value(output_modality: RealtimeOutputModality) -> &'static str { + match output_modality { + 
RealtimeOutputModality::Text => REALTIME_V2_OUTPUT_MODALITY_TEXT, + RealtimeOutputModality::Audio => REALTIME_V2_OUTPUT_MODALITY_AUDIO, + } +} + pub(super) fn websocket_intent() -> Option<&'static str> { None } diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs index 4031e01286..1fb49b2436 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs @@ -13,5 +13,6 @@ pub use methods::RealtimeWebsocketEvents; pub use methods::RealtimeWebsocketWriter; pub use methods_common::session_update_session_json; pub use protocol::RealtimeEventParser; +pub use protocol::RealtimeOutputModality; pub use protocol::RealtimeSessionConfig; pub use protocol::RealtimeSessionMode; diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs index 0185984c61..8a88cc34d1 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs @@ -2,7 +2,7 @@ use crate::endpoint::realtime_websocket::protocol_v1::parse_realtime_event_v1; use crate::endpoint::realtime_websocket::protocol_v2::parse_realtime_event_v2; pub use codex_protocol::protocol::RealtimeAudioFrame; pub use codex_protocol::protocol::RealtimeEvent; -pub use codex_protocol::protocol::RealtimeTranscriptDelta; +pub use codex_protocol::protocol::RealtimeOutputModality; pub use codex_protocol::protocol::RealtimeTranscriptEntry; pub use codex_protocol::protocol::RealtimeVoice; use serde::Serialize; @@ -27,6 +27,7 @@ pub struct RealtimeSessionConfig { pub session_id: Option, pub event_parser: RealtimeEventParser, pub session_mode: RealtimeSessionMode, + pub output_modality: RealtimeOutputModality, pub voice: RealtimeVoice, } @@ -129,6 +130,7 @@ pub(super) struct SessionTurnDetection { pub(super) r#type: TurnDetectionType, 
pub(super) interrupt_response: bool, pub(super) create_response: bool, + pub(super) silence_duration_ms: u32, } #[derive(Debug, Clone, Copy, Serialize)] diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_common.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_common.rs index dbd8544d94..c89c5ea4d0 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_common.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_common.rs @@ -1,5 +1,6 @@ use codex_protocol::protocol::RealtimeEvent; use codex_protocol::protocol::RealtimeTranscriptDelta; +use codex_protocol::protocol::RealtimeTranscriptDone; use serde_json::Value; use tracing::debug; @@ -53,6 +54,17 @@ pub(super) fn parse_transcript_delta_event( .map(|delta| RealtimeTranscriptDelta { delta }) } +pub(super) fn parse_transcript_done_event( + parsed: &Value, + field: &str, +) -> Option { + parsed + .get(field) + .and_then(Value::as_str) + .map(str::to_string) + .map(|text| RealtimeTranscriptDone { text }) +} + pub(super) fn parse_error_event(parsed: &Value) -> Option { parsed .get("message") diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_v2.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_v2.rs index 4c2c909e80..559e83426b 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_v2.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol_v2.rs @@ -2,6 +2,7 @@ use crate::endpoint::realtime_websocket::protocol_common::parse_error_event; use crate::endpoint::realtime_websocket::protocol_common::parse_realtime_payload; use crate::endpoint::realtime_websocket::protocol_common::parse_session_updated_event; use crate::endpoint::realtime_websocket::protocol_common::parse_transcript_delta_event; +use crate::endpoint::realtime_websocket::protocol_common::parse_transcript_done_event; use codex_protocol::protocol::RealtimeAudioFrame; use codex_protocol::protocol::RealtimeEvent; use 
codex_protocol::protocol::RealtimeHandoffRequested; @@ -9,6 +10,7 @@ use codex_protocol::protocol::RealtimeInputAudioSpeechStarted; use codex_protocol::protocol::RealtimeResponseCancelled; use codex_protocol::protocol::RealtimeResponseCreated; use codex_protocol::protocol::RealtimeResponseDone; +use codex_protocol::protocol::RealtimeTranscriptDone; use serde_json::Map as JsonMap; use serde_json::Value; use tracing::debug; @@ -30,8 +32,8 @@ pub(super) fn parse_realtime_event_v2(payload: &str) -> Option { parse_transcript_delta_event(&parsed, "delta").map(RealtimeEvent::InputTranscriptDelta) } "conversation.item.input_audio_transcription.completed" => { - parse_transcript_delta_event(&parsed, "transcript") - .map(RealtimeEvent::InputTranscriptDelta) + parse_transcript_done_event(&parsed, "transcript") + .map(RealtimeEvent::InputTranscriptDone) } "response.output_text.delta" | "response.output_audio_transcript.delta" => { parse_transcript_delta_event(&parsed, "delta").map(RealtimeEvent::OutputTranscriptDelta) @@ -120,12 +122,43 @@ fn parse_conversation_item_done_event(parsed: &Value) -> Option { return Some(handoff); } + if let Some(transcript_done) = parse_item_done_transcript(item) { + return Some(transcript_done); + } + item.get("id") .and_then(Value::as_str) .map(str::to_string) .map(|item_id| RealtimeEvent::ConversationItemDone { item_id }) } +fn parse_item_done_transcript(item: &JsonMap) -> Option { + let role = item.get("role").and_then(Value::as_str)?; + let text = item + .get("content") + .and_then(Value::as_array)? 
+ .iter() + .filter_map(item_content_text) + .collect::(); + if text.is_empty() { + return None; + } + + let done = RealtimeTranscriptDone { text }; + match role { + "user" => Some(RealtimeEvent::InputTranscriptDone(done)), + "assistant" => Some(RealtimeEvent::OutputTranscriptDone(done)), + _ => None, + } +} + +fn item_content_text(content: &Value) -> Option<&str> { + content + .get("text") + .or_else(|| content.get("transcript")) + .and_then(Value::as_str) +} + fn parse_handoff_requested_event(item: &JsonMap) -> Option { let item_type = item.get("type").and_then(Value::as_str); let item_name = item.get("name").and_then(Value::as_str); diff --git a/codex-rs/codex-api/src/endpoint/responses_websocket.rs b/codex-rs/codex-api/src/endpoint/responses_websocket.rs index a60d188bda..d2b775cdd5 100644 --- a/codex-rs/codex-api/src/endpoint/responses_websocket.rs +++ b/codex-rs/codex-api/src/endpoint/responses_websocket.rs @@ -1,5 +1,4 @@ use crate::auth::AuthProvider; -use crate::auth::add_auth_headers_to_header_map; use crate::common::ResponseEvent; use crate::common::ResponseStream; use crate::common::ResponsesWsRequest; @@ -310,7 +309,7 @@ impl ResponsesWebsocketClient { let mut headers = merge_request_headers(&self.provider.headers, extra_headers, default_headers); - add_auth_headers_to_header_map(&self.auth, &mut headers); + self.auth.add_auth_headers(&mut headers); let (stream, server_reasoning_included, models_etag, server_model) = connect_websocket(ws_url, headers, turn_state.clone()).await?; diff --git a/codex-rs/codex-api/src/endpoint/session.rs b/codex-rs/codex-api/src/endpoint/session.rs index 00919a0c54..e4a470ceee 100644 --- a/codex-rs/codex-api/src/endpoint/session.rs +++ b/codex-rs/codex-api/src/endpoint/session.rs @@ -1,5 +1,4 @@ use crate::auth::AuthProvider; -use crate::auth::add_auth_headers; use crate::error::ApiError; use crate::provider::Provider; use crate::telemetry::run_with_request_telemetry; @@ -56,7 +55,8 @@ impl EndpointSession { if let 
Some(body) = body { req.body = Some(RequestBody::Json(body.clone())); } - add_auth_headers(&self.auth, req) + self.auth.add_auth_headers(&mut req.headers); + req } pub(crate) async fn execute( diff --git a/codex-rs/codex-api/src/files.rs b/codex-rs/codex-api/src/files.rs index 6fad5b62f5..ebe35af5ba 100644 --- a/codex-rs/codex-api/src/files.rs +++ b/codex-rs/codex-api/src/files.rs @@ -256,17 +256,14 @@ fn authorized_request( method: reqwest::Method, url: &str, ) -> reqwest::RequestBuilder { + let mut headers = http::HeaderMap::new(); + auth.add_auth_headers(&mut headers); + let client = build_reqwest_client(); - let mut request = client + client .request(method, url) - .timeout(OPENAI_FILE_REQUEST_TIMEOUT); - if let Some(token) = auth.bearer_token() { - request = request.bearer_auth(token); - } - if let Some(account_id) = auth.account_id() { - request = request.header("chatgpt-account-id", account_id); - } - request + .timeout(OPENAI_FILE_REQUEST_TIMEOUT) + .headers(headers) } fn build_reqwest_client() -> reqwest::Client { diff --git a/codex-rs/codex-api/src/lib.rs b/codex-rs/codex-api/src/lib.rs index ac26d3cdba..82a1004849 100644 --- a/codex-rs/codex-api/src/lib.rs +++ b/codex-rs/codex-api/src/lib.rs @@ -41,6 +41,7 @@ pub use crate::endpoint::ModelsClient; pub use crate::endpoint::RealtimeCallClient; pub use crate::endpoint::RealtimeCallResponse; pub use crate::endpoint::RealtimeEventParser; +pub use crate::endpoint::RealtimeOutputModality; pub use crate::endpoint::RealtimeSessionConfig; pub use crate::endpoint::RealtimeSessionMode; pub use crate::endpoint::RealtimeWebsocketClient; @@ -56,7 +57,7 @@ pub use crate::error::ApiError; pub use crate::files::upload_local_file; pub use crate::provider::Provider; pub use crate::provider::RetryConfig; -pub use crate::provider::is_azure_responses_wire_base_url; +pub use crate::provider::is_azure_responses_provider; pub use crate::requests::Compression; pub use crate::sse::stream_from_fixture; pub use 
crate::telemetry::SseTelemetry; diff --git a/codex-rs/codex-api/src/provider.rs b/codex-rs/codex-api/src/provider.rs index 81a168ffd7..45f2512dc3 100644 --- a/codex-rs/codex-api/src/provider.rs +++ b/codex-rs/codex-api/src/provider.rs @@ -86,7 +86,7 @@ impl Provider { } pub fn is_azure_responses_endpoint(&self) -> bool { - is_azure_responses_wire_base_url(&self.name, Some(&self.base_url)) + is_azure_responses_provider(&self.name, Some(&self.base_url)) } pub fn websocket_url_for_path(&self, path: &str) -> Result { @@ -103,21 +103,20 @@ impl Provider { } } -pub fn is_azure_responses_wire_base_url(name: &str, base_url: Option<&str>) -> bool { +pub fn is_azure_responses_provider(name: &str, base_url: Option<&str>) -> bool { if name.eq_ignore_ascii_case("azure") { - return true; + true + } else if let Some(base_url) = base_url { + matches_azure_responses_base_url(base_url) + } else { + false } - - let Some(base_url) = base_url else { - return false; - }; - - let base = base_url.to_ascii_lowercase(); - base.contains("openai.azure.") || matches_azure_responses_base_url(&base) } fn matches_azure_responses_base_url(base_url: &str) -> bool { - const AZURE_MARKERS: [&str; 5] = [ + let base_url = base_url.to_ascii_lowercase(); + const AZURE_MARKERS: [&str; 6] = [ + "openai.azure.", "cognitiveservices.azure.", "aoai.azure.", "azure-api.", @@ -144,12 +143,12 @@ mod tests { for base_url in positive_cases { assert!( - is_azure_responses_wire_base_url("test", Some(base_url)), + is_azure_responses_provider("test", Some(base_url)), "expected {base_url} to be detected as Azure" ); } - assert!(is_azure_responses_wire_base_url( + assert!(is_azure_responses_provider( "Azure", Some("https://example.com") )); @@ -162,7 +161,7 @@ mod tests { for base_url in negative_cases { assert!( - !is_azure_responses_wire_base_url("test", Some(base_url)), + !is_azure_responses_provider("test", Some(base_url)), "expected {base_url} not to be detected as Azure" ); } diff --git 
a/codex-rs/codex-api/src/sse/responses.rs b/codex-rs/codex-api/src/sse/responses.rs index bf6d755060..da7e87461e 100644 --- a/codex-rs/codex-api/src/sse/responses.rs +++ b/codex-rs/codex-api/src/sse/responses.rs @@ -167,6 +167,8 @@ pub struct ResponsesStreamEvent { headers: Option, response: Option, item: Option, + item_id: Option, + call_id: Option, delta: Option, summary_index: Option, content_index: Option, @@ -250,6 +252,17 @@ pub fn process_responses_event( return Ok(Some(ResponseEvent::OutputTextDelta(delta))); } } + "response.custom_tool_call_input.delta" => { + if let (Some(delta), Some(item_id)) = + (event.delta, event.item_id.clone().or(event.call_id.clone())) + { + return Ok(Some(ResponseEvent::ToolCallInputDelta { + item_id, + call_id: event.call_id, + delta, + })); + } + } "response.reasoning_summary_text.delta" => { if let (Some(delta), Some(summary_index)) = (event.delta, event.summary_index) { return Ok(Some(ResponseEvent::ReasoningSummaryDelta { @@ -692,6 +705,38 @@ mod tests { ); } + #[tokio::test] + async fn parses_tool_call_input_deltas() { + let events = run_sse(vec![ + json!({ + "type": "response.custom_tool_call_input.delta", + "item_id": "ctc_1", + "call_id": "call_1", + "delta": "*** Begin", + }), + json!({ + "type": "response.function_call_arguments.delta", + "item_id": "fc_1", + "delta": "{\"input\":\"", + }), + json!({ + "type": "response.completed", + "response": { "id": "resp1" } + }), + ]) + .await; + + assert_matches!( + &events[0], + ResponseEvent::ToolCallInputDelta { + item_id, + call_id: Some(call_id), + delta, + } if item_id == "ctc_1" && call_id == "call_1" && delta == "*** Begin" + ); + assert_matches!(&events[1], ResponseEvent::Completed { .. 
}); + } + #[tokio::test] async fn emits_completed_without_stream_end() { let completed = json!({ diff --git a/codex-rs/codex-api/tests/clients.rs b/codex-rs/codex-api/tests/clients.rs index b11c6f9d6b..d82fcc14ce 100644 --- a/codex-rs/codex-api/tests/clients.rs +++ b/codex-rs/codex-api/tests/clients.rs @@ -91,9 +91,7 @@ impl HttpTransport for RecordingTransport { struct NoAuth; impl AuthProvider for NoAuth { - fn bearer_token(&self) -> Option { - None - } + fn add_auth_headers(&self, _headers: &mut HeaderMap) {} } #[derive(Clone)] @@ -112,12 +110,14 @@ impl StaticAuth { } impl AuthProvider for StaticAuth { - fn bearer_token(&self) -> Option { - Some(self.token.clone()) - } - - fn account_id(&self) -> Option { - Some(self.account_id.clone()) + fn add_auth_headers(&self, headers: &mut HeaderMap) { + let token = &self.token; + if let Ok(header) = HeaderValue::from_str(&format!("Bearer {token}")) { + headers.insert(http::header::AUTHORIZATION, header); + } + if let Ok(header) = HeaderValue::from_str(&self.account_id) { + headers.insert("ChatGPT-Account-ID", header); + } } } diff --git a/codex-rs/codex-api/tests/models_integration.rs b/codex-rs/codex-api/tests/models_integration.rs index 91a8477a92..fab135e0ab 100644 --- a/codex-rs/codex-api/tests/models_integration.rs +++ b/codex-rs/codex-api/tests/models_integration.rs @@ -24,9 +24,7 @@ use wiremock::matchers::path; struct DummyAuth; impl AuthProvider for DummyAuth { - fn bearer_token(&self) -> Option { - None - } + fn add_auth_headers(&self, _headers: &mut HeaderMap) {} } fn provider(base_url: &str) -> Provider { diff --git a/codex-rs/codex-api/tests/realtime_websocket_e2e.rs b/codex-rs/codex-api/tests/realtime_websocket_e2e.rs index 9969a96f09..abafaef2ae 100644 --- a/codex-rs/codex-api/tests/realtime_websocket_e2e.rs +++ b/codex-rs/codex-api/tests/realtime_websocket_e2e.rs @@ -6,6 +6,7 @@ use codex_api::Provider; use codex_api::RealtimeAudioFrame; use codex_api::RealtimeEvent; use codex_api::RealtimeEventParser; 
+use codex_api::RealtimeOutputModality; use codex_api::RealtimeSessionConfig; use codex_api::RealtimeSessionMode; use codex_api::RealtimeWebsocketClient; @@ -145,6 +146,7 @@ async fn realtime_ws_e2e_session_create_and_event_flow() { session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), @@ -248,6 +250,7 @@ async fn realtime_ws_connect_webrtc_sideband_retries_join_until_server_is_availa session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::RealtimeV2, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Marin, }, "rtc_test", @@ -319,6 +322,7 @@ async fn realtime_ws_e2e_send_while_next_event_waits() { session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), @@ -386,6 +390,7 @@ async fn realtime_ws_e2e_disconnected_emitted_once() { session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), @@ -449,6 +454,7 @@ async fn realtime_ws_e2e_ignores_unknown_text_events() { session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::V1, session_mode: RealtimeSessionMode::Conversational, + output_modality: RealtimeOutputModality::Audio, voice: RealtimeVoice::Cove, }, HeaderMap::new(), @@ -515,6 +521,7 @@ async fn realtime_ws_e2e_realtime_v2_parser_emits_handoff_requested() { session_id: Some("conv_123".to_string()), event_parser: RealtimeEventParser::RealtimeV2, session_mode: RealtimeSessionMode::Conversational, + output_modality: 
RealtimeOutputModality::Audio, voice: RealtimeVoice::Marin, }, HeaderMap::new(), diff --git a/codex-rs/codex-api/tests/sse_end_to_end.rs b/codex-rs/codex-api/tests/sse_end_to_end.rs index b15de296a8..4d32e82242 100644 --- a/codex-rs/codex-api/tests/sse_end_to_end.rs +++ b/codex-rs/codex-api/tests/sse_end_to_end.rs @@ -53,9 +53,7 @@ impl HttpTransport for FixtureSseTransport { struct NoAuth; impl AuthProvider for NoAuth { - fn bearer_token(&self) -> Option { - None - } + fn add_auth_headers(&self, _headers: &mut HeaderMap) {} } fn provider(name: &str) -> Provider { diff --git a/codex-rs/codex-mcp/Cargo.toml b/codex-rs/codex-mcp/Cargo.toml index 92aa584646..adc38d4093 100644 --- a/codex-rs/codex-mcp/Cargo.toml +++ b/codex-rs/codex-mcp/Cargo.toml @@ -35,6 +35,7 @@ tracing = { workspace = true } url = { workspace = true } [dev-dependencies] +codex-utils-absolute-path = { workspace = true } pretty_assertions = { workspace = true } rmcp = { workspace = true, default-features = false, features = ["base64", "macros", "schemars", "server"] } tempfile = { workspace = true } diff --git a/codex-rs/codex-mcp/src/lib.rs b/codex-rs/codex-mcp/src/lib.rs index bfbe1c60f8..ed0d9b4122 100644 --- a/codex-rs/codex-mcp/src/lib.rs +++ b/codex-rs/codex-mcp/src/lib.rs @@ -36,8 +36,7 @@ pub use mcp::tool_plugin_provenance; pub use mcp::with_codex_apps_mcp; pub use mcp_connection_manager::CodexAppsToolsCacheKey; pub use mcp_connection_manager::DEFAULT_STARTUP_TIMEOUT; -pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY; -pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD; +pub use mcp_connection_manager::MCP_SANDBOX_STATE_META_CAPABILITY; pub use mcp_connection_manager::McpConnectionManager; pub use mcp_connection_manager::SandboxState; pub use mcp_connection_manager::ToolInfo; diff --git a/codex-rs/codex-mcp/src/mcp/mod.rs b/codex-rs/codex-mcp/src/mcp/mod.rs index ba04429578..1dc9db0789 100644 --- a/codex-rs/codex-mcp/src/mcp/mod.rs +++ b/codex-rs/codex-mcp/src/mcp/mod.rs @@ 
-35,7 +35,6 @@ use codex_protocol::protocol::SandboxPolicy; use serde_json::Value; use crate::mcp_connection_manager::McpConnectionManager; -use crate::mcp_connection_manager::SandboxState; use crate::mcp_connection_manager::codex_apps_tools_cache_key; pub type McpManager = McpConnectionManager; @@ -270,11 +269,14 @@ fn codex_apps_mcp_server_config(config: &McpConfig, auth: Option<&CodexAuth>) -> http_headers, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(30)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -346,14 +348,6 @@ pub async fn collect_mcp_snapshot_with_detail( let (tx_event, rx_event) = unbounded(); drop(rx_event); - // Use ReadOnly sandbox policy for MCP snapshot collection (safest default) - let sandbox_state = SandboxState { - sandbox_policy: SandboxPolicy::new_read_only_policy(), - codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), - sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")), - use_legacy_landlock: config.use_legacy_landlock, - }; - let (mcp_connection_manager, cancel_token) = McpConnectionManager::new( &mcp_servers, config.mcp_oauth_credentials_store_mode, @@ -361,7 +355,7 @@ pub async fn collect_mcp_snapshot_with_detail( &config.approval_policy, submit_id, tx_event, - sandbox_state, + SandboxPolicy::new_read_only_policy(), config.codex_home.clone(), codex_apps_tools_cache_key(auth), tool_plugin_provenance, @@ -420,13 +414,6 @@ pub async fn collect_mcp_server_status_snapshot_with_detail( let (tx_event, rx_event) = unbounded(); drop(rx_event); - let sandbox_state = SandboxState { - sandbox_policy: SandboxPolicy::new_read_only_policy(), - codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), - sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")), - 
use_legacy_landlock: config.use_legacy_landlock, - }; - let (mcp_connection_manager, cancel_token) = McpConnectionManager::new( &mcp_servers, config.mcp_oauth_credentials_store_mode, @@ -434,7 +421,7 @@ pub async fn collect_mcp_server_status_snapshot_with_detail( &config.approval_policy, submit_id, tx_event, - sandbox_state, + SandboxPolicy::new_read_only_policy(), config.codex_home.clone(), codex_apps_tools_cache_key(auth), tool_plugin_provenance, diff --git a/codex-rs/codex-mcp/src/mcp/mod_tests.rs b/codex-rs/codex-mcp/src/mcp/mod_tests.rs index 8dc29bbf98..8db52a9d83 100644 --- a/codex-rs/codex-mcp/src/mcp/mod_tests.rs +++ b/codex-rs/codex-mcp/src/mcp/mod_tests.rs @@ -193,11 +193,14 @@ async fn effective_mcp_servers_preserve_user_servers_and_add_codex_apps() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -214,11 +217,14 @@ async fn effective_mcp_servers_preserve_user_servers_and_add_codex_apps() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/codex-mcp/src/mcp/skill_dependencies.rs b/codex-rs/codex-mcp/src/mcp/skill_dependencies.rs index aa26fd20fe..f785fe4bde 100644 --- a/codex-rs/codex-mcp/src/mcp/skill_dependencies.rs +++ b/codex-rs/codex-mcp/src/mcp/skill_dependencies.rs @@ -119,11 +119,14 @@ fn mcp_dependency_to_server_config( http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: 
None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -145,11 +148,14 @@ fn mcp_dependency_to_server_config( env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/codex-mcp/src/mcp/skill_dependencies_tests.rs b/codex-rs/codex-mcp/src/mcp/skill_dependencies_tests.rs index 3f5f85d194..2d8390d15e 100644 --- a/codex-rs/codex-mcp/src/mcp/skill_dependencies_tests.rs +++ b/codex-rs/codex-mcp/src/mcp/skill_dependencies_tests.rs @@ -2,8 +2,9 @@ use super::*; use codex_protocol::protocol::SkillDependencies; use codex_protocol::protocol::SkillMetadata; use codex_protocol::protocol::SkillScope; +use codex_utils_absolute_path::test_support::PathBufExt as _; +use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; -use std::path::PathBuf; fn skill_with_tools(tools: Vec) -> SkillMetadata { SkillMetadata { @@ -12,7 +13,7 @@ fn skill_with_tools(tools: Vec) -> SkillMetadata { short_description: None, interface: None, dependencies: Some(SkillDependencies { tools }), - path: PathBuf::from("skill"), + path: test_path_buf("/tmp/skill").abs(), scope: SkillScope::User, enabled: true, } @@ -38,11 +39,14 @@ fn collect_missing_respects_canonical_installed_key() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -88,11 +92,14 @@ fn collect_missing_dedupes_by_canonical_key_but_preserves_original_name() { http_headers: 
None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/codex-mcp/src/mcp_connection_manager.rs b/codex-rs/codex-mcp/src/mcp_connection_manager.rs index 9021f7d7a9..d1696b5931 100644 --- a/codex-rs/codex-mcp/src/mcp_connection_manager.rs +++ b/codex-rs/codex-mcp/src/mcp_connection_manager.rs @@ -36,6 +36,7 @@ use codex_async_utils::CancelErr; use codex_async_utils::OrCancelExt; use codex_config::Constrained; use codex_config::types::OAuthCredentialsStoreMode; +use codex_protocol::ToolName; use codex_protocol::approvals::ElicitationRequest; use codex_protocol::approvals::ElicitationRequestEvent; use codex_protocol::mcp::CallToolResult; @@ -155,6 +156,12 @@ pub struct ToolInfo { pub connector_description: Option, } +impl ToolInfo { + pub fn canonical_tool_name(&self) -> ToolName { + ToolName::namespaced(self.callable_namespace.clone(), self.callable_name.clone()) + } +} + const META_OPENAI_FILE_PARAMS: &str = "openai/fileParams"; pub fn declared_openai_file_input_param_names( @@ -432,7 +439,7 @@ struct ManagedClient { tool_filter: ToolFilter, tool_timeout: Option, server_instructions: Option, - server_supports_sandbox_state_capability: bool, + server_supports_sandbox_state_meta_capability: bool, codex_apps_tools_cache_context: Option, } @@ -461,22 +468,6 @@ impl ManagedClient { self.tools.clone() } - - /// Returns once the server has ack'd the sandbox state update. 
- async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { - if !self.server_supports_sandbox_state_capability { - return Ok(()); - } - - let _response = self - .client - .send_custom_request( - MCP_SANDBOX_STATE_METHOD, - Some(serde_json::to_value(sandbox_state)?), - ) - .await?; - Ok(()) - } } #[derive(Clone)] @@ -634,18 +625,11 @@ impl AsyncManagedClient { }; tools.map(annotate_tools) } - - async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { - let managed = self.client().await?; - managed.notify_sandbox_state_change(sandbox_state).await - } } -pub const MCP_SANDBOX_STATE_CAPABILITY: &str = "codex/sandbox-state"; - -/// Custom MCP request to push sandbox state updates. -/// When used, the `params` field of the notification is [`SandboxState`]. -pub const MCP_SANDBOX_STATE_METHOD: &str = "codex/sandbox-state/update"; +/// MCP server capability indicating that Codex should include [`SandboxState`] +/// in tool-call request `_meta` under this key. 
+pub const MCP_SANDBOX_STATE_META_CAPABILITY: &str = "codex/sandbox-state-meta"; #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -723,7 +707,7 @@ impl McpConnectionManager { approval_policy: &Constrained, submit_id: String, tx_event: Sender, - initial_sandbox_state: SandboxState, + initial_sandbox_policy: SandboxPolicy, codex_home: PathBuf, codex_apps_tools_cache_key: CodexAppsToolsCacheKey, tool_plugin_provenance: ToolPluginProvenance, @@ -732,10 +716,8 @@ impl McpConnectionManager { let mut clients = HashMap::new(); let mut server_origins = HashMap::new(); let mut join_set = JoinSet::new(); - let elicitation_requests = ElicitationRequestManager::new( - approval_policy.value(), - initial_sandbox_state.sandbox_policy.clone(), - ); + let elicitation_requests = + ElicitationRequestManager::new(approval_policy.value(), initial_sandbox_policy); let tool_plugin_provenance = Arc::new(tool_plugin_provenance); let startup_submit_id = submit_id.clone(); let mcp_servers = mcp_servers.clone(); @@ -775,25 +757,14 @@ impl McpConnectionManager { let tx_event = tx_event.clone(); let submit_id = startup_submit_id.clone(); let auth_entry = auth_entries.get(&server_name).cloned(); - let sandbox_state = initial_sandbox_state.clone(); join_set.spawn(async move { - let outcome = async_managed_client.client().await; + let mut outcome = async_managed_client.client().await; if cancel_token.is_cancelled() { - return (server_name, Err(StartupOutcomeError::Cancelled)); + outcome = Err(StartupOutcomeError::Cancelled); } let status = match &outcome { - Ok(_) => { - // Send sandbox state notification immediately after Ready - if let Err(e) = async_managed_client - .notify_sandbox_state_change(&sandbox_state) - .await - { - warn!( - "Failed to notify sandbox state to MCP server {server_name}: {e:#}", - ); - } - McpStartupStatus::Ready - } + Ok(_) => McpStartupStatus::Ready, + Err(StartupOutcomeError::Cancelled) => McpStartupStatus::Cancelled, Err(error) => { 
let error_str = mcp_init_error_display( server_name.as_str(), @@ -1142,6 +1113,16 @@ impl McpConnectionManager { }) } + pub async fn server_supports_sandbox_state_meta_capability( + &self, + server: &str, + ) -> Result { + Ok(self + .client_by_name(server) + .await? + .server_supports_sandbox_state_meta_capability) + } + /// List resources from the specified server. pub async fn list_resources( &self, @@ -1191,42 +1172,11 @@ impl McpConnectionManager { .with_context(|| format!("resources/read failed for `{server}` ({uri})")) } - pub async fn resolve_tool_info(&self, name: &str, namespace: Option<&str>) -> Option { - let qualified_name = match namespace { - Some(namespace) if name.starts_with(namespace) => name.to_string(), - Some(namespace) => format!("{namespace}{name}"), - None => name.to_string(), - }; - - self.list_all_tools().await.get(&qualified_name).cloned() - } - - pub async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { - let mut join_set = JoinSet::new(); - - for async_managed_client in self.clients.values() { - let sandbox_state = sandbox_state.clone(); - let async_managed_client = async_managed_client.clone(); - join_set.spawn(async move { - async_managed_client - .notify_sandbox_state_change(&sandbox_state) - .await - }); - } - - while let Some(join_res) = join_set.join_next().await { - match join_res { - Ok(Ok(())) => {} - Ok(Err(err)) => { - warn!("Failed to notify sandbox state change to MCP server: {err:#}"); - } - Err(err) => { - warn!("Task panic when notifying sandbox state change to MCP server: {err:#}"); - } - } - } - - Ok(()) + pub async fn resolve_tool_info(&self, tool_name: &ToolName) -> Option { + let all_tools = self.list_all_tools().await; + all_tools + .into_values() + .find(|tool| tool.canonical_tool_name() == *tool_name) } } @@ -1473,11 +1423,11 @@ async fn start_server_task( .await .map_err(StartupOutcomeError::from)?; - let server_supports_sandbox_state_capability = initialize_result + let 
server_supports_sandbox_state_meta_capability = initialize_result .capabilities .experimental .as_ref() - .and_then(|exp| exp.get(MCP_SANDBOX_STATE_CAPABILITY)) + .and_then(|exp| exp.get(MCP_SANDBOX_STATE_META_CAPABILITY)) .is_some(); let list_start = Instant::now(); let fetch_start = Instant::now(); @@ -1514,7 +1464,7 @@ async fn start_server_task( tool_timeout: Some(tool_timeout), tool_filter, server_instructions: initialize_result.instructions, - server_supports_sandbox_state_capability, + server_supports_sandbox_state_meta_capability, codex_apps_tools_cache_context, }; diff --git a/codex-rs/codex-mcp/src/mcp_connection_manager_tests.rs b/codex-rs/codex-mcp/src/mcp_connection_manager_tests.rs index 2c0d6fda68..cf2889ccde 100644 --- a/codex-rs/codex-mcp/src/mcp_connection_manager_tests.rs +++ b/codex-rs/codex-mcp/src/mcp_connection_manager_tests.rs @@ -1,4 +1,5 @@ use super::*; +use codex_protocol::ToolName; use codex_protocol::protocol::GranularApprovalConfig; use codex_protocol::protocol::McpAuthStatus; use pretty_assertions::assert_eq; @@ -646,6 +647,42 @@ async fn list_all_tools_uses_startup_snapshot_while_client_is_pending() { assert_eq!(tool.callable_name, "calendar_create_event"); } +#[tokio::test] +async fn resolve_tool_info_accepts_canonical_namespaced_tool_names() { + let startup_tools = vec![create_test_tool("rmcp", "echo")]; + let pending_client = futures::future::pending::>() + .boxed() + .shared(); + let approval_policy = Constrained::allow_any(AskForApproval::OnFailure); + let sandbox_policy = Constrained::allow_any(SandboxPolicy::new_read_only_policy()); + let mut manager = McpConnectionManager::new_uninitialized(&approval_policy, &sandbox_policy); + manager.clients.insert( + "rmcp".to_string(), + AsyncManagedClient { + client: pending_client, + startup_snapshot: Some(startup_tools), + startup_complete: Arc::new(std::sync::atomic::AtomicBool::new(false)), + tool_plugin_provenance: Arc::new(ToolPluginProvenance::default()), + }, + ); + + let tool = 
manager + .resolve_tool_info(&ToolName::namespaced("mcp__rmcp__", "echo")) + .await + .expect("split MCP tool namespace and name should resolve"); + + let expected = ("rmcp", "mcp__rmcp__", "echo", "echo"); + assert_eq!( + ( + tool.server_name.as_str(), + tool.callable_namespace.as_str(), + tool.callable_name.as_str(), + tool.tool.name.as_ref(), + ), + expected + ); +} + #[tokio::test] async fn list_all_tools_blocks_while_client_is_pending_without_startup_snapshot() { let pending_client = futures::future::pending::>() @@ -755,11 +792,14 @@ fn mcp_init_error_display_prompts_for_github_pat() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -804,11 +844,14 @@ fn mcp_init_error_display_reports_generic_errors() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/config/Cargo.toml b/codex-rs/config/Cargo.toml index 9532d74d00..93ca283b86 100644 --- a/codex-rs/config/Cargo.toml +++ b/codex-rs/config/Cargo.toml @@ -12,12 +12,11 @@ anyhow = { workspace = true } codex-app-server-protocol = { workspace = true } codex-execpolicy = { workspace = true } codex-features = { workspace = true } -codex-git-utils = { workspace = true } codex-model-provider-info = { workspace = true } codex-network-proxy = { workspace = true } codex-protocol = { workspace = true } codex-utils-absolute-path = { workspace = true } -dunce = { workspace = true } +codex-utils-path = { workspace = true } futures = { workspace = true, features = 
["alloc", "std"] } multimap = { workspace = true } schemars = { workspace = true } diff --git a/codex-rs/config/src/config_requirements.rs b/codex-rs/config/src/config_requirements.rs index 2e6756d81e..7abedc62f1 100644 --- a/codex-rs/config/src/config_requirements.rs +++ b/codex-rs/config/src/config_requirements.rs @@ -237,8 +237,6 @@ pub struct NetworkRequirementsToml { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. pub managed_allowed_domains_only: Option, - /// In danger-full-access mode, allow all network access and enforce managed deny entries. - pub danger_full_access_denylist_only: Option, pub unix_sockets: Option, pub allow_local_binding: Option, } @@ -257,8 +255,6 @@ struct RawNetworkRequirementsToml { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. managed_allowed_domains_only: Option, - /// In danger-full-access mode, allow all network access and enforce managed deny entries. - danger_full_access_denylist_only: Option, #[serde(default)] denied_domains: Option>, unix_sockets: Option, @@ -283,7 +279,6 @@ impl<'de> Deserialize<'de> for NetworkRequirementsToml { domains, allowed_domains, managed_allowed_domains_only, - danger_full_access_denylist_only, denied_domains, unix_sockets, allow_unix_sockets, @@ -312,7 +307,6 @@ impl<'de> Deserialize<'de> for NetworkRequirementsToml { domains: domains .or_else(|| legacy_domain_permissions_from_lists(allowed_domains, denied_domains)), managed_allowed_domains_only, - danger_full_access_denylist_only, unix_sockets: unix_sockets .or_else(|| legacy_unix_socket_permissions_from_list(allow_unix_sockets)), allow_local_binding, @@ -365,8 +359,6 @@ pub struct NetworkConstraints { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. 
pub managed_allowed_domains_only: Option, - /// In danger-full-access mode, allow all network access and enforce managed deny entries. - pub danger_full_access_denylist_only: Option, pub unix_sockets: Option, pub allow_local_binding: Option, } @@ -392,7 +384,6 @@ impl From for NetworkConstraints { dangerously_allow_all_unix_sockets, domains, managed_allowed_domains_only, - danger_full_access_denylist_only, unix_sockets, allow_local_binding, } = value; @@ -405,7 +396,6 @@ impl From for NetworkConstraints { dangerously_allow_all_unix_sockets, domains, managed_allowed_domains_only, - danger_full_access_denylist_only, unix_sockets, allow_local_binding, } @@ -1811,7 +1801,6 @@ allowed_approvals_reviewers = ["user"] allow_upstream_proxy = false dangerously_allow_all_unix_sockets = true managed_allowed_domains_only = true - danger_full_access_denylist_only = true allow_local_binding = false [experimental_network.domains] @@ -1862,10 +1851,6 @@ allowed_approvals_reviewers = ["user"] sourced_network.value.managed_allowed_domains_only, Some(true) ); - assert_eq!( - sourced_network.value.danger_full_access_denylist_only, - Some(true) - ); assert_eq!( sourced_network.value.unix_sockets.as_ref(), Some(&NetworkUnixSocketPermissionsToml { @@ -1889,7 +1874,6 @@ allowed_approvals_reviewers = ["user"] dangerously_allow_all_unix_sockets = true allowed_domains = ["api.example.com", "*.openai.com"] managed_allowed_domains_only = true - danger_full_access_denylist_only = true denied_domains = ["blocked.example.com"] allow_unix_sockets = ["/tmp/example.sock"] allow_local_binding = false @@ -1934,10 +1918,6 @@ allowed_approvals_reviewers = ["user"] sourced_network.value.managed_allowed_domains_only, Some(true) ); - assert_eq!( - sourced_network.value.danger_full_access_denylist_only, - Some(true) - ); assert_eq!( sourced_network.value.unix_sockets.as_ref(), Some(&NetworkUnixSocketPermissionsToml { diff --git a/codex-rs/config/src/config_toml.rs b/codex-rs/config/src/config_toml.rs index 
92e5304fe5..83fe6c88e7 100644 --- a/codex-rs/config/src/config_toml.rs +++ b/codex-rs/config/src/config_toml.rs @@ -29,7 +29,6 @@ use crate::types::WindowsToml; use codex_app_server_protocol::Tools; use codex_app_server_protocol::UserSavedConfig; use codex_features::FeaturesToml; -use codex_git_utils::resolve_root_git_project_for_trust; use codex_model_provider_info::LEGACY_OLLAMA_CHAT_PROVIDER_ID; use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; use codex_model_provider_info::ModelProviderInfo; @@ -51,6 +50,7 @@ use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::ReadOnlyAccess; use codex_protocol::protocol::SandboxPolicy; use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_path::normalize_for_path_comparison; use schemars::JsonSchema; use serde::Deserialize; use serde::Deserializer; @@ -596,12 +596,12 @@ pub struct GhostSnapshotToml { impl ConfigToml { /// Derive the effective sandbox policy from the configuration. - pub fn derive_sandbox_policy( + pub async fn derive_sandbox_policy( &self, sandbox_mode_override: Option, profile_sandbox_mode: Option, windows_sandbox_level: WindowsSandboxLevel, - resolved_cwd: &Path, + active_project: Option<&ProjectConfig>, sandbox_policy_constraint: Option<&crate::Constrained>, ) -> SandboxPolicy { let sandbox_mode_was_explicit = sandbox_mode_override.is_some() @@ -610,11 +610,13 @@ impl ConfigToml { let resolved_sandbox_mode = sandbox_mode_override .or(profile_sandbox_mode) .or(self.sandbox_mode) - .or_else(|| { + .or(if sandbox_mode_was_explicit { + None + } else { // If no sandbox_mode is set but this directory has a trust decision, // default to workspace-write except on unsandboxed Windows where we // default to read-only. 
- self.get_active_project(resolved_cwd).and_then(|p| { + active_project.and_then(|p| { if p.is_trusted() || p.is_untrusted() { if cfg!(target_os = "windows") && windows_sandbox_level == WindowsSandboxLevel::Disabled @@ -675,8 +677,13 @@ impl ConfigToml { } /// Resolves the cwd to an existing project, or returns None if ConfigToml - /// does not contain a project corresponding to cwd or a git repo for cwd - pub fn get_active_project(&self, resolved_cwd: &Path) -> Option { + /// does not contain a project corresponding to cwd or the resolved git repo + /// root for cwd. + pub fn get_active_project( + &self, + resolved_cwd: &Path, + repo_root: Option<&Path>, + ) -> Option { let projects = self.projects.clone().unwrap_or_default(); let resolved_cwd_key = project_trust_key(resolved_cwd); @@ -688,11 +695,8 @@ impl ConfigToml { return Some(project_config.clone()); } - // If cwd lives inside a git repo/worktree, check whether the root git project - // (the primary repository working directory) is trusted. This lets - // worktrees inherit trust from the main project. - if let Some(repo_root) = resolve_root_git_project_for_trust(resolved_cwd) { - let repo_root_key = project_trust_key(repo_root.as_path()); + if let Some(repo_root) = repo_root { + let repo_root_key = project_trust_key(repo_root); let repo_root_raw_key = repo_root.to_string_lossy().to_string(); if let Some(project_config_for_root) = projects .get(&repo_root_key) @@ -731,7 +735,7 @@ impl ConfigToml { /// projects trust map. On Windows, strips UNC, when possible, to try to ensure /// that different paths that point to the same location have the same key. 
fn project_trust_key(project_path: &Path) -> String { - dunce::canonicalize(project_path) + normalize_for_path_comparison(project_path) .unwrap_or_else(|_| project_path.to_path_buf()) .to_string_lossy() .to_string() diff --git a/codex-rs/config/src/lib.rs b/codex-rs/config/src/lib.rs index 1b178fcdae..66a6fc22fc 100644 --- a/codex-rs/config/src/lib.rs +++ b/codex-rs/config/src/lib.rs @@ -14,6 +14,7 @@ pub mod profile_toml; mod project_root_markers; mod requirements_exec_policy; pub mod schema; +pub mod shell_environment; mod skills_config; mod state; pub mod types; @@ -85,3 +86,5 @@ pub use state::ConfigLayerEntry; pub use state::ConfigLayerStack; pub use state::ConfigLayerStackOrdering; pub use state::LoaderOverrides; + +pub use codex_app_server_protocol::ConfigLayerSource; diff --git a/codex-rs/config/src/marketplace_edit.rs b/codex-rs/config/src/marketplace_edit.rs index 33cdd8e163..e20a75a02e 100644 --- a/codex-rs/config/src/marketplace_edit.rs +++ b/codex-rs/config/src/marketplace_edit.rs @@ -12,6 +12,7 @@ use crate::CONFIG_TOML_FILE; pub struct MarketplaceConfigUpdate<'a> { pub last_updated: &'a str, + pub last_revision: Option<&'a str>, pub source_type: &'a str, pub source: &'a str, pub ref_name: Option<&'a str>, @@ -63,6 +64,9 @@ fn upsert_marketplace( let mut entry = TomlTable::new(); entry.set_implicit(false); entry["last_updated"] = value(update.last_updated.to_string()); + if let Some(last_revision) = update.last_revision { + entry["last_revision"] = value(last_revision.to_string()); + } entry["source_type"] = value(update.source_type.to_string()); entry["source"] = value(update.source.to_string()); if let Some(ref_name) = update.ref_name { diff --git a/codex-rs/config/src/mcp_edit.rs b/codex-rs/config/src/mcp_edit.rs index f528751f26..c4bb38c543 100644 --- a/codex-rs/config/src/mcp_edit.rs +++ b/codex-rs/config/src/mcp_edit.rs @@ -174,15 +174,28 @@ fn serialize_mcp_server(config: &McpServerConfig) -> TomlItem { if !config.enabled { entry["enabled"] = 
value(false); } + if let Some(environment) = &config.experimental_environment { + entry["experimental_environment"] = value(environment.clone()); + } if config.required { entry["required"] = value(true); } + if config.supports_parallel_tool_calls { + entry["supports_parallel_tool_calls"] = value(true); + } if let Some(timeout) = config.startup_timeout_sec { entry["startup_timeout_sec"] = value(timeout.as_secs_f64()); } if let Some(timeout) = config.tool_timeout_sec { entry["tool_timeout_sec"] = value(timeout.as_secs_f64()); } + if let Some(approval_mode) = config.default_tools_approval_mode { + entry["default_tools_approval_mode"] = value(match approval_mode { + AppToolApproval::Auto => "auto", + AppToolApproval::Prompt => "prompt", + AppToolApproval::Approve => "approve", + }); + } if let Some(enabled_tools) = &config.enabled_tools && !enabled_tools.is_empty() { diff --git a/codex-rs/config/src/mcp_edit_tests.rs b/codex-rs/config/src/mcp_edit_tests.rs index 3a8eddee01..cfcd73c3e5 100644 --- a/codex-rs/config/src/mcp_edit_tests.rs +++ b/codex-rs/config/src/mcp_edit_tests.rs @@ -22,11 +22,14 @@ async fn replace_mcp_servers_serializes_per_tool_approval_overrides() -> anyhow: env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: true, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: Some(AppToolApproval::Auto), enabled_tools: None, disabled_tools: None, scopes: None, @@ -59,6 +62,8 @@ async fn replace_mcp_servers_serializes_per_tool_approval_overrides() -> anyhow: serialized, r#"[mcp_servers.docs] command = "docs-server" +supports_parallel_tool_calls = true +default_tools_approval_mode = "auto" [mcp_servers.docs.tools] diff --git a/codex-rs/config/src/mcp_types.rs b/codex-rs/config/src/mcp_types.rs index 52cf71b49b..75b68c3f94 100644 --- a/codex-rs/config/src/mcp_types.rs +++ b/codex-rs/config/src/mcp_types.rs @@ -61,6 +61,10 @@ pub 
struct McpServerConfig { #[serde(flatten)] pub transport: McpServerTransportConfig, + /// Experimental environment selector for where Codex should start this MCP server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub experimental_environment: Option, + /// When `false`, Codex skips initializing this MCP server. #[serde(default = "default_enabled")] pub enabled: bool, @@ -69,6 +73,10 @@ pub struct McpServerConfig { #[serde(default, skip_serializing_if = "std::ops::Not::not")] pub required: bool, + /// When `true`, every tool from this server is advertised as safe for parallel tool calls. + #[serde(default, skip_serializing_if = "std::ops::Not::not")] + pub supports_parallel_tool_calls: bool, + /// Reason this server was disabled after applying requirements. #[serde(skip)] pub disabled_reason: Option, @@ -85,6 +93,10 @@ pub struct McpServerConfig { #[serde(default, with = "option_duration_secs")] pub tool_timeout_sec: Option, + /// Approval mode for tools in this server unless a tool override exists. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub default_tools_approval_mode: Option, + /// Explicit allow-list of tools exposed from this server. When set, only these tools will be registered. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub enabled_tools: Option>, @@ -135,6 +147,8 @@ pub struct RawMcpServerConfig { // shared #[serde(default)] + pub experimental_environment: Option, + #[serde(default)] pub startup_timeout_sec: Option, #[serde(default)] pub startup_timeout_ms: Option, @@ -146,6 +160,10 @@ pub struct RawMcpServerConfig { #[serde(default)] pub required: Option, #[serde(default)] + pub supports_parallel_tool_calls: Option, + #[serde(default)] + pub default_tools_approval_mode: Option, + #[serde(default)] pub enabled_tools: Option>, #[serde(default)] pub disabled_tools: Option>, @@ -175,11 +193,14 @@ impl TryFrom for McpServerConfig { url, bearer_token, bearer_token_env_var, + experimental_environment, startup_timeout_sec, startup_timeout_ms, tool_timeout_sec, enabled, required, + supports_parallel_tool_calls, + default_tools_approval_mode, enabled_tools, disabled_tools, scopes, @@ -239,11 +260,14 @@ impl TryFrom for McpServerConfig { Ok(Self { transport, + experimental_environment, startup_timeout_sec, tool_timeout_sec, enabled: enabled.unwrap_or_else(default_enabled), required: required.unwrap_or_default(), + supports_parallel_tool_calls: supports_parallel_tool_calls.unwrap_or_default(), disabled_reason: None, + default_tools_approval_mode, enabled_tools, disabled_tools, scopes, diff --git a/codex-rs/config/src/mcp_types_tests.rs b/codex-rs/config/src/mcp_types_tests.rs index 694314e6e2..dff4a6bbf5 100644 --- a/codex-rs/config/src/mcp_types_tests.rs +++ b/codex-rs/config/src/mcp_types_tests.rs @@ -245,6 +245,70 @@ fn deserialize_server_config_with_tool_filters() { assert_eq!(cfg.disabled_tools, Some(vec!["blocked".to_string()])); } +#[test] +fn deserialize_server_config_with_parallel_tool_calls() { + let cfg: McpServerConfig = toml::from_str( + r#" + command = "echo" + supports_parallel_tool_calls = true + "#, + ) + .expect("should deserialize supports_parallel_tool_calls"); + + assert!(cfg.supports_parallel_tool_calls); +} + 
+#[test] +fn deserialize_server_config_with_default_tool_approval_mode() { + let cfg: McpServerConfig = toml::from_str( + r#" + command = "echo" + default_tools_approval_mode = "approve" + + [tools.search] + approval_mode = "prompt" + "#, + ) + .expect("should deserialize default tool approval mode"); + + assert_eq!( + cfg.default_tools_approval_mode, + Some(AppToolApproval::Approve) + ); + assert_eq!( + cfg.tools.get("search"), + Some(&McpServerToolConfig { + approval_mode: Some(AppToolApproval::Prompt), + }) + ); + + let serialized = toml::to_string(&cfg).expect("should serialize MCP config"); + assert!(serialized.contains("default_tools_approval_mode = \"approve\"")); + + let round_tripped: McpServerConfig = + toml::from_str(&serialized).expect("should deserialize serialized MCP config"); + assert_eq!(round_tripped, cfg); +} + +#[test] +fn serialize_round_trips_server_config_with_parallel_tool_calls() { + let cfg: McpServerConfig = toml::from_str( + r#" + command = "echo" + supports_parallel_tool_calls = true + tool_timeout_sec = 2.0 + "#, + ) + .expect("should deserialize supports_parallel_tool_calls"); + + let serialized = toml::to_string(&cfg).expect("should serialize MCP config"); + assert!(serialized.contains("supports_parallel_tool_calls = true")); + + let round_tripped: McpServerConfig = + toml::from_str(&serialized).expect("should deserialize serialized MCP config"); + assert_eq!(round_tripped, cfg); +} + #[test] fn deserialize_ignores_unknown_server_fields() { let cfg: McpServerConfig = toml::from_str( @@ -265,11 +329,14 @@ fn deserialize_ignores_unknown_server_fields() { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/config/src/permissions_toml.rs 
b/codex-rs/config/src/permissions_toml.rs index fcc3e006b1..cee68d7abb 100644 --- a/codex-rs/config/src/permissions_toml.rs +++ b/codex-rs/config/src/permissions_toml.rs @@ -31,6 +31,10 @@ pub struct PermissionProfileToml { #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] pub struct FilesystemPermissionsToml { + /// Optional maximum depth for expanding unreadable glob patterns on + /// platforms that snapshot glob matches before sandbox startup. + #[schemars(range(min = 1))] + pub glob_scan_max_depth: Option, #[serde(flatten)] pub entries: BTreeMap, } diff --git a/codex-rs/config/src/shell_environment.rs b/codex-rs/config/src/shell_environment.rs new file mode 100644 index 0000000000..80fe0da426 --- /dev/null +++ b/codex-rs/config/src/shell_environment.rs @@ -0,0 +1,123 @@ +use crate::types::EnvironmentVariablePattern; +use crate::types::ShellEnvironmentPolicy; +use crate::types::ShellEnvironmentPolicyInherit; +use std::collections::HashMap; +use std::collections::HashSet; + +pub const CODEX_THREAD_ID_ENV_VAR: &str = "CODEX_THREAD_ID"; + +/// Construct a shell environment from the supplied process environment and +/// shell-environment policy. 
+pub fn create_env( + policy: &ShellEnvironmentPolicy, + thread_id: Option<&str>, +) -> HashMap { + create_env_from_vars(std::env::vars(), policy, thread_id) +} + +pub fn create_env_from_vars( + vars: I, + policy: &ShellEnvironmentPolicy, + thread_id: Option<&str>, +) -> HashMap +where + I: IntoIterator, +{ + let mut env_map = populate_env(vars, policy, thread_id); + + if cfg!(target_os = "windows") { + // This is a workaround to address the failures we are seeing in the + // following tests when run via Bazel on Windows: + // + // ``` + // suite::shell_command::unicode_output::with_login + // suite::shell_command::unicode_output::without_login + // ``` + // + // Currently, we can only reproduce these failures in CI, which makes + // iteration times long, so we include this quick fix for now to unblock + // getting the Windows Bazel build running. + if !env_map.keys().any(|k| k.eq_ignore_ascii_case("PATHEXT")) { + env_map.insert("PATHEXT".to_string(), ".COM;.EXE;.BAT;.CMD".to_string()); + } + } + env_map +} + +pub fn populate_env( + vars: I, + policy: &ShellEnvironmentPolicy, + thread_id: Option<&str>, +) -> HashMap +where + I: IntoIterator, +{ + // Step 1 - determine the starting set of variables based on the + // `inherit` strategy. + let mut env_map: HashMap = match policy.inherit { + ShellEnvironmentPolicyInherit::All => vars.into_iter().collect(), + ShellEnvironmentPolicyInherit::None => HashMap::new(), + ShellEnvironmentPolicyInherit::Core => { + let core_vars: HashSet<&str> = COMMON_CORE_VARS + .iter() + .copied() + .chain(PLATFORM_CORE_VARS.iter().copied()) + .collect(); + let is_core_var = |name: &str| { + if cfg!(target_os = "windows") { + core_vars + .iter() + .any(|allowed| allowed.eq_ignore_ascii_case(name)) + } else { + core_vars.contains(name) + } + }; + vars.into_iter().filter(|(k, _)| is_core_var(k)).collect() + } + }; + + // Internal helper - does `name` match any pattern in `patterns`? 
+ let matches_any = |name: &str, patterns: &[EnvironmentVariablePattern]| -> bool { + patterns.iter().any(|pattern| pattern.matches(name)) + }; + + // Step 2 - Apply the default exclude if not disabled. + if !policy.ignore_default_excludes { + let default_excludes = vec![ + EnvironmentVariablePattern::new_case_insensitive("*KEY*"), + EnvironmentVariablePattern::new_case_insensitive("*SECRET*"), + EnvironmentVariablePattern::new_case_insensitive("*TOKEN*"), + ]; + env_map.retain(|k, _| !matches_any(k, &default_excludes)); + } + + // Step 3 - Apply custom excludes. + if !policy.exclude.is_empty() { + env_map.retain(|k, _| !matches_any(k, &policy.exclude)); + } + + // Step 4 - Apply user-provided overrides. + for (key, val) in &policy.r#set { + env_map.insert(key.clone(), val.clone()); + } + + // Step 5 - If include_only is non-empty, keep only the matching vars. + if !policy.include_only.is_empty() { + env_map.retain(|k, _| matches_any(k, &policy.include_only)); + } + + // Step 6 - Populate the thread ID environment variable when provided. 
+ if let Some(thread_id) = thread_id { + env_map.insert(CODEX_THREAD_ID_ENV_VAR.to_string(), thread_id.to_string()); + } + + env_map +} + +const COMMON_CORE_VARS: &[&str] = &["PATH", "SHELL", "TMPDIR", "TEMP", "TMP"]; + +#[cfg(target_os = "windows")] +const PLATFORM_CORE_VARS: &[&str] = &["PATHEXT", "USERNAME", "USERPROFILE"]; + +#[cfg(unix)] +const PLATFORM_CORE_VARS: &[&str] = &["HOME", "LANG", "LC_ALL", "LC_CTYPE", "LOGNAME", "USER"]; diff --git a/codex-rs/config/src/types.rs b/codex-rs/config/src/types.rs index a1880c3be1..b3576cb3c1 100644 --- a/codex-rs/config/src/types.rs +++ b/codex-rs/config/src/types.rs @@ -31,6 +31,10 @@ pub const DEFAULT_MEMORIES_MAX_ROLLOUT_AGE_DAYS: i64 = 30; pub const DEFAULT_MEMORIES_MIN_ROLLOUT_IDLE_HOURS: i64 = 6; pub const DEFAULT_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION: usize = 256; pub const DEFAULT_MEMORIES_MAX_UNUSED_DAYS: i64 = 30; +const MIN_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION: usize = 1; +const MAX_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION: usize = 4096; +const MIN_MEMORIES_MAX_ROLLOUTS_PER_STARTUP: usize = 1; +const MAX_MEMORIES_MAX_ROLLOUTS_PER_STARTUP: usize = 128; const fn default_enabled() -> bool { true @@ -185,12 +189,14 @@ pub struct MemoriesToml { /// When `false`, skip injecting memory usage instructions into developer prompts. pub use_memories: Option, /// Maximum number of recent raw memories retained for global consolidation. + #[schemars(range(min = 1, max = 4096))] pub max_raw_memories_for_consolidation: Option, /// Maximum number of days since a memory was last used before it becomes ineligible for phase 2 selection. pub max_unused_days: Option, /// Maximum age of the threads used for memories. pub max_rollout_age_days: Option, /// Maximum number of rollout candidates processed per pass. + #[schemars(range(min = 1, max = 128))] pub max_rollouts_per_startup: Option, /// Minimum idle time between last thread activity and memory creation (hours). > 12h recommended. 
pub min_rollout_idle_hours: Option, @@ -244,7 +250,10 @@ impl From for MemoriesConfig { max_raw_memories_for_consolidation: toml .max_raw_memories_for_consolidation .unwrap_or(defaults.max_raw_memories_for_consolidation) - .min(4096), + .clamp( + MIN_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION, + MAX_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION, + ), max_unused_days: toml .max_unused_days .unwrap_or(defaults.max_unused_days) @@ -256,7 +265,10 @@ impl From for MemoriesConfig { max_rollouts_per_startup: toml .max_rollouts_per_startup .unwrap_or(defaults.max_rollouts_per_startup) - .min(128), + .clamp( + MIN_MEMORIES_MAX_ROLLOUTS_PER_STARTUP, + MAX_MEMORIES_MAX_ROLLOUTS_PER_STARTUP, + ), min_rollout_idle_hours: toml .min_rollout_idle_hours .unwrap_or(defaults.min_rollout_idle_hours) @@ -614,6 +626,9 @@ pub struct MarketplaceConfig { /// Last time Codex successfully added or refreshed this marketplace. #[serde(default)] pub last_updated: Option, + /// Git revision Codex last successfully activated for this marketplace. + #[serde(default)] + pub last_revision: Option, /// Source kind used to install this marketplace. #[serde(default)] pub source_type: Option, @@ -632,6 +647,7 @@ pub struct MarketplaceConfig { #[serde(rename_all = "snake_case")] pub enum MarketplaceSourceType { Git, + Local, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)] @@ -658,7 +674,7 @@ impl From for codex_app_server_protocol::SandboxSettings } } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default, JsonSchema)] #[serde(rename_all = "kebab-case")] pub enum ShellEnvironmentPolicyInherit { /// "Core" environment variables for the platform. 
On UNIX, this would diff --git a/codex-rs/config/src/types_tests.rs b/codex-rs/config/src/types_tests.rs index fbcd2bdc64..b18c1cc645 100644 --- a/codex-rs/config/src/types_tests.rs +++ b/codex-rs/config/src/types_tests.rs @@ -41,3 +41,21 @@ fn deserialize_skill_config_with_path_selector() { } ); } + +#[test] +fn memories_config_clamps_count_limits_to_nonzero_values() { + let config = MemoriesConfig::from(MemoriesToml { + max_raw_memories_for_consolidation: Some(0), + max_rollouts_per_startup: Some(0), + ..Default::default() + }); + + assert_eq!( + config, + MemoriesConfig { + max_raw_memories_for_consolidation: 1, + max_rollouts_per_startup: 1, + ..MemoriesConfig::default() + } + ); +} diff --git a/codex-rs/connectors/src/accessible.rs b/codex-rs/connectors/src/accessible.rs new file mode 100644 index 0000000000..c44f8d8a38 --- /dev/null +++ b/codex-rs/connectors/src/accessible.rs @@ -0,0 +1,76 @@ +use std::collections::BTreeSet; +use std::collections::HashMap; + +use crate::metadata::connector_install_url; +use crate::normalize_connector_value; +use codex_app_server_protocol::AppInfo; + +pub struct AccessibleConnectorTool { + pub connector_id: String, + pub connector_name: Option, + pub connector_description: Option, + pub plugin_display_names: Vec, +} + +pub fn collect_accessible_connectors(tools: I) -> Vec +where + I: IntoIterator, +{ + let mut connectors: HashMap)> = HashMap::new(); + for tool in tools { + let connector_id = tool.connector_id; + let connector_name = normalize_connector_value(tool.connector_name.as_deref()) + .unwrap_or_else(|| connector_id.clone()); + let connector_description = + normalize_connector_value(tool.connector_description.as_deref()); + if let Some((existing, existing_plugin_display_names)) = connectors.get_mut(&connector_id) { + if existing.name == connector_id && connector_name != connector_id { + existing.name = connector_name; + } + if existing.description.is_none() && connector_description.is_some() { + existing.description = 
connector_description; + } + existing_plugin_display_names.extend(tool.plugin_display_names); + } else { + connectors.insert( + connector_id.clone(), + ( + AppInfo { + id: connector_id.clone(), + name: connector_name, + description: connector_description, + logo_url: None, + logo_url_dark: None, + distribution_channel: None, + branding: None, + app_metadata: None, + labels: None, + install_url: None, + is_accessible: true, + is_enabled: true, + plugin_display_names: Vec::new(), + }, + tool.plugin_display_names + .into_iter() + .collect::>(), + ), + ); + } + } + let mut accessible: Vec = connectors + .into_values() + .map(|(mut connector, plugin_display_names)| { + connector.plugin_display_names = plugin_display_names.into_iter().collect(); + connector.install_url = Some(connector_install_url(&connector.name, &connector.id)); + connector + }) + .collect(); + accessible.sort_by(|left, right| { + right + .is_accessible + .cmp(&left.is_accessible) + .then_with(|| left.name.cmp(&right.name)) + .then_with(|| left.id.cmp(&right.id)) + }); + accessible +} diff --git a/codex-rs/connectors/src/filter.rs b/codex-rs/connectors/src/filter.rs new file mode 100644 index 0000000000..82c334f82d --- /dev/null +++ b/codex-rs/connectors/src/filter.rs @@ -0,0 +1,68 @@ +use std::collections::HashSet; + +use codex_app_server_protocol::AppInfo; + +pub fn filter_tool_suggest_discoverable_connectors( + directory_connectors: Vec, + accessible_connectors: &[AppInfo], + discoverable_connector_ids: &HashSet, + originator_value: &str, +) -> Vec { + let accessible_connector_ids: HashSet<&str> = accessible_connectors + .iter() + .filter(|connector| connector.is_accessible) + .map(|connector| connector.id.as_str()) + .collect(); + + let mut connectors = filter_disallowed_connectors(directory_connectors, originator_value) + .into_iter() + .filter(|connector| !accessible_connector_ids.contains(connector.id.as_str())) + .filter(|connector| discoverable_connector_ids.contains(connector.id.as_str())) + 
.collect::>(); + connectors.sort_by(|left, right| { + left.name + .cmp(&right.name) + .then_with(|| left.id.cmp(&right.id)) + }); + connectors +} + +const DISALLOWED_CONNECTOR_IDS: &[&str] = &[ + "asdk_app_6938a94a61d881918ef32cb999ff937c", + "connector_2b0a9009c9c64bf9933a3dae3f2b1254", + "connector_3f8d1a79f27c4c7ba1a897ab13bf37dc", + "connector_68de829bf7648191acd70a907364c67c", + "connector_68e004f14af881919eb50893d3d9f523", + "connector_69272cb413a081919685ec3c88d1744e", +]; +const FIRST_PARTY_CHAT_DISALLOWED_CONNECTOR_IDS: &[&str] = + &["connector_0f9c9d4592e54d0a9a12b3f44a1e2010"]; +const DISALLOWED_CONNECTOR_PREFIX: &str = "connector_openai_"; + +pub fn filter_disallowed_connectors( + connectors: Vec, + originator_value: &str, +) -> Vec { + let first_party_chat_originator = is_first_party_chat_originator(originator_value); + connectors + .into_iter() + .filter(|connector| { + is_connector_id_allowed(connector.id.as_str(), first_party_chat_originator) + }) + .collect() +} + +fn is_first_party_chat_originator(originator_value: &str) -> bool { + originator_value == "codex_atlas" || originator_value == "codex_chatgpt_desktop" +} + +fn is_connector_id_allowed(connector_id: &str, first_party_chat_originator: bool) -> bool { + let disallowed_connector_ids = if first_party_chat_originator { + FIRST_PARTY_CHAT_DISALLOWED_CONNECTOR_IDS + } else { + DISALLOWED_CONNECTOR_IDS + }; + + !connector_id.starts_with(DISALLOWED_CONNECTOR_PREFIX) + && !disallowed_connector_ids.contains(&connector_id) +} diff --git a/codex-rs/connectors/src/lib.rs b/codex-rs/connectors/src/lib.rs index a74ece5a35..d7c5fb405c 100644 --- a/codex-rs/connectors/src/lib.rs +++ b/codex-rs/connectors/src/lib.rs @@ -10,6 +10,11 @@ use codex_app_server_protocol::AppInfo; use codex_app_server_protocol::AppMetadata; use serde::Deserialize; +pub mod accessible; +pub mod filter; +pub mod merge; +pub mod metadata; + pub const CONNECTORS_CACHE_TTL: Duration = Duration::from_secs(3600); #[derive(Clone, Debug, 
PartialEq, Eq)] diff --git a/codex-rs/connectors/src/merge.rs b/codex-rs/connectors/src/merge.rs new file mode 100644 index 0000000000..b41ee63add --- /dev/null +++ b/codex-rs/connectors/src/merge.rs @@ -0,0 +1,119 @@ +use std::collections::HashMap; +use std::collections::HashSet; + +use crate::metadata::connector_install_url; +use crate::metadata::sort_connectors_by_accessibility_and_name; +use codex_app_server_protocol::AppInfo; + +pub fn merge_connectors( + connectors: Vec, + accessible_connectors: Vec, +) -> Vec { + let mut merged: HashMap = connectors + .into_iter() + .map(|mut connector| { + connector.is_accessible = false; + (connector.id.clone(), connector) + }) + .collect(); + + for mut connector in accessible_connectors { + connector.is_accessible = true; + let connector_id = connector.id.clone(); + if let Some(existing) = merged.get_mut(&connector_id) { + existing.is_accessible = true; + if existing.name == existing.id && connector.name != connector.id { + existing.name = connector.name; + } + if existing.description.is_none() && connector.description.is_some() { + existing.description = connector.description; + } + if existing.logo_url.is_none() && connector.logo_url.is_some() { + existing.logo_url = connector.logo_url; + } + if existing.logo_url_dark.is_none() && connector.logo_url_dark.is_some() { + existing.logo_url_dark = connector.logo_url_dark; + } + if existing.distribution_channel.is_none() && connector.distribution_channel.is_some() { + existing.distribution_channel = connector.distribution_channel; + } + existing + .plugin_display_names + .extend(connector.plugin_display_names); + } else { + merged.insert(connector_id, connector); + } + } + + let mut merged = merged.into_values().collect::>(); + for connector in &mut merged { + if connector.install_url.is_none() { + connector.install_url = Some(connector_install_url(&connector.name, &connector.id)); + } + connector.plugin_display_names.sort_unstable(); + connector.plugin_display_names.dedup(); 
+ } + sort_connectors_by_accessibility_and_name(&mut merged); + merged +} + +pub fn merge_plugin_connectors(connectors: Vec, plugin_app_ids: I) -> Vec +where + I: IntoIterator, +{ + let mut merged = connectors; + let mut connector_ids = merged + .iter() + .map(|connector| connector.id.clone()) + .collect::>(); + + for connector_id in plugin_app_ids { + if connector_ids.insert(connector_id.clone()) { + merged.push(plugin_connector_to_app_info(connector_id)); + } + } + + sort_connectors_by_accessibility_and_name(&mut merged); + merged +} + +pub fn merge_plugin_connectors_with_accessible( + plugin_app_ids: I, + accessible_connectors: Vec, +) -> Vec +where + I: IntoIterator, +{ + let accessible_connector_ids: HashSet<&str> = accessible_connectors + .iter() + .map(|connector| connector.id.as_str()) + .collect(); + let plugin_connectors = plugin_app_ids + .into_iter() + .filter(|connector_id| accessible_connector_ids.contains(connector_id.as_str())) + .map(plugin_connector_to_app_info) + .collect::>(); + merge_connectors(plugin_connectors, accessible_connectors) +} + +pub fn plugin_connector_to_app_info(connector_id: String) -> AppInfo { + // Leave the placeholder name as the connector id so merge_connectors() can + // replace it with canonical app metadata from directory fetches or + // connector_name values from codex_apps tool discovery. 
+ let name = connector_id.clone(); + AppInfo { + id: connector_id.clone(), + name: name.clone(), + description: None, + logo_url: None, + logo_url_dark: None, + distribution_channel: None, + branding: None, + app_metadata: None, + labels: None, + install_url: Some(connector_install_url(&name, &connector_id)), + is_accessible: false, + is_enabled: true, + plugin_display_names: Vec::new(), + } +} diff --git a/codex-rs/connectors/src/metadata.rs b/codex-rs/connectors/src/metadata.rs new file mode 100644 index 0000000000..64becb766d --- /dev/null +++ b/codex-rs/connectors/src/metadata.rs @@ -0,0 +1,27 @@ +use codex_app_server_protocol::AppInfo; + +pub fn connector_display_label(connector: &AppInfo) -> String { + connector.name.clone() +} + +pub fn connector_mention_slug(connector: &AppInfo) -> String { + crate::connector_name_slug(&connector_display_label(connector)) +} + +pub fn connector_install_url(name: &str, connector_id: &str) -> String { + crate::connector_install_url(name, connector_id) +} + +pub fn sanitize_name(name: &str) -> String { + crate::connector_name_slug(name).replace("-", "_") +} + +pub(crate) fn sort_connectors_by_accessibility_and_name(connectors: &mut [AppInfo]) { + connectors.sort_by(|left, right| { + right + .is_accessible + .cmp(&left.is_accessible) + .then_with(|| left.name.cmp(&right.name)) + .then_with(|| left.id.cmp(&right.id)) + }); +} diff --git a/codex-rs/core-plugins/BUILD.bazel b/codex-rs/core-plugins/BUILD.bazel new file mode 100644 index 0000000000..aa19b9f368 --- /dev/null +++ b/codex-rs/core-plugins/BUILD.bazel @@ -0,0 +1,15 @@ +load("//:defs.bzl", "codex_rust_crate") + +codex_rust_crate( + name = "core-plugins", + crate_name = "codex_core_plugins", + compile_data = glob( + include = ["**"], + exclude = [ + "**/* *", + "BUILD.bazel", + "Cargo.toml", + ], + allow_empty = True, + ), +) diff --git a/codex-rs/core-plugins/Cargo.toml b/codex-rs/core-plugins/Cargo.toml new file mode 100644 index 0000000000..0372d9a14a --- /dev/null +++ 
b/codex-rs/core-plugins/Cargo.toml @@ -0,0 +1,40 @@ +[package] +edition.workspace = true +license.workspace = true +name = "codex-core-plugins" +version.workspace = true + +[lib] +doctest = false +name = "codex_core_plugins" +path = "src/lib.rs" + +[lints] +workspace = true + +[dependencies] +codex-app-server-protocol = { workspace = true } +codex-config = { workspace = true } +codex-core-skills = { workspace = true } +codex-exec-server = { workspace = true } +codex-git-utils = { workspace = true } +codex-login = { workspace = true } +codex-plugin = { workspace = true } +codex-protocol = { workspace = true } +codex-utils-absolute-path = { workspace = true } +codex-utils-plugins = { workspace = true } +chrono = { workspace = true } +dirs = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +tempfile = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["fs"] } +toml = { workspace = true } +tracing = { workspace = true } +url = { workspace = true } + +[dev-dependencies] +pretty_assertions = { workspace = true } +tempfile = { workspace = true } diff --git a/codex-rs/core-plugins/src/lib.rs b/codex-rs/core-plugins/src/lib.rs new file mode 100644 index 0000000000..82ff4df3c7 --- /dev/null +++ b/codex-rs/core-plugins/src/lib.rs @@ -0,0 +1,7 @@ +pub mod loader; +pub mod manifest; +pub mod marketplace; +pub mod marketplace_upgrade; +pub mod remote; +pub mod store; +pub mod toggles; diff --git a/codex-rs/core-plugins/src/loader.rs b/codex-rs/core-plugins/src/loader.rs new file mode 100644 index 0000000000..ff3b8fc494 --- /dev/null +++ b/codex-rs/core-plugins/src/loader.rs @@ -0,0 +1,838 @@ +use crate::manifest::PluginManifestPaths; +use crate::manifest::load_plugin_manifest; +use crate::marketplace::MarketplacePluginSource; +use crate::marketplace::list_marketplaces; +use crate::marketplace::load_marketplace; +use crate::store::PluginStore; 
+use crate::store::plugin_version_for_source; +use codex_config::ConfigLayerStack; +use codex_config::types::McpServerConfig; +use codex_config::types::PluginConfig; +use codex_core_skills::SkillMetadata; +use codex_core_skills::config_rules::SkillConfigRules; +use codex_core_skills::config_rules::resolve_disabled_skill_paths; +use codex_core_skills::config_rules::skill_config_rules_from_stack; +use codex_core_skills::loader::SkillRoot; +use codex_core_skills::loader::load_skills_from_roots; +use codex_exec_server::LOCAL_FS; +use codex_plugin::AppConnectorId; +use codex_plugin::LoadedPlugin; +use codex_plugin::PluginCapabilitySummary; +use codex_plugin::PluginId; +use codex_plugin::PluginIdError; +use codex_plugin::PluginLoadOutcome; +use codex_plugin::PluginTelemetryMetadata; +use codex_protocol::protocol::Product; +use codex_protocol::protocol::SkillScope; +use codex_utils_absolute_path::AbsolutePathBuf; +use serde::Deserialize; +use serde_json::Map as JsonMap; +use serde_json::Value as JsonValue; +use std::collections::HashMap; +use std::collections::HashSet; +use std::fs; +use std::path::Path; +use std::sync::Arc; +use tracing::warn; + +const DEFAULT_SKILLS_DIR_NAME: &str = "skills"; +const DEFAULT_MCP_CONFIG_FILE: &str = ".mcp.json"; +const DEFAULT_APP_CONFIG_FILE: &str = ".app.json"; +const OPENAI_CURATED_MARKETPLACE_NAME: &str = "openai-curated"; +const CONFIG_TOML_FILE: &str = "config.toml"; + +#[derive(Clone, Copy, PartialEq, Eq)] +enum NonCuratedCacheRefreshMode { + IfVersionChanged, + ForceReinstall, +} + +pub fn log_plugin_load_errors(outcome: &PluginLoadOutcome) { + for plugin in outcome + .plugins() + .iter() + .filter(|plugin| plugin.error.is_some()) + { + if let Some(error) = plugin.error.as_deref() { + warn!( + plugin = plugin.config_name, + path = %plugin.root.display(), + "failed to load plugin: {error}" + ); + } + } +} + +#[derive(Debug, Default, Deserialize)] +#[serde(rename_all = "camelCase")] +struct PluginMcpFile { + #[serde(default)] + 
mcp_servers: HashMap, +} + +#[derive(Debug, Default, Deserialize)] +#[serde(rename_all = "camelCase")] +struct PluginAppFile { + #[serde(default)] + apps: HashMap, +} + +#[derive(Debug, Default, Deserialize)] +struct PluginAppConfig { + id: String, +} + +pub async fn load_plugins_from_layer_stack( + config_layer_stack: &ConfigLayerStack, + store: &PluginStore, + restriction_product: Option, +) -> PluginLoadOutcome { + let skill_config_rules = skill_config_rules_from_stack(config_layer_stack); + let mut configured_plugins: Vec<_> = configured_plugins_from_stack(config_layer_stack) + .into_iter() + .collect(); + configured_plugins.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); + + let mut plugins = Vec::with_capacity(configured_plugins.len()); + let mut seen_mcp_server_names = HashMap::::new(); + for (configured_name, plugin) in configured_plugins { + let loaded_plugin = load_plugin( + configured_name.clone(), + &plugin, + store, + restriction_product, + &skill_config_rules, + ) + .await; + for name in loaded_plugin.mcp_servers.keys() { + if let Some(previous_plugin) = + seen_mcp_server_names.insert(name.clone(), configured_name.clone()) + { + warn!( + plugin = configured_name, + previous_plugin, + server = name, + "skipping duplicate plugin MCP server name" + ); + } + } + plugins.push(loaded_plugin); + } + + PluginLoadOutcome::from_plugins(plugins) +} + +pub fn refresh_curated_plugin_cache( + codex_home: &Path, + plugin_version: &str, + configured_curated_plugin_ids: &[PluginId], +) -> Result { + let store = PluginStore::new(codex_home.to_path_buf()); + let curated_marketplace_path = AbsolutePathBuf::try_from( + codex_home + .join(".tmp/plugins") + .join(".agents/plugins/marketplace.json"), + ) + .map_err(|_| "local curated marketplace is not available".to_string())?; + let curated_marketplace = load_marketplace(&curated_marketplace_path) + .map_err(|err| format!("failed to load curated marketplace for cache refresh: {err}"))?; + + let mut plugin_sources = 
HashMap::::new(); + for plugin in curated_marketplace.plugins { + let plugin_name = plugin.name; + if plugin_sources.contains_key(&plugin_name) { + warn!( + plugin = plugin_name, + marketplace = OPENAI_CURATED_MARKETPLACE_NAME, + "ignoring duplicate curated plugin entry during cache refresh" + ); + continue; + } + let source_path = match plugin.source { + MarketplacePluginSource::Local { path } => path, + }; + plugin_sources.insert(plugin_name, source_path); + } + + let mut cache_refreshed = false; + for plugin_id in configured_curated_plugin_ids { + if store.active_plugin_version(plugin_id).as_deref() == Some(plugin_version) { + continue; + } + + let Some(source_path) = plugin_sources.get(&plugin_id.plugin_name).cloned() else { + warn!( + plugin = plugin_id.plugin_name, + marketplace = OPENAI_CURATED_MARKETPLACE_NAME, + "configured curated plugin no longer exists in curated marketplace during cache refresh" + ); + continue; + }; + + store + .install_with_version(source_path, plugin_id.clone(), plugin_version.to_string()) + .map_err(|err| { + format!( + "failed to refresh curated plugin cache for {}: {err}", + plugin_id.as_key() + ) + })?; + cache_refreshed = true; + } + + Ok(cache_refreshed) +} + +pub fn refresh_non_curated_plugin_cache( + codex_home: &Path, + additional_roots: &[AbsolutePathBuf], +) -> Result { + refresh_non_curated_plugin_cache_with_mode( + codex_home, + additional_roots, + NonCuratedCacheRefreshMode::IfVersionChanged, + ) +} + +pub fn refresh_non_curated_plugin_cache_force_reinstall( + codex_home: &Path, + additional_roots: &[AbsolutePathBuf], +) -> Result { + refresh_non_curated_plugin_cache_with_mode( + codex_home, + additional_roots, + NonCuratedCacheRefreshMode::ForceReinstall, + ) +} + +fn refresh_non_curated_plugin_cache_with_mode( + codex_home: &Path, + additional_roots: &[AbsolutePathBuf], + mode: NonCuratedCacheRefreshMode, +) -> Result { + let configured_non_curated_plugin_ids = + 
non_curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( + codex_home, + "failed to read user config while refreshing non-curated plugin cache", + "failed to parse user config while refreshing non-curated plugin cache", + )); + if configured_non_curated_plugin_ids.is_empty() { + return Ok(false); + } + let configured_non_curated_plugin_keys = configured_non_curated_plugin_ids + .iter() + .map(PluginId::as_key) + .collect::>(); + + let store = PluginStore::new(codex_home.to_path_buf()); + let marketplace_outcome = list_marketplaces(additional_roots) + .map_err(|err| format!("failed to discover marketplaces for cache refresh: {err}"))?; + let mut plugin_sources = HashMap::::new(); + + for marketplace in marketplace_outcome.marketplaces { + if marketplace.name == OPENAI_CURATED_MARKETPLACE_NAME { + continue; + } + + for plugin in marketplace.plugins { + let plugin_id = + PluginId::new(plugin.name.clone(), marketplace.name.clone()).map_err(|err| { + match err { + PluginIdError::Invalid(message) => { + format!("failed to prepare non-curated plugin cache refresh: {message}") + } + } + })?; + let plugin_key = plugin_id.as_key(); + if !configured_non_curated_plugin_keys.contains(&plugin_key) { + continue; + } + if plugin_sources.contains_key(&plugin_key) { + warn!( + plugin = plugin.name, + marketplace = marketplace.name, + "ignoring duplicate non-curated plugin entry during cache refresh" + ); + continue; + } + + let source_path = match plugin.source { + MarketplacePluginSource::Local { path } => path, + }; + let plugin_version = plugin_version_for_source(source_path.as_path()) + .map_err(|err| format!("failed to read plugin version for {plugin_key}: {err}"))?; + plugin_sources.insert(plugin_key, (source_path, plugin_version)); + } + } + + let mut cache_refreshed = false; + for plugin_id in configured_non_curated_plugin_ids { + let plugin_key = plugin_id.as_key(); + let Some((source_path, plugin_version)) = plugin_sources.get(&plugin_key).cloned() else 
{ + warn!( + plugin = plugin_id.plugin_name, + marketplace = plugin_id.marketplace_name, + "configured non-curated plugin no longer exists in discovered marketplaces during cache refresh" + ); + continue; + }; + + if mode == NonCuratedCacheRefreshMode::IfVersionChanged + && store.active_plugin_version(&plugin_id).as_deref() == Some(plugin_version.as_str()) + { + continue; + } + + store + .install_with_version(source_path, plugin_id.clone(), plugin_version) + .map_err(|err| format!("failed to refresh plugin cache for {plugin_key}: {err}"))?; + cache_refreshed = true; + } + + Ok(cache_refreshed) +} + +fn configured_plugins_from_stack( + config_layer_stack: &ConfigLayerStack, +) -> HashMap { + let Some(user_layer) = config_layer_stack.get_user_layer() else { + return HashMap::new(); + }; + configured_plugins_from_user_config_value(&user_layer.config) +} + +fn configured_plugins_from_user_config_value( + user_config: &toml::Value, +) -> HashMap { + let Some(plugins_value) = user_config.get("plugins") else { + return HashMap::new(); + }; + match plugins_value.clone().try_into() { + Ok(plugins) => plugins, + Err(err) => { + warn!("invalid plugins config: {err}"); + HashMap::new() + } + } +} + +fn configured_plugins_from_codex_home( + codex_home: &Path, + read_error_message: &str, + parse_error_message: &str, +) -> HashMap { + let config_path = codex_home.join(CONFIG_TOML_FILE); + let user_config = match fs::read_to_string(&config_path) { + Ok(user_config) => user_config, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return HashMap::new(), + Err(err) => { + warn!( + path = %config_path.display(), + error = %err, + "{read_error_message}" + ); + return HashMap::new(); + } + }; + + let user_config = match toml::from_str::(&user_config) { + Ok(user_config) => user_config, + Err(err) => { + warn!( + path = %config_path.display(), + error = %err, + "{parse_error_message}" + ); + return HashMap::new(); + } + }; + + 
configured_plugins_from_user_config_value(&user_config) +} + +fn configured_plugin_ids( + configured_plugins: HashMap, + invalid_plugin_key_message: &str, +) -> Vec { + configured_plugins + .into_keys() + .filter_map(|plugin_key| match PluginId::parse(&plugin_key) { + Ok(plugin_id) => Some(plugin_id), + Err(err) => { + warn!( + plugin_key, + error = %err, + "{invalid_plugin_key_message}" + ); + None + } + }) + .collect() +} + +fn curated_plugin_ids_from_config_keys( + configured_plugins: HashMap, +) -> Vec { + let mut configured_curated_plugin_ids = configured_plugin_ids( + configured_plugins, + "ignoring invalid configured plugin key during curated sync setup", + ) + .into_iter() + .filter(|plugin_id| plugin_id.marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME) + .collect::>(); + configured_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); + configured_curated_plugin_ids +} + +fn non_curated_plugin_ids_from_config_keys( + configured_plugins: HashMap, +) -> Vec { + let mut configured_non_curated_plugin_ids = configured_plugin_ids( + configured_plugins, + "ignoring invalid plugin key during non-curated cache refresh setup", + ) + .into_iter() + .filter(|plugin_id| plugin_id.marketplace_name != OPENAI_CURATED_MARKETPLACE_NAME) + .collect::>(); + configured_non_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); + configured_non_curated_plugin_ids +} + +pub fn configured_curated_plugin_ids_from_codex_home(codex_home: &Path) -> Vec { + curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( + codex_home, + "failed to read user config while refreshing curated plugin cache", + "failed to parse user config while refreshing curated plugin cache", + )) +} + +async fn load_plugin( + config_name: String, + plugin: &PluginConfig, + store: &PluginStore, + restriction_product: Option, + skill_config_rules: &SkillConfigRules, +) -> LoadedPlugin { + let plugin_id = PluginId::parse(&config_name); + let active_plugin_root = plugin_id + .as_ref() + 
.ok() + .and_then(|plugin_id| store.active_plugin_root(plugin_id)); + let root = active_plugin_root + .clone() + .unwrap_or_else(|| match &plugin_id { + Ok(plugin_id) => store.plugin_base_root(plugin_id), + Err(_) => store.root().clone(), + }); + let mut loaded_plugin = LoadedPlugin { + config_name, + manifest_name: None, + manifest_description: None, + root, + enabled: plugin.enabled, + skill_roots: Vec::new(), + disabled_skill_paths: HashSet::new(), + has_enabled_skills: false, + mcp_servers: HashMap::new(), + apps: Vec::new(), + error: None, + }; + + if !plugin.enabled { + return loaded_plugin; + } + + let plugin_root = match plugin_id { + Ok(_) => match active_plugin_root { + Some(plugin_root) => plugin_root, + None => { + loaded_plugin.error = Some("plugin is not installed".to_string()); + return loaded_plugin; + } + }, + Err(err) => { + loaded_plugin.error = Some(err.to_string()); + return loaded_plugin; + } + }; + + if !plugin_root.as_path().is_dir() { + loaded_plugin.error = Some("path does not exist or is not a directory".to_string()); + return loaded_plugin; + } + + let Some(manifest) = load_plugin_manifest(plugin_root.as_path()) else { + loaded_plugin.error = Some("missing or invalid .codex-plugin/plugin.json".to_string()); + return loaded_plugin; + }; + + let manifest_paths = &manifest.paths; + loaded_plugin.manifest_name = manifest + .interface + .as_ref() + .and_then(|interface| interface.display_name.as_deref()) + .map(str::trim) + .filter(|display_name| !display_name.is_empty()) + .map(str::to_string) + .or_else(|| Some(manifest.name.clone())); + loaded_plugin.manifest_description = manifest.description.clone(); + loaded_plugin.skill_roots = plugin_skill_roots(&plugin_root, manifest_paths); + let resolved_skills = load_plugin_skills( + &plugin_root, + manifest_paths, + restriction_product, + skill_config_rules, + ) + .await; + let has_enabled_skills = resolved_skills.has_enabled_skills(); + loaded_plugin.disabled_skill_paths = 
resolved_skills.disabled_skill_paths; + loaded_plugin.has_enabled_skills = has_enabled_skills; + let mut mcp_servers = HashMap::new(); + for mcp_config_path in plugin_mcp_config_paths(plugin_root.as_path(), manifest_paths) { + let plugin_mcp = load_mcp_servers_from_file(plugin_root.as_path(), &mcp_config_path).await; + for (name, config) in plugin_mcp.mcp_servers { + if mcp_servers.insert(name.clone(), config).is_some() { + warn!( + plugin = %plugin_root.display(), + path = %mcp_config_path.display(), + server = name, + "plugin MCP file overwrote an earlier server definition" + ); + } + } + } + loaded_plugin.mcp_servers = mcp_servers; + loaded_plugin.apps = load_plugin_apps(plugin_root.as_path()).await; + loaded_plugin +} + +#[derive(Debug, Clone)] +pub struct ResolvedPluginSkills { + pub skills: Vec, + pub disabled_skill_paths: HashSet, + pub had_errors: bool, +} + +impl ResolvedPluginSkills { + pub fn has_enabled_skills(&self) -> bool { + self.had_errors + || self + .skills + .iter() + .any(|skill| !self.disabled_skill_paths.contains(&skill.path_to_skills_md)) + } +} + +pub async fn load_plugin_skills( + plugin_root: &AbsolutePathBuf, + manifest_paths: &PluginManifestPaths, + restriction_product: Option, + skill_config_rules: &SkillConfigRules, +) -> ResolvedPluginSkills { + let roots = plugin_skill_roots(plugin_root, manifest_paths) + .into_iter() + .map(|path| SkillRoot { + path, + scope: SkillScope::User, + file_system: Arc::clone(&LOCAL_FS), + }) + .collect::>(); + let outcome = load_skills_from_roots(roots).await; + let had_errors = !outcome.errors.is_empty(); + let skills = outcome + .skills + .into_iter() + .filter(|skill| skill.matches_product_restriction_for_product(restriction_product)) + .collect::>(); + let disabled_skill_paths = resolve_disabled_skill_paths(&skills, skill_config_rules); + + ResolvedPluginSkills { + skills, + disabled_skill_paths, + had_errors, + } +} + +fn plugin_skill_roots( + plugin_root: &AbsolutePathBuf, + manifest_paths: 
&PluginManifestPaths, +) -> Vec { + let mut paths = default_skill_roots(plugin_root); + if let Some(path) = &manifest_paths.skills { + paths.push(path.clone()); + } + paths.sort_unstable(); + paths.dedup(); + paths +} + +fn default_skill_roots(plugin_root: &AbsolutePathBuf) -> Vec { + let skills_dir = plugin_root.join(DEFAULT_SKILLS_DIR_NAME); + if skills_dir.is_dir() { + vec![skills_dir] + } else { + Vec::new() + } +} + +fn plugin_mcp_config_paths( + plugin_root: &Path, + manifest_paths: &PluginManifestPaths, +) -> Vec { + if let Some(path) = &manifest_paths.mcp_servers { + return vec![path.clone()]; + } + default_mcp_config_paths(plugin_root) +} + +fn default_mcp_config_paths(plugin_root: &Path) -> Vec { + let mut paths = Vec::new(); + let default_path = plugin_root.join(DEFAULT_MCP_CONFIG_FILE); + if default_path.is_file() + && let Ok(default_path) = AbsolutePathBuf::try_from(default_path) + { + paths.push(default_path); + } + paths.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); + paths.dedup_by(|left, right| left.as_path() == right.as_path()); + paths +} + +pub async fn load_plugin_apps(plugin_root: &Path) -> Vec { + if let Some(manifest) = load_plugin_manifest(plugin_root) { + return load_apps_from_paths( + plugin_root, + plugin_app_config_paths(plugin_root, &manifest.paths), + ) + .await; + } + load_apps_from_paths(plugin_root, default_app_config_paths(plugin_root)).await +} + +fn plugin_app_config_paths( + plugin_root: &Path, + manifest_paths: &PluginManifestPaths, +) -> Vec { + if let Some(path) = &manifest_paths.apps { + return vec![path.clone()]; + } + default_app_config_paths(plugin_root) +} + +fn default_app_config_paths(plugin_root: &Path) -> Vec { + let mut paths = Vec::new(); + let default_path = plugin_root.join(DEFAULT_APP_CONFIG_FILE); + if default_path.is_file() + && let Ok(default_path) = AbsolutePathBuf::try_from(default_path) + { + paths.push(default_path); + } + paths.sort_unstable_by(|left, right| 
left.as_path().cmp(right.as_path())); + paths.dedup_by(|left, right| left.as_path() == right.as_path()); + paths +} + +async fn load_apps_from_paths( + plugin_root: &Path, + app_config_paths: Vec, +) -> Vec { + let mut connector_ids = Vec::new(); + for app_config_path in app_config_paths { + let Ok(contents) = tokio::fs::read_to_string(app_config_path.as_path()).await else { + continue; + }; + let parsed = match serde_json::from_str::(&contents) { + Ok(parsed) => parsed, + Err(err) => { + warn!( + path = %app_config_path.display(), + "failed to parse plugin app config: {err}" + ); + continue; + } + }; + + let mut apps: Vec = parsed.apps.into_values().collect(); + apps.sort_unstable_by(|left, right| left.id.cmp(&right.id)); + + connector_ids.extend(apps.into_iter().filter_map(|app| { + if app.id.trim().is_empty() { + warn!( + plugin = %plugin_root.display(), + "plugin app config is missing an app id" + ); + None + } else { + Some(AppConnectorId(app.id)) + } + })); + } + connector_ids.dedup(); + connector_ids +} + +pub async fn plugin_telemetry_metadata_from_root( + plugin_id: &PluginId, + plugin_root: &AbsolutePathBuf, +) -> PluginTelemetryMetadata { + let Some(manifest) = load_plugin_manifest(plugin_root.as_path()) else { + return PluginTelemetryMetadata::from_plugin_id(plugin_id); + }; + + let manifest_paths = &manifest.paths; + let has_skills = !plugin_skill_roots(plugin_root, manifest_paths).is_empty(); + let mut mcp_server_names = Vec::new(); + for path in plugin_mcp_config_paths(plugin_root.as_path(), manifest_paths) { + mcp_server_names.extend( + load_mcp_servers_from_file(plugin_root.as_path(), &path) + .await + .mcp_servers + .into_keys(), + ); + } + mcp_server_names.sort_unstable(); + mcp_server_names.dedup(); + + PluginTelemetryMetadata { + plugin_id: plugin_id.clone(), + capability_summary: Some(PluginCapabilitySummary { + config_name: plugin_id.as_key(), + display_name: plugin_id.plugin_name.clone(), + description: None, + has_skills, + 
mcp_server_names, + app_connector_ids: load_apps_from_paths( + plugin_root.as_path(), + plugin_app_config_paths(plugin_root.as_path(), manifest_paths), + ) + .await, + }), + } +} + +pub async fn load_plugin_mcp_servers(plugin_root: &Path) -> HashMap { + let Some(manifest) = load_plugin_manifest(plugin_root) else { + return HashMap::new(); + }; + + let mut mcp_servers = HashMap::new(); + for mcp_config_path in plugin_mcp_config_paths(plugin_root, &manifest.paths) { + let plugin_mcp = load_mcp_servers_from_file(plugin_root, &mcp_config_path).await; + for (name, config) in plugin_mcp.mcp_servers { + mcp_servers.entry(name).or_insert(config); + } + } + + mcp_servers +} + +pub async fn installed_plugin_telemetry_metadata( + codex_home: &Path, + plugin_id: &PluginId, +) -> PluginTelemetryMetadata { + let store = PluginStore::new(codex_home.to_path_buf()); + let Some(plugin_root) = store.active_plugin_root(plugin_id) else { + return PluginTelemetryMetadata::from_plugin_id(plugin_id); + }; + + plugin_telemetry_metadata_from_root(plugin_id, &plugin_root).await +} + +async fn load_mcp_servers_from_file( + plugin_root: &Path, + mcp_config_path: &AbsolutePathBuf, +) -> PluginMcpDiscovery { + let Ok(contents) = tokio::fs::read_to_string(mcp_config_path.as_path()).await else { + return PluginMcpDiscovery::default(); + }; + let parsed = match serde_json::from_str::(&contents) { + Ok(parsed) => parsed, + Err(err) => { + warn!( + path = %mcp_config_path.display(), + "failed to parse plugin MCP config: {err}" + ); + return PluginMcpDiscovery::default(); + } + }; + normalize_plugin_mcp_servers( + plugin_root, + parsed.mcp_servers, + mcp_config_path.to_string_lossy().as_ref(), + ) +} + +fn normalize_plugin_mcp_servers( + plugin_root: &Path, + plugin_mcp_servers: HashMap, + source: &str, +) -> PluginMcpDiscovery { + let mut mcp_servers = HashMap::new(); + + for (name, config_value) in plugin_mcp_servers { + let normalized = normalize_plugin_mcp_server_value(plugin_root, config_value); 
+ match serde_json::from_value::(JsonValue::Object(normalized)) { + Ok(config) => { + mcp_servers.insert(name, config); + } + Err(err) => { + warn!( + plugin = %plugin_root.display(), + server = name, + "failed to parse plugin MCP server from {source}: {err}" + ); + } + } + } + + PluginMcpDiscovery { mcp_servers } +} + +fn normalize_plugin_mcp_server_value( + plugin_root: &Path, + value: JsonValue, +) -> JsonMap { + let mut object = match value { + JsonValue::Object(object) => object, + _ => return JsonMap::new(), + }; + + if let Some(JsonValue::String(transport_type)) = object.remove("type") { + match transport_type.as_str() { + "http" | "streamable_http" | "streamable-http" => {} + "stdio" => {} + other => { + warn!( + plugin = %plugin_root.display(), + transport = other, + "plugin MCP server uses an unknown transport type" + ); + } + } + } + + if let Some(JsonValue::Object(oauth)) = object.remove("oauth") + && oauth.contains_key("callbackPort") + { + warn!( + plugin = %plugin_root.display(), + "plugin MCP server OAuth callbackPort is ignored; Codex uses global MCP OAuth callback settings" + ); + } + + if let Some(JsonValue::String(cwd)) = object.get("cwd") + && !Path::new(cwd).is_absolute() + { + object.insert( + "cwd".to_string(), + JsonValue::String(plugin_root.join(cwd).display().to_string()), + ); + } + + object +} + +#[derive(Debug, Default)] +struct PluginMcpDiscovery { + mcp_servers: HashMap, +} diff --git a/codex-rs/core/src/plugins/manifest.rs b/codex-rs/core-plugins/src/manifest.rs similarity index 98% rename from codex-rs/core/src/plugins/manifest.rs rename to codex-rs/core-plugins/src/manifest.rs index f1253441bd..f58b882ca1 100644 --- a/codex-rs/core/src/plugins/manifest.rs +++ b/codex-rs/core-plugins/src/manifest.rs @@ -30,12 +30,12 @@ struct RawPluginManifest { } #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct PluginManifest { - pub(crate) name: String, - pub(crate) version: Option, - pub(crate) description: Option, - pub(crate) paths: 
PluginManifestPaths, - pub(crate) interface: Option, +pub struct PluginManifest { + pub name: String, + pub version: Option, + pub description: Option, + pub paths: PluginManifestPaths, + pub interface: Option, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -114,7 +114,7 @@ enum RawPluginManifestDefaultPromptEntry { Invalid(JsonValue), } -pub(crate) fn load_plugin_manifest(plugin_root: &Path) -> Option { +pub fn load_plugin_manifest(plugin_root: &Path) -> Option { let manifest_path = plugin_root.join(PLUGIN_MANIFEST_PATH); if !manifest_path.is_file() { return None; diff --git a/codex-rs/core/src/plugins/marketplace.rs b/codex-rs/core-plugins/src/marketplace.rs similarity index 61% rename from codex-rs/core/src/plugins/marketplace.rs rename to codex-rs/core-plugins/src/marketplace.rs index 3cb1756f80..79e5237ad0 100644 --- a/codex-rs/core/src/plugins/marketplace.rs +++ b/codex-rs/core-plugins/src/marketplace.rs @@ -1,5 +1,5 @@ -use super::PluginManifestInterface; -use super::load_plugin_manifest; +use crate::manifest::PluginManifestInterface; +use crate::manifest::load_plugin_manifest; use codex_app_server_protocol::PluginAuthPolicy; use codex_app_server_protocol::PluginInstallPolicy; use codex_git_utils::get_git_repo_root; @@ -9,6 +9,8 @@ use codex_protocol::protocol::Product; use codex_utils_absolute_path::AbsolutePathBuf; use dirs::home_dir; use serde::Deserialize; +use serde::Deserializer; +use serde_json::Value as JsonValue; use std::fs; use std::io; use std::path::Component; @@ -16,7 +18,10 @@ use std::path::Path; use std::path::PathBuf; use tracing::warn; -const MARKETPLACE_RELATIVE_PATH: &str = ".agents/plugins/marketplace.json"; +const MARKETPLACE_MANIFEST_RELATIVE_PATHS: &[&str] = &[ + ".agents/plugins/marketplace.json", + ".claude-plugin/marketplace.json", +]; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ResolvedMarketplacePlugin { @@ -162,46 +167,50 @@ pub fn resolve_marketplace_plugin( ) -> Result { let marketplace = 
load_raw_marketplace_manifest(marketplace_path)?; let marketplace_name = marketplace.name; - let plugin = marketplace - .plugins - .into_iter() - .find(|plugin| plugin.name == plugin_name); - - let Some(plugin) = plugin else { - return Err(MarketplaceError::PluginNotFound { - plugin_name: plugin_name.to_string(), - marketplace_name, - }); - }; - - let RawMarketplaceManifestPlugin { - name, - source, - policy, - .. - } = plugin; - let install_policy = policy.installation; - let product_allowed = match policy.products.as_deref() { - None => true, - Some([]) => false, - Some(products) => { - restriction_product.is_some_and(|product| product.matches_product_restriction(products)) + let marketplace_name_for_not_found = marketplace_name.clone(); + for plugin in marketplace.plugins { + if plugin.name != plugin_name { + continue; } - }; - if install_policy == MarketplacePluginInstallPolicy::NotAvailable || !product_allowed { - return Err(MarketplaceError::PluginNotAvailable { - plugin_name: name, - marketplace_name, + + let RawMarketplaceManifestPlugin { + name, + source, + policy, + .. 
+ } = plugin; + let install_policy = policy.installation; + let product_allowed = match policy.products.as_deref() { + None => true, + Some([]) => false, + Some(products) => restriction_product + .is_some_and(|product| product.matches_product_restriction(products)), + }; + if install_policy == MarketplacePluginInstallPolicy::NotAvailable || !product_allowed { + return Err(MarketplaceError::PluginNotAvailable { + plugin_name: name, + marketplace_name, + }); + } + + let Some(source_path) = + resolve_supported_plugin_source_path(marketplace_path, &name, source) + else { + continue; + }; + + return Ok(ResolvedMarketplacePlugin { + plugin_id: PluginId::new(name, marketplace_name).map_err(|err| match err { + PluginIdError::Invalid(message) => MarketplaceError::InvalidPlugin(message), + })?, + source_path, + auth_policy: policy.authentication, }); } - let plugin_id = PluginId::new(name, marketplace_name).map_err(|err| match err { - PluginIdError::Invalid(message) => MarketplaceError::InvalidPlugin(message), - })?; - Ok(ResolvedMarketplacePlugin { - plugin_id, - source_path: resolve_plugin_source_path(marketplace_path, source)?, - auth_policy: policy.authentication, + Err(MarketplaceError::PluginNotFound { + plugin_name: plugin_name.to_string(), + marketplace_name: marketplace_name_for_not_found, }) } @@ -212,17 +221,51 @@ pub fn list_marketplaces( } pub fn validate_marketplace_root(root: &Path) -> Result { - let path = AbsolutePathBuf::try_from(root.join(MARKETPLACE_RELATIVE_PATH)).map_err(|err| { - MarketplaceError::InvalidMarketplaceFile { - path: root.join(MARKETPLACE_RELATIVE_PATH), - message: format!("marketplace path must resolve to an absolute path: {err}"), - } - })?; + let Some(path) = find_marketplace_manifest_path(root) else { + return Err(MarketplaceError::InvalidMarketplaceFile { + path: root.to_path_buf(), + message: "marketplace root does not contain a supported manifest".to_string(), + }); + }; let marketplace = load_marketplace(&path)?; 
Ok(marketplace.name) } -pub(crate) fn load_marketplace(path: &AbsolutePathBuf) -> Result { +pub fn find_marketplace_manifest_path(root: &Path) -> Option { + MARKETPLACE_MANIFEST_RELATIVE_PATHS + .iter() + .find_map(|relative_path| { + let path = root.join(relative_path); + if !path.is_file() { + return None; + } + AbsolutePathBuf::try_from(path).ok() + }) +} + +fn invalid_marketplace_layout_error(path: &AbsolutePathBuf) -> MarketplaceError { + MarketplaceError::InvalidMarketplaceFile { + path: path.to_path_buf(), + message: "marketplace file is not in a supported location".to_string(), + } +} + +fn marketplace_root_from_layout(marketplace_path: &Path, relative_path: &str) -> Option { + let mut current = marketplace_path; + for component in Path::new(relative_path).components().rev() { + let expected = match component { + Component::Normal(expected) => expected, + _ => return None, + }; + if current.file_name() != Some(expected) { + return None; + } + current = current.parent()?; + } + Some(current.to_path_buf()) +} + +pub fn load_marketplace(path: &AbsolutePathBuf) -> Result { let marketplace = load_raw_marketplace_manifest(path)?; let mut plugins = Vec::new(); @@ -233,7 +276,9 @@ pub(crate) fn load_marketplace(path: &AbsolutePathBuf) -> Result Result, ) -> Result { @@ -298,30 +344,27 @@ fn discover_marketplace_paths_from_roots( ) -> Vec { let mut paths = Vec::new(); - if let Some(home) = home_dir { - let path = home.join(MARKETPLACE_RELATIVE_PATH); - if path.is_file() - && let Ok(path) = AbsolutePathBuf::try_from(path) - { - paths.push(path); - } + if let Some(home) = home_dir + && let Some(path) = find_marketplace_manifest_path(home) + { + paths.push(path); } for root in additional_roots { // Curated marketplaces can now come from an HTTP-downloaded directory that is not a git // checkout, so check the root directly before falling back to repo-root discovery. 
- let path = root.join(MARKETPLACE_RELATIVE_PATH); - if path.as_path().is_file() && !paths.contains(&path) { + if let Some(path) = find_marketplace_manifest_path(root.as_path()) + && !paths.contains(&path) + { paths.push(path); continue; } if let Some(repo_root) = get_git_repo_root(root.as_path()) && let Ok(repo_root) = AbsolutePathBuf::try_from(repo_root) + && let Some(path) = find_marketplace_manifest_path(repo_root.as_path()) + && !paths.contains(&path) { - let path = repo_root.join(MARKETPLACE_RELATIVE_PATH); - if path.as_path().is_file() && !paths.contains(&path) { - paths.push(path); - } + paths.push(path); } } @@ -346,80 +389,83 @@ fn load_raw_marketplace_manifest( }) } -fn resolve_plugin_source_path( +fn resolve_supported_plugin_source_path( marketplace_path: &AbsolutePathBuf, + plugin_name: &str, source: RawMarketplaceManifestPluginSource, -) -> Result { +) -> Option { match source { RawMarketplaceManifestPluginSource::Local { path } => { - let Some(path) = path.strip_prefix("./") else { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "local plugin source path must start with `./`".to_string(), - }); - }; - if path.is_empty() { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "local plugin source path must not be empty".to_string(), - }); + match resolve_local_plugin_source_path(marketplace_path, &path) { + Ok(path) => Some(path), + Err(err) => { + warn!( + path = %marketplace_path.display(), + plugin = plugin_name, + error = %err, + "skipping marketplace plugin that failed to resolve" + ); + None + } } - - let relative_source_path = Path::new(path); - if relative_source_path - .components() - .any(|component| !matches!(component, Component::Normal(_))) - { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "local plugin source path must stay within the marketplace root" - .to_string(), 
- }); - } - - // `marketplace.json` lives under `/.agents/plugins/`, but local plugin paths - // are resolved relative to ``, not relative to the `plugins/` directory. - Ok(marketplace_root_dir(marketplace_path)?.join(relative_source_path)) + } + RawMarketplaceManifestPluginSource::Unsupported => { + warn!( + path = %marketplace_path.display(), + plugin = plugin_name, + "skipping marketplace plugin with unsupported source" + ); + None } } } +fn resolve_local_plugin_source_path( + marketplace_path: &AbsolutePathBuf, + source_path: &str, +) -> Result { + let Some(source_path) = source_path.strip_prefix("./") else { + return Err(MarketplaceError::InvalidMarketplaceFile { + path: marketplace_path.to_path_buf(), + message: "local plugin source path must start with `./`".to_string(), + }); + }; + if source_path.is_empty() { + return Err(MarketplaceError::InvalidMarketplaceFile { + path: marketplace_path.to_path_buf(), + message: "local plugin source path must not be empty".to_string(), + }); + } + + let relative_source_path = Path::new(source_path); + if relative_source_path + .components() + .any(|component| !matches!(component, Component::Normal(_))) + { + return Err(MarketplaceError::InvalidMarketplaceFile { + path: marketplace_path.to_path_buf(), + message: "local plugin source path must stay within the marketplace root".to_string(), + }); + } + + // `marketplace.json` lives under a supported marketplace layout beneath ``, + // but local plugin paths are resolved relative to ``. 
+ Ok(marketplace_root_dir(marketplace_path)?.join(relative_source_path)) +} + fn marketplace_root_dir( marketplace_path: &AbsolutePathBuf, ) -> Result { - let Some(plugins_dir) = marketplace_path.parent() else { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "marketplace file must live under `/.agents/plugins/`".to_string(), - }); - }; - let Some(dot_agents_dir) = plugins_dir.parent() else { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "marketplace file must live under `/.agents/plugins/`".to_string(), - }); - }; - let Some(marketplace_root) = dot_agents_dir.parent() else { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "marketplace file must live under `/.agents/plugins/`".to_string(), - }); - }; - - if plugins_dir.as_path().file_name().and_then(|s| s.to_str()) != Some("plugins") - || dot_agents_dir - .as_path() - .file_name() - .and_then(|s| s.to_str()) - != Some(".agents") - { - return Err(MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: "marketplace file must live under `/.agents/plugins/`".to_string(), - }); + for relative_path in MARKETPLACE_MANIFEST_RELATIVE_PATHS { + if let Some(marketplace_root) = + marketplace_root_from_layout(marketplace_path.as_path(), relative_path) + { + return AbsolutePathBuf::try_from(marketplace_root) + .map_err(|_| invalid_marketplace_layout_error(marketplace_path)); + } } - Ok(marketplace_root) + Err(invalid_marketplace_layout_error(marketplace_path)) } #[derive(Debug, Deserialize)] @@ -459,10 +505,33 @@ struct RawMarketplaceManifestPluginPolicy { products: Option>, } -#[derive(Debug, Deserialize)] -#[serde(tag = "source", rename_all = "lowercase")] +#[derive(Debug)] enum RawMarketplaceManifestPluginSource { Local { path: String }, + // Mixed-source marketplaces should still contribute the local plugins we 
can load. + Unsupported, +} + +impl<'de> Deserialize<'de> for RawMarketplaceManifestPluginSource { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let source = JsonValue::deserialize(deserializer)?; + Ok(match source { + JsonValue::String(path) => Self::Local { path }, + JsonValue::Object(object) => match object.get("source").and_then(JsonValue::as_str) { + Some("local") => match object.get("path").and_then(JsonValue::as_str) { + Some(path) => Self::Local { + path: path.to_string(), + }, + None => Self::Unsupported, + }, + _ => Self::Unsupported, + }, + _ => Self::Unsupported, + }) + } } fn resolve_marketplace_interface( diff --git a/codex-rs/core/src/plugins/marketplace_tests.rs b/codex-rs/core-plugins/src/marketplace_tests.rs similarity index 79% rename from codex-rs/core/src/plugins/marketplace_tests.rs rename to codex-rs/core-plugins/src/marketplace_tests.rs index 226ae3ff6e..3412eff72c 100644 --- a/codex-rs/core/src/plugins/marketplace_tests.rs +++ b/codex-rs/core-plugins/src/marketplace_tests.rs @@ -1,8 +1,18 @@ use super::*; use codex_protocol::protocol::Product; use pretty_assertions::assert_eq; +use std::path::Path; use tempfile::tempdir; +const ALTERNATE_MARKETPLACE_RELATIVE_PATH: &str = ".claude-plugin/marketplace.json"; + +fn write_alternate_marketplace(repo_root: &Path, contents: &str) -> AbsolutePathBuf { + let marketplace_path = repo_root.join(ALTERNATE_MARKETPLACE_RELATIVE_PATH); + fs::create_dir_all(marketplace_path.parent().unwrap()).unwrap(); + fs::write(&marketplace_path, contents).unwrap(); + AbsolutePathBuf::try_from(marketplace_path).unwrap() +} + #[test] fn resolve_marketplace_plugin_finds_repo_marketplace_plugin() { let tmp = tempdir().unwrap(); @@ -45,6 +55,46 @@ fn resolve_marketplace_plugin_finds_repo_marketplace_plugin() { ); } +#[test] +fn resolve_marketplace_plugin_supports_alternate_layout_and_string_local_source() { + let tmp = tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + 
fs::create_dir_all(repo_root.join(".git")).unwrap(); + let marketplace_path = write_alternate_marketplace( + &repo_root, + r#"{ + "name": "alternate-marketplace", + "plugins": [ + { + "name": "string-source-plugin", + "source": "./plugins/string-source-plugin" + } + ] +}"#, + ); + + let resolved = resolve_marketplace_plugin( + &marketplace_path, + "string-source-plugin", + Some(Product::Codex), + ) + .unwrap(); + + assert_eq!( + resolved, + ResolvedMarketplacePlugin { + plugin_id: PluginId::new( + "string-source-plugin".to_string(), + "alternate-marketplace".to_string() + ) + .unwrap(), + source_path: AbsolutePathBuf::try_from(repo_root.join("plugins/string-source-plugin")) + .unwrap(), + auth_policy: MarketplacePluginAuthPolicy::OnInstall, + } + ); +} + #[test] fn resolve_marketplace_plugin_reports_missing_plugin() { let tmp = tempdir().unwrap(); @@ -70,6 +120,106 @@ fn resolve_marketplace_plugin_reports_missing_plugin() { ); } +#[test] +fn list_marketplaces_supports_alternate_manifest_layout() { + let tmp = tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + + fs::create_dir_all(repo_root.join(".git")).unwrap(); + let marketplace_path = write_alternate_marketplace( + &repo_root, + r#"{ + "name": "alternate-marketplace", + "plugins": [ + { + "name": "string-source-plugin", + "source": "./plugins/string-source-plugin" + } + ] +}"#, + ); + + let marketplaces = list_marketplaces_with_home( + &[AbsolutePathBuf::try_from(repo_root.clone()).unwrap()], + /*home_dir*/ None, + ) + .unwrap() + .marketplaces; + + assert_eq!( + marketplaces, + vec![Marketplace { + name: "alternate-marketplace".to_string(), + path: marketplace_path, + interface: None, + plugins: vec![MarketplacePlugin { + name: "string-source-plugin".to_string(), + source: MarketplacePluginSource::Local { + path: AbsolutePathBuf::try_from(repo_root.join("plugins/string-source-plugin")) + .unwrap(), + }, + policy: MarketplacePluginPolicy { + installation: MarketplacePluginInstallPolicy::Available, 
+ authentication: MarketplacePluginAuthPolicy::OnInstall, + products: None, + }, + interface: None, + }], + }] + ); +} + +#[test] +fn list_marketplaces_prefers_first_supported_manifest_layout() { + let tmp = tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + fs::write( + repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "agents-marketplace", + "plugins": [ + { + "name": "agents-plugin", + "source": { + "source": "local", + "path": "./plugins/agents-plugin" + } + } + ] +}"#, + ) + .unwrap(); + write_alternate_marketplace( + &repo_root, + r#"{ + "name": "alternate-marketplace", + "plugins": [ + { + "name": "string-source-plugin", + "source": "./plugins/string-source-plugin" + } + ] +}"#, + ); + + let marketplaces = list_marketplaces_with_home( + &[AbsolutePathBuf::try_from(repo_root.clone()).unwrap()], + /*home_dir*/ None, + ) + .unwrap() + .marketplaces; + + assert_eq!(marketplaces.len(), 1); + assert_eq!(marketplaces[0].name, "agents-marketplace"); + assert_eq!( + marketplaces[0].path, + AbsolutePathBuf::try_from(repo_root.join(".agents/plugins/marketplace.json")).unwrap() + ); +} + #[test] fn list_marketplaces_returns_home_and_repo_marketplaces() { let tmp = tempdir().unwrap(); @@ -413,7 +563,7 @@ fn list_marketplaces_reads_marketplace_display_name() { } #[test] -fn list_marketplaces_skips_marketplaces_that_fail_to_load() { +fn list_marketplaces_skips_invalid_plugins_but_keeps_marketplace() { let tmp = tempdir().unwrap(); let valid_repo_root = tmp.path().join("valid-repo"); let invalid_repo_root = tmp.path().join("invalid-repo"); @@ -465,8 +615,10 @@ fn list_marketplaces_skips_marketplaces_that_fail_to_load() { .unwrap() .marketplaces; - assert_eq!(marketplaces.len(), 1); + assert_eq!(marketplaces.len(), 2); assert_eq!(marketplaces[0].name, "valid-marketplace"); + assert_eq!(marketplaces[1].name, 
"invalid-marketplace"); + assert!(marketplaces[1].plugins.is_empty()); } #[test] @@ -522,6 +674,60 @@ fn list_marketplaces_reports_marketplace_load_errors() { ); } +#[test] +fn list_marketplaces_skips_unsupported_plugin_sources_but_keeps_local_plugins() { + let tmp = tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + + fs::create_dir_all(repo_root.join(".git")).unwrap(); + write_alternate_marketplace( + &repo_root, + r#"{ + "name": "mixed-source-marketplace", + "plugins": [ + { + "name": "local-plugin", + "source": "./plugins/local-plugin" + }, + { + "name": "url-plugin", + "source": { + "source": "url", + "url": "https://github.com/example/plugin.git" + } + }, + { + "name": "git-subdir-plugin", + "source": { + "source": "git-subdir", + "url": "owner/repo", + "path": "plugins/example", + "ref": "main" + } + } + ] +}"#, + ); + + let marketplaces = list_marketplaces_with_home( + &[AbsolutePathBuf::try_from(repo_root.clone()).unwrap()], + /*home_dir*/ None, + ) + .unwrap() + .marketplaces; + + assert_eq!(marketplaces.len(), 1); + assert_eq!(marketplaces[0].name, "mixed-source-marketplace"); + assert_eq!(marketplaces[0].plugins.len(), 1); + assert_eq!(marketplaces[0].plugins[0].name, "local-plugin"); + assert_eq!( + marketplaces[0].plugins[0].source, + MarketplacePluginSource::Local { + path: AbsolutePathBuf::try_from(repo_root.join("plugins/local-plugin")).unwrap(), + } + ); +} + #[test] fn list_marketplaces_resolves_plugin_interface_paths_to_absolute() { let tmp = tempdir().unwrap(); @@ -734,7 +940,7 @@ fn list_marketplaces_ignores_plugin_interface_assets_without_dot_slash() { } #[test] -fn resolve_marketplace_plugin_rejects_non_relative_local_paths() { +fn resolve_marketplace_plugin_skips_invalid_local_paths() { let tmp = tempdir().unwrap(); let repo_root = tmp.path().join("repo"); fs::create_dir_all(repo_root.join(".git")).unwrap(); @@ -756,17 +962,46 @@ fn resolve_marketplace_plugin_rejects_non_relative_local_paths() { ) .unwrap(); - let 
marketplace_path = - AbsolutePathBuf::try_from(repo_root.join(".agents/plugins/marketplace.json")).unwrap(); - let err = resolve_marketplace_plugin(&marketplace_path, "local-plugin", Some(Product::Codex)) + let err = resolve_marketplace_plugin( + &AbsolutePathBuf::try_from(repo_root.join(".agents/plugins/marketplace.json")).unwrap(), + "local-plugin", + Some(Product::Codex), + ) + .unwrap_err(); + + assert_eq!( + err.to_string(), + "plugin `local-plugin` was not found in marketplace `codex-curated`" + ); +} + +#[test] +fn resolve_marketplace_plugin_skips_unsupported_sources() { + let tmp = tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + let marketplace_path = write_alternate_marketplace( + &repo_root, + r#"{ + "name": "alternate-marketplace", + "plugins": [ + { + "name": "remote-plugin", + "source": { + "source": "url", + "url": "https://github.com/example/plugin.git" + } + } + ] +}"#, + ); + + let err = resolve_marketplace_plugin(&marketplace_path, "remote-plugin", Some(Product::Codex)) .unwrap_err(); assert_eq!( err.to_string(), - format!( - "invalid marketplace file `{}`: local plugin source path must start with `./`", - marketplace_path.display() - ) + "plugin `remote-plugin` was not found in marketplace `alternate-marketplace`" ); } diff --git a/codex-rs/core-plugins/src/marketplace_upgrade.rs b/codex-rs/core-plugins/src/marketplace_upgrade.rs new file mode 100644 index 0000000000..81474cf66f --- /dev/null +++ b/codex-rs/core-plugins/src/marketplace_upgrade.rs @@ -0,0 +1,298 @@ +mod activation; +mod git; + +use self::activation::activate_marketplace_root; +use self::activation::installed_marketplace_metadata_matches; +use self::activation::write_installed_marketplace_metadata; +use self::git::clone_git_source; +use self::git::git_remote_revision; +use crate::marketplace::validate_marketplace_root; +use codex_config::CONFIG_TOML_FILE; +use codex_config::ConfigLayerStack; +use 
codex_config::MarketplaceConfigUpdate; +use codex_config::record_user_marketplace; +use codex_config::types::MarketplaceConfig; +use codex_config::types::MarketplaceSourceType; +use codex_plugin::validate_plugin_segment; +use codex_utils_absolute_path::AbsolutePathBuf; +use std::collections::HashMap; +use std::path::Path; +use std::path::PathBuf; +use std::time::Duration; +use tracing::warn; + +const INSTALLED_MARKETPLACES_DIR: &str = ".tmp/marketplaces"; +const MARKETPLACE_UPGRADE_GIT_TIMEOUT: Duration = Duration::from_secs(30); + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ConfiguredMarketplaceUpgradeError { + pub marketplace_name: String, + pub message: String, +} + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ConfiguredMarketplaceUpgradeOutcome { + pub selected_marketplaces: Vec, + pub upgraded_roots: Vec, + pub errors: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct ConfiguredGitMarketplace { + name: String, + source: String, + ref_name: Option, + sparse_paths: Vec, + last_revision: Option, +} + +impl ConfiguredMarketplaceUpgradeOutcome { + pub fn all_succeeded(&self) -> bool { + self.errors.is_empty() + } +} + +pub fn configured_git_marketplace_names(config_layer_stack: &ConfigLayerStack) -> Vec { + let mut names = configured_git_marketplaces(config_layer_stack) + .into_iter() + .map(|marketplace| marketplace.name) + .collect::>(); + names.sort_unstable(); + names +} + +pub fn upgrade_configured_git_marketplaces( + codex_home: &Path, + config_layer_stack: &ConfigLayerStack, + marketplace_name: Option<&str>, +) -> ConfiguredMarketplaceUpgradeOutcome { + let marketplaces = configured_git_marketplaces(config_layer_stack) + .into_iter() + .filter(|marketplace| marketplace_name.is_none_or(|name| marketplace.name.as_str() == name)) + .collect::>(); + if marketplaces.is_empty() { + return ConfiguredMarketplaceUpgradeOutcome::default(); + } + + let install_root = marketplace_install_root(codex_home); + let selected_marketplaces = 
marketplaces + .iter() + .map(|marketplace| marketplace.name.clone()) + .collect(); + let mut upgraded_roots = Vec::new(); + let mut errors = Vec::new(); + for marketplace in marketplaces { + match upgrade_configured_git_marketplace(codex_home, &install_root, &marketplace) { + Ok(Some(upgraded_root)) => upgraded_roots.push(upgraded_root), + Ok(None) => {} + Err(err) => { + errors.push(ConfiguredMarketplaceUpgradeError { + marketplace_name: marketplace.name, + message: err, + }); + } + } + } + + ConfiguredMarketplaceUpgradeOutcome { + selected_marketplaces, + upgraded_roots, + errors, + } +} + +fn marketplace_install_root(codex_home: &Path) -> PathBuf { + codex_home.join(INSTALLED_MARKETPLACES_DIR) +} + +fn configured_git_marketplaces( + config_layer_stack: &ConfigLayerStack, +) -> Vec { + let Some(user_layer) = config_layer_stack.get_user_layer() else { + return Vec::new(); + }; + let Some(marketplaces_value) = user_layer.config.get("marketplaces") else { + return Vec::new(); + }; + let marketplaces = match marketplaces_value + .clone() + .try_into::>() + { + Ok(marketplaces) => marketplaces, + Err(err) => { + warn!("invalid marketplaces config while preparing auto-upgrade: {err}"); + return Vec::new(); + } + }; + + let mut configured = marketplaces + .into_iter() + .filter_map(|(name, marketplace)| configured_git_marketplace_from_config(name, marketplace)) + .collect::>(); + configured.sort_unstable_by(|left, right| left.name.cmp(&right.name)); + configured +} + +fn configured_git_marketplace_from_config( + name: String, + marketplace: MarketplaceConfig, +) -> Option { + let MarketplaceConfig { + last_updated: _, + last_revision, + source_type, + source, + ref_name, + sparse_paths, + } = marketplace; + if source_type != Some(MarketplaceSourceType::Git) { + return None; + } + let Some(source) = source else { + warn!( + marketplace = name, + "ignoring configured Git marketplace without source" + ); + return None; + }; + Some(ConfiguredGitMarketplace { + name, + 
source, + ref_name, + sparse_paths: sparse_paths.unwrap_or_default(), + last_revision, + }) +} + +fn upgrade_configured_git_marketplace( + codex_home: &Path, + install_root: &Path, + marketplace: &ConfiguredGitMarketplace, +) -> Result, String> { + validate_plugin_segment(&marketplace.name, "marketplace name")?; + let remote_revision = git_remote_revision( + &marketplace.source, + marketplace.ref_name.as_deref(), + MARKETPLACE_UPGRADE_GIT_TIMEOUT, + )?; + let destination = install_root.join(&marketplace.name); + if destination + .join(".agents/plugins/marketplace.json") + .is_file() + && marketplace.last_revision.as_deref() == Some(remote_revision.as_str()) + && installed_marketplace_metadata_matches(&destination, marketplace, &remote_revision) + { + return Ok(None); + } + + let staging_parent = install_root.join(".staging"); + std::fs::create_dir_all(&staging_parent).map_err(|err| { + format!( + "failed to create marketplace upgrade staging directory {}: {err}", + staging_parent.display() + ) + })?; + let staged_dir = tempfile::Builder::new() + .prefix("marketplace-upgrade-") + .tempdir_in(&staging_parent) + .map_err(|err| { + format!( + "failed to create temporary marketplace upgrade directory in {}: {err}", + staging_parent.display() + ) + })?; + + let activated_revision = clone_git_source( + &marketplace.source, + marketplace.ref_name.as_deref(), + &marketplace.sparse_paths, + staged_dir.path(), + MARKETPLACE_UPGRADE_GIT_TIMEOUT, + )?; + let marketplace_name = validate_marketplace_root(staged_dir.path()) + .map_err(|err| format!("failed to validate upgraded marketplace root: {err}"))?; + if marketplace_name != marketplace.name { + return Err(format!( + "upgraded marketplace name `{marketplace_name}` does not match configured marketplace `{}`", + marketplace.name + )); + } + write_installed_marketplace_metadata(staged_dir.path(), marketplace, &activated_revision)?; + + let last_updated = chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true); + 
let update = MarketplaceConfigUpdate { + last_updated: &last_updated, + last_revision: Some(&activated_revision), + source_type: "git", + source: &marketplace.source, + ref_name: marketplace.ref_name.as_deref(), + sparse_paths: &marketplace.sparse_paths, + }; + activate_marketplace_root(&destination, staged_dir, || { + ensure_configured_git_marketplace_unchanged(codex_home, marketplace)?; + record_user_marketplace(codex_home, &marketplace.name, &update).map_err(|err| { + format!( + "failed to record upgraded marketplace `{}` in user config.toml: {err}", + marketplace.name + ) + }) + })?; + + AbsolutePathBuf::try_from(destination) + .map(Some) + .map_err(|err| format!("upgraded marketplace path is not absolute: {err}")) +} +fn ensure_configured_git_marketplace_unchanged( + codex_home: &Path, + expected: &ConfiguredGitMarketplace, +) -> Result<(), String> { + let current = read_configured_git_marketplace(codex_home, &expected.name)?; + match current { + Some(current) if current == *expected => Ok(()), + Some(_) => Err(format!( + "configured marketplace `{}` changed while auto-upgrade was in flight", + expected.name + )), + None => Err(format!( + "configured marketplace `{}` was removed or is no longer a Git marketplace", + expected.name + )), + } +} + +fn read_configured_git_marketplace( + codex_home: &Path, + marketplace_name: &str, +) -> Result, String> { + let config_path = codex_home.join(CONFIG_TOML_FILE); + let raw_config = match std::fs::read_to_string(&config_path) { + Ok(raw_config) => raw_config, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None), + Err(err) => { + return Err(format!( + "failed to read user config {} while checking marketplace auto-upgrade: {err}", + config_path.display() + )); + } + }; + let config: toml::Value = toml::from_str(&raw_config).map_err(|err| { + format!( + "failed to parse user config {} while checking marketplace auto-upgrade: {err}", + config_path.display() + ) + })?; + let Some(marketplaces_value) = 
config.get("marketplaces") else { + return Ok(None); + }; + let mut marketplaces = marketplaces_value + .clone() + .try_into::>() + .map_err(|err| format!("invalid marketplaces config while checking auto-upgrade: {err}"))?; + let Some(marketplace) = marketplaces.remove(marketplace_name) else { + return Ok(None); + }; + Ok(configured_git_marketplace_from_config( + marketplace_name.to_string(), + marketplace, + )) +} diff --git a/codex-rs/core-plugins/src/marketplace_upgrade/activation.rs b/codex-rs/core-plugins/src/marketplace_upgrade/activation.rs new file mode 100644 index 0000000000..366b35fb43 --- /dev/null +++ b/codex-rs/core-plugins/src/marketplace_upgrade/activation.rs @@ -0,0 +1,167 @@ +use super::ConfiguredGitMarketplace; +use codex_config::types::MarketplaceSourceType; +use serde::Deserialize; +use serde::Serialize; +use std::path::Path; +use std::path::PathBuf; +use tempfile::TempDir; +use tracing::warn; + +const MARKETPLACE_INSTALL_METADATA_FILE: &str = ".codex-marketplace-install.json"; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +struct InstalledMarketplaceMetadata { + source_type: MarketplaceSourceType, + source: String, + ref_name: Option, + sparse_paths: Vec, + revision: String, +} + +pub(super) fn installed_marketplace_metadata_matches( + root: &Path, + marketplace: &ConfiguredGitMarketplace, + revision: &str, +) -> bool { + let metadata = match std::fs::read_to_string(installed_marketplace_metadata_path(root)) { + Ok(metadata) => metadata, + Err(_) => return false, + }; + let metadata = match serde_json::from_str::(&metadata) { + Ok(metadata) => metadata, + Err(err) => { + warn!( + marketplace = marketplace.name, + error = %err, + "failed to parse activated marketplace metadata" + ); + return false; + } + }; + metadata == installed_marketplace_metadata(marketplace, revision) +} + +pub(super) fn write_installed_marketplace_metadata( + root: &Path, + marketplace: &ConfiguredGitMarketplace, + 
revision: &str, +) -> Result<(), String> { + let metadata = installed_marketplace_metadata(marketplace, revision); + let contents = serde_json::to_string_pretty(&metadata) + .map_err(|err| format!("failed to serialize activated marketplace metadata: {err}"))?; + std::fs::write(installed_marketplace_metadata_path(root), contents) + .map_err(|err| format!("failed to write activated marketplace metadata: {err}")) +} + +pub(super) fn activate_marketplace_root( + destination: &Path, + staged_dir: TempDir, + after_activate: impl FnOnce() -> Result<(), String>, +) -> Result<(), String> { + let staged_root = staged_dir.path(); + let Some(parent) = destination.parent() else { + return Err(format!( + "failed to determine marketplace install parent for {}", + destination.display() + )); + }; + std::fs::create_dir_all(parent).map_err(|err| { + format!( + "failed to create marketplace install parent {}: {err}", + parent.display() + ) + })?; + + if destination.exists() { + let backup_dir = tempfile::Builder::new() + .prefix("marketplace-backup-") + .tempdir_in(parent) + .map_err(|err| { + format!( + "failed to create marketplace backup directory in {}: {err}", + parent.display() + ) + })?; + let backup_root = backup_dir.path().join("root"); + std::fs::rename(destination, &backup_root).map_err(|err| { + format!( + "failed to move previous marketplace root out of the way at {}: {err}", + destination.display() + ) + })?; + + if let Err(err) = std::fs::rename(staged_root, destination) { + let rollback_result = std::fs::rename(&backup_root, destination); + return match rollback_result { + Ok(()) => Err(format!( + "failed to activate upgraded marketplace at {}: {err}", + destination.display() + )), + Err(rollback_err) => { + let backup_path = backup_dir.keep().join("root"); + Err(format!( + "failed to activate upgraded marketplace at {}: {err}; failed to restore previous marketplace root (left at {}): {rollback_err}", + destination.display(), + backup_path.display() + )) + } + }; + } 
+ + if let Err(err) = after_activate() { + let remove_result = std::fs::remove_dir_all(destination); + let rollback_result = + remove_result.and_then(|()| std::fs::rename(&backup_root, destination)); + return match rollback_result { + Ok(()) => Err(err), + Err(rollback_err) => { + let backup_path = backup_dir.keep().join("root"); + Err(format!( + "{err}; failed to restore previous marketplace root at {} (left at {}): {rollback_err}", + destination.display(), + backup_path.display() + )) + } + }; + } + + return Ok(()); + } + + std::fs::rename(staged_root, destination).map_err(|err| { + format!( + "failed to activate upgraded marketplace at {}: {err}", + destination.display() + ) + })?; + if let Err(err) = after_activate() { + let remove_result = std::fs::remove_dir_all(destination); + return match remove_result { + Ok(()) => Err(err), + Err(remove_err) => Err(format!( + "{err}; failed to remove newly activated marketplace root at {}: {remove_err}", + destination.display() + )), + }; + } + + Ok(()) +} + +fn installed_marketplace_metadata( + marketplace: &ConfiguredGitMarketplace, + revision: &str, +) -> InstalledMarketplaceMetadata { + InstalledMarketplaceMetadata { + source_type: MarketplaceSourceType::Git, + source: marketplace.source.clone(), + ref_name: marketplace.ref_name.clone(), + sparse_paths: marketplace.sparse_paths.clone(), + revision: revision.to_string(), + } +} + +fn installed_marketplace_metadata_path(root: &Path) -> PathBuf { + root.join(MARKETPLACE_INSTALL_METADATA_FILE) +} diff --git a/codex-rs/core-plugins/src/marketplace_upgrade/git.rs b/codex-rs/core-plugins/src/marketplace_upgrade/git.rs new file mode 100644 index 0000000000..80a7c68f68 --- /dev/null +++ b/codex-rs/core-plugins/src/marketplace_upgrade/git.rs @@ -0,0 +1,238 @@ +use std::path::Path; +use std::process::Command; +use std::process::Output; +use std::process::Stdio; +use std::time::Duration; + +pub(super) fn git_remote_revision( + source: &str, + ref_name: Option<&str>, + timeout: 
Duration, +) -> Result { + if let Some(ref_name) = ref_name + && is_full_git_sha(ref_name) + { + return Ok(ref_name.to_string()); + } + + let ref_name = ref_name.unwrap_or("HEAD"); + let output = run_git_command_with_timeout( + git_command().arg("ls-remote").arg(source).arg(ref_name), + "git ls-remote marketplace source", + timeout, + )?; + ensure_git_success(&output, "git ls-remote marketplace source")?; + + let stdout = String::from_utf8_lossy(&output.stdout); + let Some(first_line) = stdout.lines().next() else { + return Err("git ls-remote returned empty output for marketplace source".to_string()); + }; + let Some((revision, _)) = first_line.split_once('\t') else { + return Err(format!( + "unexpected git ls-remote output for marketplace source: {first_line}" + )); + }; + let revision = revision.trim(); + if revision.is_empty() { + return Err("git ls-remote returned empty revision for marketplace source".to_string()); + } + Ok(revision.to_string()) +} + +pub(super) fn clone_git_source( + source: &str, + ref_name: Option<&str>, + sparse_paths: &[String], + destination: &Path, + timeout: Duration, +) -> Result { + if sparse_paths.is_empty() { + let output = run_git_command_with_timeout( + git_command().arg("clone").arg(source).arg(destination), + "git clone marketplace source", + timeout, + )?; + ensure_git_success(&output, "git clone marketplace source")?; + if let Some(ref_name) = ref_name { + let output = run_git_command_with_timeout( + git_command() + .arg("-C") + .arg(destination) + .arg("checkout") + .arg(ref_name), + "git checkout marketplace ref", + timeout, + )?; + ensure_git_success(&output, "git checkout marketplace ref")?; + } + return git_worktree_revision(destination, timeout); + } + + let output = run_git_command_with_timeout( + git_command() + .arg("clone") + .arg("--filter=blob:none") + .arg("--no-checkout") + .arg(source) + .arg(destination), + "git clone marketplace source", + timeout, + )?; + ensure_git_success(&output, "git clone marketplace 
source")?; + + let mut sparse_checkout = git_command(); + sparse_checkout + .arg("-C") + .arg(destination) + .arg("sparse-checkout") + .arg("set") + .args(sparse_paths); + let output = run_git_command_with_timeout( + &mut sparse_checkout, + "git sparse-checkout marketplace source", + timeout, + )?; + ensure_git_success(&output, "git sparse-checkout marketplace source")?; + + let output = run_git_command_with_timeout( + git_command() + .arg("-C") + .arg(destination) + .arg("checkout") + .arg(ref_name.unwrap_or("HEAD")), + "git checkout marketplace ref", + timeout, + )?; + ensure_git_success(&output, "git checkout marketplace ref")?; + git_worktree_revision(destination, timeout) +} + +fn git_worktree_revision(destination: &Path, timeout: Duration) -> Result { + let output = run_git_command_with_timeout( + git_command() + .arg("-C") + .arg(destination) + .arg("rev-parse") + .arg("HEAD"), + "git rev-parse marketplace revision", + timeout, + )?; + ensure_git_success(&output, "git rev-parse marketplace revision")?; + + let revision = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if revision.is_empty() { + Err("git rev-parse returned empty revision for marketplace source".to_string()) + } else { + Ok(revision) + } +} + +fn is_full_git_sha(value: &str) -> bool { + value.len() == 40 && value.chars().all(|ch| ch.is_ascii_hexdigit()) +} + +fn git_command() -> Command { + let mut command = Command::new("git"); + command + .env("GIT_OPTIONAL_LOCKS", "0") + .env("GIT_TERMINAL_PROMPT", "0"); + command +} + +fn run_git_command_with_timeout( + command: &mut Command, + context: &str, + timeout: Duration, +) -> Result { + let mut child = command + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .map_err(|err| format!("failed to run {context}: {err}"))?; + let start = std::time::Instant::now(); + loop { + match child.try_wait() { + Ok(Some(_)) => { + return child + .wait_with_output() + .map_err(|err| format!("failed to wait for 
{context}: {err}")); + } + Ok(None) => {} + Err(err) => return Err(format!("failed to poll {context}: {err}")), + } + + if start.elapsed() >= timeout { + let _ = child.kill(); + let output = child + .wait_with_output() + .map_err(|err| format!("failed to wait for {context} after timeout: {err}"))?; + let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string(); + return if stderr.is_empty() { + Err(format!("{context} timed out after {}s", timeout.as_secs())) + } else { + Err(format!( + "{context} timed out after {}s: {stderr}", + timeout.as_secs() + )) + }; + } + + std::thread::sleep(Duration::from_millis(100)); + } +} + +fn ensure_git_success(output: &Output, context: &str) -> Result<(), String> { + if output.status.success() { + return Ok(()); + } + let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string(); + if stderr.is_empty() { + Err(format!("{context} failed with status {}", output.status)) + } else { + Err(format!( + "{context} failed with status {}: {stderr}", + output.status + )) + } +} + +#[cfg(test)] +mod tests { + use super::git_command; + use super::is_full_git_sha; + use std::ffi::OsStr; + + #[test] + fn full_git_sha_ref_is_already_a_remote_revision() { + assert!(is_full_git_sha("0123456789abcdef0123456789abcdef01234567")); + assert!(!is_full_git_sha("main")); + assert!(!is_full_git_sha("0123456")); + } + + #[test] + fn git_command_uses_path_lookup_with_stable_noninteractive_env() { + let command = git_command(); + + assert_eq!(command.get_program(), OsStr::new("git")); + assert_eq!( + command_env(&command, "GIT_OPTIONAL_LOCKS"), + Some(Some(OsStr::new("0"))) + ); + assert_eq!( + command_env(&command, "GIT_TERMINAL_PROMPT"), + Some(Some(OsStr::new("0"))) + ); + assert_eq!(command_env(&command, "PATH"), None); + } + + fn command_env<'a>( + command: &'a std::process::Command, + name: &str, + ) -> Option> { + command + .get_envs() + .find(|(key, _)| key == &OsStr::new(name)) + .map(|(_, value)| value) + } +} diff --git 
a/codex-rs/core/src/plugins/remote.rs b/codex-rs/core-plugins/src/remote.rs similarity index 94% rename from codex-rs/core/src/plugins/remote.rs rename to codex-rs/core-plugins/src/remote.rs index f26bbe8880..34ece0880a 100644 --- a/codex-rs/core/src/plugins/remote.rs +++ b/codex-rs/core-plugins/src/remote.rs @@ -1,4 +1,3 @@ -use crate::config::Config; use codex_login::CodexAuth; use codex_login::default_client::build_reqwest_client; use codex_protocol::protocol::Product; @@ -11,12 +10,17 @@ const REMOTE_PLUGIN_FETCH_TIMEOUT: Duration = Duration::from_secs(30); const REMOTE_FEATURED_PLUGIN_FETCH_TIMEOUT: Duration = Duration::from_secs(10); const REMOTE_PLUGIN_MUTATION_TIMEOUT: Duration = Duration::from_secs(30); +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RemotePluginServiceConfig { + pub chatgpt_base_url: String, +} + #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -pub(crate) struct RemotePluginStatusSummary { - pub(crate) name: String, +pub struct RemotePluginStatusSummary { + pub name: String, #[serde(default = "default_remote_marketplace_name")] - pub(crate) marketplace_name: String, - pub(crate) enabled: bool, + pub marketplace_name: String, + pub enabled: bool, } #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] @@ -116,8 +120,8 @@ pub enum RemotePluginFetchError { }, } -pub(crate) async fn fetch_remote_plugin_status( - config: &Config, +pub async fn fetch_remote_plugin_status( + config: &RemotePluginServiceConfig, auth: Option<&CodexAuth>, ) -> Result, RemotePluginFetchError> { let Some(auth) = auth else { @@ -161,7 +165,7 @@ pub(crate) async fn fetch_remote_plugin_status( } pub async fn fetch_remote_featured_plugin_ids( - config: &Config, + config: &RemotePluginServiceConfig, auth: Option<&CodexAuth>, product: Option, ) -> Result, RemotePluginFetchError> { @@ -205,8 +209,8 @@ pub async fn fetch_remote_featured_plugin_ids( }) } -pub(crate) async fn enable_remote_plugin( - config: &Config, +pub async fn enable_remote_plugin( + config: 
&RemotePluginServiceConfig, auth: Option<&CodexAuth>, plugin_id: &str, ) -> Result<(), RemotePluginMutationError> { @@ -214,8 +218,8 @@ pub(crate) async fn enable_remote_plugin( Ok(()) } -pub(crate) async fn uninstall_remote_plugin( - config: &Config, +pub async fn uninstall_remote_plugin( + config: &RemotePluginServiceConfig, auth: Option<&CodexAuth>, plugin_id: &str, ) -> Result<(), RemotePluginMutationError> { @@ -238,7 +242,7 @@ fn default_remote_marketplace_name() -> String { } async fn post_remote_plugin_mutation( - config: &Config, + config: &RemotePluginServiceConfig, auth: Option<&CodexAuth>, plugin_id: &str, action: &str, @@ -294,7 +298,7 @@ async fn post_remote_plugin_mutation( } fn remote_plugin_mutation_url( - config: &Config, + config: &RemotePluginServiceConfig, plugin_id: &str, action: &str, ) -> Result { diff --git a/codex-rs/core/src/plugins/store.rs b/codex-rs/core-plugins/src/store.rs similarity index 97% rename from codex-rs/core/src/plugins/store.rs rename to codex-rs/core-plugins/src/store.rs index 4316f18002..2f530b8648 100644 --- a/codex-rs/core/src/plugins/store.rs +++ b/codex-rs/core-plugins/src/store.rs @@ -1,5 +1,5 @@ -use super::load_plugin_manifest; -use super::manifest::PluginManifest; +use crate::manifest::PluginManifest; +use crate::manifest::load_plugin_manifest; use codex_plugin::PluginId; use codex_plugin::validate_plugin_segment; use codex_utils_absolute_path::AbsolutePathBuf; @@ -11,8 +11,8 @@ use std::io; use std::path::Path; use std::path::PathBuf; -pub(crate) const DEFAULT_PLUGIN_VERSION: &str = "local"; -pub(crate) const PLUGINS_CACHE_DIR: &str = "plugins/cache"; +pub const DEFAULT_PLUGIN_VERSION: &str = "local"; +pub const PLUGINS_CACHE_DIR: &str = "plugins/cache"; #[derive(Debug, Clone, PartialEq, Eq)] pub struct PluginInstallResult { @@ -157,7 +157,7 @@ impl PluginStoreError { } } -pub(crate) fn plugin_version_for_source(source_path: &Path) -> Result { +pub fn plugin_version_for_source(source_path: &Path) -> Result { 
let plugin_version = plugin_manifest_version_for_source(source_path)? .unwrap_or_else(|| DEFAULT_PLUGIN_VERSION.to_string()); validate_plugin_version_segment(&plugin_version).map_err(PluginStoreError::Invalid)?; diff --git a/codex-rs/core/src/plugins/store_tests.rs b/codex-rs/core-plugins/src/store_tests.rs similarity index 100% rename from codex-rs/core/src/plugins/store_tests.rs rename to codex-rs/core-plugins/src/store_tests.rs diff --git a/codex-rs/core/src/plugins/toggles.rs b/codex-rs/core-plugins/src/toggles.rs similarity index 100% rename from codex-rs/core/src/plugins/toggles.rs rename to codex-rs/core-plugins/src/toggles.rs diff --git a/codex-rs/core-skills/Cargo.toml b/codex-rs/core-skills/Cargo.toml index 35c9ed0a57..bc73909f92 100644 --- a/codex-rs/core-skills/Cargo.toml +++ b/codex-rs/core-skills/Cargo.toml @@ -17,6 +17,7 @@ anyhow = { workspace = true } codex-analytics = { workspace = true } codex-app-server-protocol = { workspace = true } codex-config = { workspace = true } +codex-exec-server = { workspace = true } codex-instructions = { workspace = true } codex-login = { workspace = true } codex-otel = { workspace = true } diff --git a/codex-rs/core-skills/src/config_rules.rs b/codex-rs/core-skills/src/config_rules.rs index f613d494a2..92ad2ab1a6 100644 --- a/codex-rs/core-skills/src/config_rules.rs +++ b/codex-rs/core-skills/src/config_rules.rs @@ -1,12 +1,11 @@ use std::collections::HashSet; -use std::path::Path; -use std::path::PathBuf; use codex_app_server_protocol::ConfigLayerSource; use codex_config::ConfigLayerStack; use codex_config::ConfigLayerStackOrdering; use codex_config::SkillConfig; use codex_config::SkillsConfig; +use codex_utils_absolute_path::AbsolutePathBuf; use tracing::warn; use crate::SkillMetadata; @@ -14,7 +13,7 @@ use crate::SkillMetadata; #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum SkillConfigRuleSelector { Name(String), - Path(PathBuf), + Path(AbsolutePathBuf), } #[derive(Debug, Clone, 
PartialEq, Eq, Hash)] @@ -72,7 +71,7 @@ pub fn skill_config_rules_from_stack(config_layer_stack: &ConfigLayerStack) -> S pub fn resolve_disabled_skill_paths( skills: &[SkillMetadata], rules: &SkillConfigRules, -) -> HashSet { +) -> HashSet { let mut disabled_paths = HashSet::new(); for entry in &rules.entries { @@ -105,9 +104,9 @@ pub fn resolve_disabled_skill_paths( fn skill_config_rule_selector(entry: &SkillConfig) -> Option { match (entry.path.as_ref(), entry.name.as_deref()) { - (Some(path), None) => Some(SkillConfigRuleSelector::Path(normalize_rule_path( - path.as_path(), - ))), + (Some(path), None) => Some(SkillConfigRuleSelector::Path( + path.canonicalize().unwrap_or_else(|_| path.clone()), + )), (None, Some(name)) => { let name = name.trim(); if name.is_empty() { @@ -127,7 +126,3 @@ fn skill_config_rule_selector(entry: &SkillConfig) -> Option PathBuf { - dunce::canonicalize(path).unwrap_or_else(|_| path.to_path_buf()) -} diff --git a/codex-rs/core-skills/src/injection.rs b/codex-rs/core-skills/src/injection.rs index b31885b8c5..1ccd447cf0 100644 --- a/codex-rs/core-skills/src/injection.rs +++ b/codex-rs/core-skills/src/injection.rs @@ -1,19 +1,21 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::path::PathBuf; +use std::sync::Arc; +use crate::SkillLoadOutcome; use crate::SkillMetadata; use crate::build_skill_name_counts; use codex_analytics::AnalyticsEventsClient; use codex_analytics::InvocationType; use codex_analytics::SkillInvocation; use codex_analytics::TrackEventsContext; +use codex_exec_server::LOCAL_FS; use codex_instructions::SkillInstructions; use codex_otel::SessionTelemetry; use codex_protocol::models::ResponseItem; use codex_protocol::user_input::UserInput; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_plugins::mention_syntax::TOOL_MENTION_SIGIL; -use tokio::fs; #[derive(Debug, Default)] pub struct SkillInjections { @@ -23,6 +25,7 @@ pub struct SkillInjections { pub async fn build_skill_injections( 
mentioned_skills: &[SkillMetadata], + loaded_skills: Option<&SkillLoadOutcome>, otel: Option<&SessionTelemetry>, analytics_client: &AnalyticsEventsClient, tracking: TrackEventsContext, @@ -38,13 +41,19 @@ pub async fn build_skill_injections( let mut invocations = Vec::new(); for skill in mentioned_skills { - match fs::read_to_string(&skill.path_to_skills_md).await { + let fs = loaded_skills + .and_then(|outcome| outcome.file_system_for_skill(skill)) + .unwrap_or_else(|| Arc::clone(&LOCAL_FS)); + match fs + .read_file_text(&skill.path_to_skills_md, /*sandbox*/ None) + .await + { Ok(contents) => { emit_skill_injected_metric(otel, skill, "ok"); invocations.push(SkillInvocation { skill_name: skill.name.clone(), skill_scope: skill.scope, - skill_path: skill.path_to_skills_md.clone(), + skill_path: skill.path_to_skills_md.to_path_buf(), invocation_type: InvocationType::Explicit, }); result.items.push(ResponseItem::from(SkillInstructions { @@ -100,7 +109,7 @@ fn emit_skill_injected_metric( pub fn collect_explicit_skill_mentions( inputs: &[UserInput], skills: &[SkillMetadata], - disabled_paths: &HashSet, + disabled_paths: &HashSet, connector_slug_counts: &HashMap, ) -> Vec { let skill_name_counts = build_skill_name_counts(skills, disabled_paths).0; @@ -113,20 +122,24 @@ pub fn collect_explicit_skill_mentions( }; let mut selected: Vec = Vec::new(); let mut seen_names: HashSet = HashSet::new(); - let mut seen_paths: HashSet = HashSet::new(); + let mut seen_paths: HashSet = HashSet::new(); let mut blocked_plain_names: HashSet = HashSet::new(); for input in inputs { if let UserInput::Skill { name, path } = input { blocked_plain_names.insert(name.clone()); - if selection_context.disabled_paths.contains(path) || seen_paths.contains(path) { + let Ok(path) = AbsolutePathBuf::relative_to_current_dir(path) else { + continue; + }; + + if selection_context.disabled_paths.contains(&path) || seen_paths.contains(&path) { continue; } if let Some(skill) = selection_context .skills .iter() 
- .find(|skill| skill.path_to_skills_md.as_path() == path.as_path()) + .find(|skill| skill.path_to_skills_md == path) { seen_paths.insert(skill.path_to_skills_md.clone()); seen_names.insert(skill.name.clone()); @@ -154,7 +167,7 @@ pub fn collect_explicit_skill_mentions( struct SkillSelectionContext<'a> { skills: &'a [SkillMetadata], - disabled_paths: &'a HashSet, + disabled_paths: &'a HashSet, skill_name_counts: &'a HashMap, connector_slug_counts: &'a HashMap, } @@ -305,7 +318,7 @@ fn select_skills_from_mentions( blocked_plain_names: &HashSet, mentions: &ToolMentions<'_>, seen_names: &mut HashSet, - seen_paths: &mut HashSet, + seen_paths: &mut HashSet, selected: &mut Vec, ) { if mentions.is_empty() { diff --git a/codex-rs/core-skills/src/injection_tests.rs b/codex-rs/core-skills/src/injection_tests.rs index b8611de4ef..9627318653 100644 --- a/codex-rs/core-skills/src/injection_tests.rs +++ b/codex-rs/core-skills/src/injection_tests.rs @@ -1,4 +1,7 @@ use super::*; +use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_absolute_path::test_support::PathBufExt; +use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use std::collections::HashMap; use std::collections::HashSet; @@ -11,7 +14,7 @@ fn make_skill(name: &str, path: &str) -> SkillMetadata { interface: None, dependencies: None, policy: None, - path_to_skills_md: PathBuf::from(path), + path_to_skills_md: test_path_buf(path).abs(), scope: codex_protocol::protocol::SkillScope::User, } } @@ -26,10 +29,14 @@ fn assert_mentions(text: &str, expected_names: &[&str], expected_paths: &[&str]) assert_eq!(mentions.paths, set(expected_paths)); } +fn linked_skill_mention(name: &str, unix_path: &str) -> String { + format!("[${name}]({})", test_path_buf(unix_path).display()) +} + fn collect_mentions( inputs: &[UserInput], skills: &[SkillMetadata], - disabled_paths: &HashSet, + disabled_paths: &HashSet, connector_slug_counts: &HashMap, ) -> Vec { 
collect_explicit_skill_mentions(inputs, skills, disabled_paths, connector_slug_counts) @@ -151,7 +158,7 @@ fn collect_explicit_skill_mentions_prioritizes_structured_inputs() { }, UserInput::Skill { name: "beta-skill".to_string(), - path: PathBuf::from("/tmp/beta"), + path: test_path_buf("/tmp/beta"), }, ]; let connector_counts = HashMap::new(); @@ -172,7 +179,7 @@ fn collect_explicit_skill_mentions_skips_invalid_structured_and_blocks_plain_fal }, UserInput::Skill { name: "alpha-skill".to_string(), - path: PathBuf::from("/tmp/missing"), + path: test_path_buf("/tmp/missing"), }, ]; let connector_counts = HashMap::new(); @@ -193,10 +200,10 @@ fn collect_explicit_skill_mentions_skips_disabled_structured_and_blocks_plain_fa }, UserInput::Skill { name: "alpha-skill".to_string(), - path: PathBuf::from("/tmp/alpha"), + path: test_path_buf("/tmp/alpha"), }, ]; - let disabled = HashSet::from([PathBuf::from("/tmp/alpha")]); + let disabled = HashSet::from([test_path_buf("/tmp/alpha").abs()]); let connector_counts = HashMap::new(); let selected = collect_mentions(&inputs, &skills, &disabled, &connector_counts); @@ -208,8 +215,9 @@ fn collect_explicit_skill_mentions_skips_disabled_structured_and_blocks_plain_fa fn collect_explicit_skill_mentions_dedupes_by_path() { let alpha = make_skill("alpha-skill", "/tmp/alpha"); let skills = vec![alpha.clone()]; + let mention = linked_skill_mention("alpha-skill", "/tmp/alpha"); let inputs = vec![UserInput::Text { - text: "use [$alpha-skill](/tmp/alpha) and [$alpha-skill](/tmp/alpha)".to_string(), + text: format!("use {mention} and {mention}"), text_elements: Vec::new(), }]; let connector_counts = HashMap::new(); @@ -241,7 +249,10 @@ fn collect_explicit_skill_mentions_prefers_linked_path_over_name() { let beta = make_skill("demo-skill", "/tmp/beta"); let skills = vec![alpha, beta.clone()]; let inputs = vec![UserInput::Text { - text: "use $demo-skill and [$demo-skill](/tmp/beta)".to_string(), + text: format!( + "use $demo-skill and {}", + 
linked_skill_mention("demo-skill", "/tmp/beta") + ), text_elements: Vec::new(), }]; let connector_counts = HashMap::new(); @@ -271,7 +282,7 @@ fn collect_explicit_skill_mentions_allows_explicit_path_with_connector_conflict( let alpha = make_skill("alpha-skill", "/tmp/alpha"); let skills = vec![alpha.clone()]; let inputs = vec![UserInput::Text { - text: "use [$alpha-skill](/tmp/alpha)".to_string(), + text: format!("use {}", linked_skill_mention("alpha-skill", "/tmp/alpha")), text_elements: Vec::new(), }]; let connector_counts = HashMap::from([("alpha-skill".to_string(), 1)]); @@ -287,10 +298,10 @@ fn collect_explicit_skill_mentions_skips_when_linked_path_disabled() { let beta = make_skill("demo-skill", "/tmp/beta"); let skills = vec![alpha, beta]; let inputs = vec![UserInput::Text { - text: "use [$demo-skill](/tmp/alpha)".to_string(), + text: format!("use {}", linked_skill_mention("demo-skill", "/tmp/alpha")), text_elements: Vec::new(), }]; - let disabled = HashSet::from([PathBuf::from("/tmp/alpha")]); + let disabled = HashSet::from([test_path_buf("/tmp/alpha").abs()]); let connector_counts = HashMap::new(); let selected = collect_mentions(&inputs, &skills, &disabled, &connector_counts); @@ -304,7 +315,7 @@ fn collect_explicit_skill_mentions_prefers_resource_path() { let beta = make_skill("demo-skill", "/tmp/beta"); let skills = vec![alpha, beta.clone()]; let inputs = vec![UserInput::Text { - text: "use [$demo-skill](/tmp/beta)".to_string(), + text: format!("use {}", linked_skill_mention("demo-skill", "/tmp/beta")), text_elements: Vec::new(), }]; let connector_counts = HashMap::new(); @@ -320,7 +331,7 @@ fn collect_explicit_skill_mentions_skips_missing_path_with_no_fallback() { let beta = make_skill("demo-skill", "/tmp/beta"); let skills = vec![alpha, beta]; let inputs = vec![UserInput::Text { - text: "use [$demo-skill](/tmp/missing)".to_string(), + text: format!("use {}", linked_skill_mention("demo-skill", "/tmp/missing")), text_elements: Vec::new(), }]; let 
connector_counts = HashMap::new(); @@ -335,7 +346,7 @@ fn collect_explicit_skill_mentions_skips_missing_path_without_fallback() { let alpha = make_skill("demo-skill", "/tmp/alpha"); let skills = vec![alpha]; let inputs = vec![UserInput::Text { - text: "use [$demo-skill](/tmp/missing)".to_string(), + text: format!("use {}", linked_skill_mention("demo-skill", "/tmp/missing")), text_elements: Vec::new(), }]; let connector_counts = HashMap::new(); diff --git a/codex-rs/core-skills/src/invocation_utils.rs b/codex-rs/core-skills/src/invocation_utils.rs index 1936bbe630..4c9d0a4119 100644 --- a/codex-rs/core-skills/src/invocation_utils.rs +++ b/codex-rs/core-skills/src/invocation_utils.rs @@ -1,24 +1,24 @@ use std::collections::HashMap; use std::path::Path; -use std::path::PathBuf; use crate::SkillLoadOutcome; use crate::SkillMetadata; +use codex_utils_absolute_path::AbsolutePathBuf; pub(crate) fn build_implicit_skill_path_indexes( skills: Vec, ) -> ( - HashMap, - HashMap, + HashMap, + HashMap, ) { let mut by_scripts_dir = HashMap::new(); let mut by_skill_doc_path = HashMap::new(); for skill in skills { - let skill_doc_path = normalize_path(skill.path_to_skills_md.as_path()); + let skill_doc_path = canonicalize_if_exists(&skill.path_to_skills_md); by_skill_doc_path.insert(skill_doc_path, skill.clone()); if let Some(skill_dir) = skill.path_to_skills_md.parent() { - let scripts_dir = normalize_path(&skill_dir.join("scripts")); + let scripts_dir = canonicalize_if_exists(&skill_dir.join("scripts")); by_scripts_dir.insert(scripts_dir, skill); } } @@ -29,17 +29,16 @@ pub(crate) fn build_implicit_skill_path_indexes( pub fn detect_implicit_skill_invocation_for_command( outcome: &SkillLoadOutcome, command: &str, - workdir: &Path, + workdir: &AbsolutePathBuf, ) -> Option { - let workdir = normalize_path(workdir); + let workdir = canonicalize_if_exists(workdir); let tokens = tokenize_command(command); - if let Some(candidate) = detect_skill_script_run(outcome, tokens.as_slice(), 
workdir.as_path()) - { + if let Some(candidate) = detect_skill_script_run(outcome, tokens.as_slice(), &workdir) { return Some(candidate); } - detect_skill_doc_read(outcome, tokens.as_slice(), workdir.as_path()) + detect_skill_doc_read(outcome, tokens.as_slice(), &workdir) } fn tokenize_command(command: &str) -> Vec { @@ -82,19 +81,14 @@ fn script_run_token(tokens: &[String]) -> Option<&str> { fn detect_skill_script_run( outcome: &SkillLoadOutcome, tokens: &[String], - workdir: &Path, + workdir: &AbsolutePathBuf, ) -> Option { let script_token = script_run_token(tokens)?; let script_path = Path::new(script_token); - let script_path = if script_path.is_absolute() { - script_path.to_path_buf() - } else { - workdir.join(script_path) - }; - let script_path = normalize_path(script_path.as_path()); + let script_path = canonicalize_if_exists(&workdir.join(script_path)); - for ancestor in script_path.ancestors() { - if let Some(candidate) = outcome.implicit_skills_by_scripts_dir.get(ancestor) { + for path in script_path.ancestors() { + if let Some(candidate) = outcome.implicit_skills_by_scripts_dir.get(&path) { return Some(candidate.clone()); } } @@ -105,7 +99,7 @@ fn detect_skill_script_run( fn detect_skill_doc_read( outcome: &SkillLoadOutcome, tokens: &[String], - workdir: &Path, + workdir: &AbsolutePathBuf, ) -> Option { if !command_reads_file(tokens) { return None; @@ -116,11 +110,7 @@ fn detect_skill_doc_read( continue; } let path = Path::new(token); - let candidate_path = if path.is_absolute() { - normalize_path(path) - } else { - normalize_path(&workdir.join(path)) - }; + let candidate_path = canonicalize_if_exists(&workdir.join(path)); if let Some(candidate) = outcome.implicit_skills_by_doc_path.get(&candidate_path) { return Some(candidate.clone()); } @@ -146,8 +136,8 @@ fn command_basename(command: &str) -> String { .to_string() } -fn normalize_path(path: &Path) -> PathBuf { - std::fs::canonicalize(path).unwrap_or_else(|_| path.to_path_buf()) +fn 
canonicalize_if_exists(path: &AbsolutePathBuf) -> AbsolutePathBuf { + path.canonicalize().unwrap_or_else(|_| path.clone()) } #[cfg(test)] diff --git a/codex-rs/core-skills/src/invocation_utils_tests.rs b/codex-rs/core-skills/src/invocation_utils_tests.rs index 6d74dbe9a7..ab3a3e8dc0 100644 --- a/codex-rs/core-skills/src/invocation_utils_tests.rs +++ b/codex-rs/core-skills/src/invocation_utils_tests.rs @@ -1,16 +1,17 @@ use super::SkillLoadOutcome; use super::SkillMetadata; +use super::canonicalize_if_exists; use super::detect_skill_doc_read; use super::detect_skill_script_run; -use super::normalize_path; use super::script_run_token; +use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_absolute_path::test_support::PathBufExt; +use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use std::collections::HashMap; -use std::path::Path; -use std::path::PathBuf; use std::sync::Arc; -fn test_skill_metadata(skill_doc_path: PathBuf) -> SkillMetadata { +fn test_skill_metadata(skill_doc_path: AbsolutePathBuf) -> SkillMetadata { SkillMetadata { name: "test-skill".to_string(), description: "test".to_string(), @@ -23,6 +24,10 @@ fn test_skill_metadata(skill_doc_path: PathBuf) -> SkillMetadata { } } +fn test_path_display(unix_path: &str) -> String { + test_path_buf(unix_path).display().to_string() +} + #[test] fn script_run_detection_matches_runner_plus_extension() { let tokens = vec![ @@ -47,8 +52,8 @@ fn script_run_detection_excludes_python_c() { #[test] fn skill_doc_read_detection_matches_absolute_path() { - let skill_doc_path = PathBuf::from("/tmp/skill-test/SKILL.md"); - let normalized_skill_doc_path = normalize_path(skill_doc_path.as_path()); + let skill_doc_path = test_path_buf("/tmp/skill-test/SKILL.md").abs(); + let normalized_skill_doc_path = canonicalize_if_exists(&skill_doc_path); let skill = test_skill_metadata(skill_doc_path); let outcome = SkillLoadOutcome { implicit_skills_by_scripts_dir: 
Arc::new(HashMap::new()), @@ -58,11 +63,11 @@ fn skill_doc_read_detection_matches_absolute_path() { let tokens = vec![ "cat".to_string(), - "/tmp/skill-test/SKILL.md".to_string(), + test_path_display("/tmp/skill-test/SKILL.md"), "|".to_string(), "head".to_string(), ]; - let found = detect_skill_doc_read(&outcome, &tokens, Path::new("/tmp")); + let found = detect_skill_doc_read(&outcome, &tokens, &test_path_buf("/tmp").abs()); assert_eq!( found.map(|value| value.name), @@ -72,8 +77,8 @@ fn skill_doc_read_detection_matches_absolute_path() { #[test] fn skill_script_run_detection_matches_relative_path_from_skill_root() { - let skill_doc_path = PathBuf::from("/tmp/skill-test/SKILL.md"); - let scripts_dir = normalize_path(Path::new("/tmp/skill-test/scripts")); + let skill_doc_path = test_path_buf("/tmp/skill-test/SKILL.md").abs(); + let scripts_dir = canonicalize_if_exists(&test_path_buf("/tmp/skill-test/scripts").abs()); let skill = test_skill_metadata(skill_doc_path); let outcome = SkillLoadOutcome { implicit_skills_by_scripts_dir: Arc::new(HashMap::from([(scripts_dir, skill)])), @@ -85,7 +90,7 @@ fn skill_script_run_detection_matches_relative_path_from_skill_root() { "scripts/fetch_comments.py".to_string(), ]; - let found = detect_skill_script_run(&outcome, &tokens, Path::new("/tmp/skill-test")); + let found = detect_skill_script_run(&outcome, &tokens, &test_path_buf("/tmp/skill-test").abs()); assert_eq!( found.map(|value| value.name), @@ -95,8 +100,8 @@ fn skill_script_run_detection_matches_relative_path_from_skill_root() { #[test] fn skill_script_run_detection_matches_absolute_path_from_any_workdir() { - let skill_doc_path = PathBuf::from("/tmp/skill-test/SKILL.md"); - let scripts_dir = normalize_path(Path::new("/tmp/skill-test/scripts")); + let skill_doc_path = test_path_buf("/tmp/skill-test/SKILL.md").abs(); + let scripts_dir = canonicalize_if_exists(&test_path_buf("/tmp/skill-test/scripts").abs()); let skill = test_skill_metadata(skill_doc_path); let outcome = 
SkillLoadOutcome { implicit_skills_by_scripts_dir: Arc::new(HashMap::from([(scripts_dir, skill)])), @@ -105,10 +110,10 @@ fn skill_script_run_detection_matches_absolute_path_from_any_workdir() { }; let tokens = vec![ "python3".to_string(), - "/tmp/skill-test/scripts/fetch_comments.py".to_string(), + test_path_display("/tmp/skill-test/scripts/fetch_comments.py"), ]; - let found = detect_skill_script_run(&outcome, &tokens, Path::new("/tmp/other")); + let found = detect_skill_script_run(&outcome, &tokens, &test_path_buf("/tmp/other").abs()); assert_eq!( found.map(|value| value.name), diff --git a/codex-rs/core-skills/src/loader.rs b/codex-rs/core-skills/src/loader.rs index 42de9fb288..2cae6a4b0b 100644 --- a/codex-rs/core-skills/src/loader.rs +++ b/codex-rs/core-skills/src/loader.rs @@ -1,5 +1,6 @@ use crate::model::SkillDependencies; use crate::model::SkillError; +use crate::model::SkillFileSystemsByPath; use crate::model::SkillInterface; use crate::model::SkillLoadOutcome; use crate::model::SkillMetadata; @@ -12,21 +13,24 @@ use codex_config::ConfigLayerStackOrdering; use codex_config::default_project_root_markers; use codex_config::merge_toml_values; use codex_config::project_root_markers_from_config; +use codex_exec_server::ExecutorFileSystem; +use codex_exec_server::LOCAL_FS; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use codex_utils_plugins::plugin_namespace_for_skill_path; use dirs::home_dir; -use dunce::canonicalize as canonicalize_path; use serde::Deserialize; +use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::error::Error; use std::fmt; -use std::fs; +use std::io; use std::path::Component; -use std::path::Path; use std::path::PathBuf; +use std::sync::Arc; use toml::Value as TomlValue; use tracing::error; @@ -145,23 +149,40 @@ impl fmt::Display for SkillParseError { 
impl Error for SkillParseError {} pub struct SkillRoot { - pub path: PathBuf, + pub path: AbsolutePathBuf, pub scope: SkillScope, + pub file_system: Arc, } -pub fn load_skills_from_roots(roots: I) -> SkillLoadOutcome +pub async fn load_skills_from_roots(roots: I) -> SkillLoadOutcome where I: IntoIterator, { let mut outcome = SkillLoadOutcome::default(); + let mut file_systems_by_skill_path: HashMap> = + HashMap::new(); for root in roots { - discover_skills_under_root(&root.path, root.scope, &mut outcome); + let fs = root.file_system; + let skills_before_root = outcome.skills.len(); + discover_skills_under_root(fs.as_ref(), &root.path, root.scope, &mut outcome).await; + for skill in &outcome.skills[skills_before_root..] { + file_systems_by_skill_path + .entry(skill.path_to_skills_md.clone()) + .or_insert_with(|| Arc::clone(&fs)); + } } - let mut seen: HashSet = HashSet::new(); + let mut seen: HashSet = HashSet::new(); outcome .skills .retain(|skill| seen.insert(skill.path_to_skills_md.clone())); + let retained_skill_paths: HashSet = outcome + .skills + .iter() + .map(|skill| skill.path_to_skills_md.clone()) + .collect(); + file_systems_by_skill_path.retain(|path, _| retained_skill_paths.contains(path)); + outcome.file_systems_by_skill_path = SkillFileSystemsByPath::new(file_systems_by_skill_path); fn scope_rank(scope: SkillScope) -> u8 { // Higher-priority scopes first (matches root scan order for dedupe). 
@@ -183,38 +204,46 @@ where outcome } -pub(crate) fn skill_roots( +pub(crate) async fn skill_roots( + fs: Option>, config_layer_stack: &ConfigLayerStack, - cwd: &Path, - plugin_skill_roots: Vec, + cwd: &AbsolutePathBuf, + plugin_skill_roots: Vec, ) -> Vec { + let home_dir = + home_dir().and_then(|path| AbsolutePathBuf::from_absolute_path_checked(path).ok()); skill_roots_with_home_dir( + fs, config_layer_stack, cwd, - home_dir().as_deref(), + home_dir.as_ref(), plugin_skill_roots, ) + .await } -fn skill_roots_with_home_dir( +async fn skill_roots_with_home_dir( + fs: Option>, config_layer_stack: &ConfigLayerStack, - cwd: &Path, - home_dir: Option<&Path>, - plugin_skill_roots: Vec, + cwd: &AbsolutePathBuf, + home_dir: Option<&AbsolutePathBuf>, + plugin_skill_roots: Vec, ) -> Vec { - let mut roots = skill_roots_from_layer_stack_inner(config_layer_stack, home_dir); + let mut roots = skill_roots_from_layer_stack_inner(config_layer_stack, home_dir, fs.clone()); roots.extend(plugin_skill_roots.into_iter().map(|path| SkillRoot { path, scope: SkillScope::User, + file_system: Arc::clone(&LOCAL_FS), })); - roots.extend(repo_agents_skill_roots(config_layer_stack, cwd)); + roots.extend(repo_agents_skill_roots(fs, config_layer_stack, cwd).await); dedupe_skill_roots_by_path(&mut roots); roots } fn skill_roots_from_layer_stack_inner( config_layer_stack: &ConfigLayerStack, - home_dir: Option<&Path>, + home_dir: Option<&AbsolutePathBuf>, + repo_fs: Option>, ) -> Vec { let mut roots = Vec::new(); @@ -228,17 +257,21 @@ fn skill_roots_from_layer_stack_inner( match &layer.name { ConfigLayerSource::Project { .. } => { - roots.push(SkillRoot { - path: config_folder.as_path().join(SKILLS_DIR_NAME), - scope: SkillScope::Repo, - }); + if let Some(repo_fs) = &repo_fs { + roots.push(SkillRoot { + path: config_folder.join(SKILLS_DIR_NAME), + scope: SkillScope::Repo, + file_system: Arc::clone(repo_fs), + }); + } } ConfigLayerSource::User { .. 
} => { // Deprecated user skills location (`$CODEX_HOME/skills`), kept for backward // compatibility. roots.push(SkillRoot { - path: config_folder.as_path().join(SKILLS_DIR_NAME), + path: config_folder.join(SKILLS_DIR_NAME), scope: SkillScope::User, + file_system: Arc::clone(&LOCAL_FS), }); // `$HOME/.agents/skills` (user-installed skills). @@ -246,22 +279,25 @@ fn skill_roots_from_layer_stack_inner( roots.push(SkillRoot { path: home_dir.join(AGENTS_DIR_NAME).join(SKILLS_DIR_NAME), scope: SkillScope::User, + file_system: Arc::clone(&LOCAL_FS), }); } // Embedded system skills are cached under `$CODEX_HOME/skills/.system` and are a // special case (not a config layer). roots.push(SkillRoot { - path: system_cache_root_dir(config_folder.as_path()), + path: system_cache_root_dir(&config_folder), scope: SkillScope::System, + file_system: Arc::clone(&LOCAL_FS), }); } ConfigLayerSource::System { .. } => { // The system config layer lives under `/etc/codex/` on Unix, so treat // `/etc/codex/skills` as admin-scoped skills. roots.push(SkillRoot { - path: config_folder.as_path().join(SKILLS_DIR_NAME), + path: config_folder.join(SKILLS_DIR_NAME), scope: SkillScope::Admin, + file_system: Arc::clone(&LOCAL_FS), }); } ConfigLayerSource::Mdm { .. 
} @@ -274,18 +310,34 @@ fn skill_roots_from_layer_stack_inner( roots } -fn repo_agents_skill_roots(config_layer_stack: &ConfigLayerStack, cwd: &Path) -> Vec { +async fn repo_agents_skill_roots( + fs: Option>, + config_layer_stack: &ConfigLayerStack, + cwd: &AbsolutePathBuf, +) -> Vec { + let Some(fs) = fs else { + return Vec::new(); + }; let project_root_markers = project_root_markers_from_stack(config_layer_stack); - let project_root = find_project_root(cwd, &project_root_markers); + let project_root = find_project_root(fs.as_ref(), cwd, &project_root_markers).await; let dirs = dirs_between_project_root_and_cwd(cwd, &project_root); let mut roots = Vec::new(); for dir in dirs { let agents_skills = dir.join(AGENTS_DIR_NAME).join(SKILLS_DIR_NAME); - if agents_skills.is_dir() { - roots.push(SkillRoot { + match fs.get_metadata(&agents_skills, /*sandbox*/ None).await { + Ok(metadata) if metadata.is_directory => roots.push(SkillRoot { path: agents_skills, scope: SkillScope::Repo, - }); + file_system: Arc::clone(&fs), + }), + Ok(_) => {} + Err(err) if err.kind() == io::ErrorKind::NotFound => {} + Err(err) => { + tracing::warn!( + "failed to stat repo skills root {}: {err:#}", + agents_skills.display() + ); + } } } roots @@ -313,34 +365,48 @@ fn project_root_markers_from_stack(config_layer_stack: &ConfigLayerStack) -> Vec } } -fn find_project_root(cwd: &Path, project_root_markers: &[String]) -> PathBuf { +async fn find_project_root( + fs: &dyn ExecutorFileSystem, + cwd: &AbsolutePathBuf, + project_root_markers: &[String], +) -> AbsolutePathBuf { if project_root_markers.is_empty() { - return cwd.to_path_buf(); + return cwd.clone(); } for ancestor in cwd.ancestors() { for marker in project_root_markers { let marker_path = ancestor.join(marker); - if marker_path.exists() { - return ancestor.to_path_buf(); + match fs.get_metadata(&marker_path, /*sandbox*/ None).await { + Ok(_) => return ancestor, + Err(err) if err.kind() == io::ErrorKind::NotFound => {} + Err(err) => { + 
tracing::warn!( + "failed to stat project root marker {}: {err:#}", + marker_path.display() + ); + } } } } - cwd.to_path_buf() + cwd.clone() } -fn dirs_between_project_root_and_cwd(cwd: &Path, project_root: &Path) -> Vec { +fn dirs_between_project_root_and_cwd( + cwd: &AbsolutePathBuf, + project_root: &AbsolutePathBuf, +) -> Vec { let mut dirs = cwd .ancestors() - .scan(false, |done, a| { + .scan(false, |done, dir| { if *done { None } else { - if a == project_root { + if &dir == project_root { *done = true; } - Some(a.to_path_buf()) + Some(dir) } }) .collect::>(); @@ -349,24 +415,37 @@ fn dirs_between_project_root_and_cwd(cwd: &Path, project_root: &Path) -> Vec) { - let mut seen: HashSet = HashSet::new(); + let mut seen: HashSet = HashSet::new(); roots.retain(|root| seen.insert(root.path.clone())); } -fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut SkillLoadOutcome) { - let Ok(root) = canonicalize_path(root) else { - return; - }; +fn canonicalize_for_skill_identity(path: &AbsolutePathBuf) -> AbsolutePathBuf { + path.canonicalize().unwrap_or_else(|_| path.clone()) +} - if !root.is_dir() { - return; +async fn discover_skills_under_root( + fs: &dyn ExecutorFileSystem, + root: &AbsolutePathBuf, + scope: SkillScope, + outcome: &mut SkillLoadOutcome, +) { + let root = canonicalize_for_skill_identity(root); + + match fs.get_metadata(&root, /*sandbox*/ None).await { + Ok(metadata) if metadata.is_directory => {} + Ok(_) => return, + Err(err) if err.kind() == io::ErrorKind::NotFound => return, + Err(err) => { + error!("failed to stat skills root {}: {err:#}", root.display()); + return; + } } fn enqueue_dir( - queue: &mut VecDeque<(PathBuf, usize)>, - visited_dirs: &mut HashSet, + queue: &mut VecDeque<(AbsolutePathBuf, usize)>, + visited_dirs: &mut HashSet, truncated_by_dir_limit: &mut bool, - path: PathBuf, + path: AbsolutePathBuf, depth: usize, ) { if depth > MAX_SCAN_DEPTH { @@ -387,14 +466,14 @@ fn discover_skills_under_root(root: &Path, scope: 
SkillScope, outcome: &mut Skil SkillScope::Repo | SkillScope::User | SkillScope::Admin ); - let mut visited_dirs: HashSet = HashSet::new(); + let mut visited_dirs: HashSet = HashSet::new(); visited_dirs.insert(root.clone()); - let mut queue: VecDeque<(PathBuf, usize)> = VecDeque::from([(root.clone(), 0)]); + let mut queue: VecDeque<(AbsolutePathBuf, usize)> = VecDeque::from([(root.clone(), 0)]); let mut truncated_by_dir_limit = false; while let Some((dir, depth)) = queue.pop_front() { - let entries = match fs::read_dir(&dir) { + let entries = match fs.read_directory(&dir, /*sandbox*/ None).await { Ok(entries) => entries, Err(e) => { error!("failed to read skills dir {}: {e:#}", dir.display()); @@ -402,59 +481,53 @@ fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut Skil } }; - for entry in entries.flatten() { - let path = entry.path(); - let file_name = match path.file_name().and_then(|f| f.to_str()) { - Some(name) => name, - None => continue, - }; - + for entry in entries { + let file_name = entry.file_name; if file_name.starts_with('.') { continue; } - let Ok(file_type) = entry.file_type() else { - continue; + let path = dir.join(&file_name); + let metadata = match fs.get_metadata(&path, /*sandbox*/ None).await { + Ok(metadata) => metadata, + Err(e) => { + error!("failed to stat skills path {}: {e:#}", path.display()); + continue; + } }; - if file_type.is_symlink() { + if metadata.is_symlink { if !follow_symlinks { continue; } - - // Follow the symlink to determine what it points to. 
- let metadata = match fs::metadata(&path) { - Ok(metadata) => metadata, - Err(e) => { + match fs.read_directory(&path, /*sandbox*/ None).await { + Ok(_) => { + let resolved_dir = canonicalize_for_skill_identity(&path); + enqueue_dir( + &mut queue, + &mut visited_dirs, + &mut truncated_by_dir_limit, + resolved_dir, + depth + 1, + ); + } + Err(err) + if matches!( + err.kind(), + io::ErrorKind::NotADirectory | io::ErrorKind::NotFound + ) => {} + Err(err) => { error!( - "failed to stat skills entry {} (symlink): {e:#}", + "failed to read skills symlink dir {}: {err:#}", path.display() ); - continue; } - }; - - if metadata.is_dir() { - let Ok(resolved_dir) = canonicalize_path(&path) else { - continue; - }; - enqueue_dir( - &mut queue, - &mut visited_dirs, - &mut truncated_by_dir_limit, - resolved_dir, - depth + 1, - ); - continue; } - continue; } - if file_type.is_dir() { - let Ok(resolved_dir) = canonicalize_path(&path) else { - continue; - }; + if metadata.is_directory { + let resolved_dir = canonicalize_for_skill_identity(&path); enqueue_dir( &mut queue, &mut visited_dirs, @@ -465,15 +538,15 @@ fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut Skil continue; } - if file_type.is_file() && file_name == SKILLS_FILENAME { - match parse_skill_file(&path, scope) { + if metadata.is_file && file_name == SKILLS_FILENAME { + match parse_skill_file(fs, &path, scope).await { Ok(skill) => { outcome.skills.push(skill); } Err(err) => { if scope != SkillScope::System { outcome.errors.push(SkillError { - path, + path: path.clone(), message: err.to_string(), }); } @@ -492,8 +565,15 @@ fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut Skil } } -fn parse_skill_file(path: &Path, scope: SkillScope) -> Result { - let contents = fs::read_to_string(path).map_err(SkillParseError::Read)?; +async fn parse_skill_file( + fs: &dyn ExecutorFileSystem, + path: &AbsolutePathBuf, + scope: SkillScope, +) -> Result { + let contents = fs + 
.read_file_text(path, /*sandbox*/ None) + .await + .map_err(SkillParseError::Read)?; let frontmatter = extract_frontmatter(&contents).ok_or(SkillParseError::MissingFrontmatter)?; @@ -506,7 +586,7 @@ fn parse_skill_file(path: &Path, scope: SkillScope) -> Result Result Result Result String { +fn default_skill_name(path: &AbsolutePathBuf) -> String { path.parent() - .and_then(Path::file_name) - .and_then(|name| name.to_str()) - .map(sanitize_single_line) + .and_then(|parent| { + parent + .file_name() + .and_then(|name| name.to_str()) + .map(sanitize_single_line) + }) .filter(|value| !value.is_empty()) .unwrap_or_else(|| "skill".to_string()) } -fn namespaced_skill_name(path: &Path, base_name: &str) -> String { - plugin_namespace_for_skill_path(path) +async fn namespaced_skill_name( + fs: &dyn ExecutorFileSystem, + path: &AbsolutePathBuf, + base_name: &str, +) -> String { + plugin_namespace_for_skill_path(fs, path) + .await .map(|namespace| format!("{namespace}:{base_name}")) .unwrap_or_else(|| base_name.to_string()) } -fn load_skill_metadata(skill_path: &Path) -> LoadedSkillMetadata { +async fn load_skill_metadata( + fs: &dyn ExecutorFileSystem, + skill_path: &AbsolutePathBuf, +) -> LoadedSkillMetadata { // Fail open: optional metadata should not block loading SKILL.md. 
let Some(skill_dir) = skill_path.parent() else { return LoadedSkillMetadata::default(); @@ -571,11 +662,23 @@ fn load_skill_metadata(skill_path: &Path) -> LoadedSkillMetadata { let metadata_path = skill_dir .join(SKILLS_METADATA_DIR) .join(SKILLS_METADATA_FILENAME); - if !metadata_path.exists() { - return LoadedSkillMetadata::default(); + match fs.get_metadata(&metadata_path, /*sandbox*/ None).await { + Ok(metadata) if metadata.is_file => {} + Ok(_) => return LoadedSkillMetadata::default(), + Err(error) if error.kind() == io::ErrorKind::NotFound => { + return LoadedSkillMetadata::default(); + } + Err(error) => { + tracing::warn!( + "ignoring {path}: failed to stat {label}: {error}", + path = metadata_path.display(), + label = SKILLS_METADATA_FILENAME + ); + return LoadedSkillMetadata::default(); + } } - let contents = match fs::read_to_string(&metadata_path) { + let contents = match fs.read_file_text(&metadata_path, /*sandbox*/ None).await { Ok(contents) => contents, Err(error) => { tracing::warn!( @@ -588,7 +691,7 @@ fn load_skill_metadata(skill_path: &Path) -> LoadedSkillMetadata { }; let parsed: SkillMetadataFile = { - let _guard = AbsolutePathBufGuard::new(skill_dir); + let _guard = AbsolutePathBufGuard::new(skill_dir.as_path()); match serde_yaml::from_str(&contents) { Ok(parsed) => parsed, Err(error) => { @@ -608,13 +711,16 @@ fn load_skill_metadata(skill_path: &Path) -> LoadedSkillMetadata { policy, } = parsed; LoadedSkillMetadata { - interface: resolve_interface(interface, skill_dir), + interface: resolve_interface(interface, &skill_dir), dependencies: resolve_dependencies(dependencies), policy: resolve_policy(policy), } } -fn resolve_interface(interface: Option, skill_dir: &Path) -> Option { +fn resolve_interface( + interface: Option, + skill_dir: &AbsolutePathBuf, +) -> Option { let interface = interface?; let interface = SkillInterface { display_name: resolve_str( @@ -705,10 +811,10 @@ fn resolve_dependency_tool(tool: DependencyTool) -> Option } fn 
resolve_asset_path( - skill_dir: &Path, + skill_dir: &AbsolutePathBuf, field: &'static str, path: Option, -) -> Option { +) -> Option { // Icons must be relative paths under the skill's assets/ directory; otherwise return None. let path = path?; if path.as_os_str().is_empty() { @@ -838,11 +944,13 @@ fn extract_frontmatter(contents: &str) -> Option { Some(frontmatter_lines.join("\n")) } #[cfg(test)] -pub(crate) fn skill_roots_from_layer_stack( +pub(crate) async fn skill_roots_from_layer_stack( + fs: Arc, config_layer_stack: &ConfigLayerStack, - home_dir: Option<&Path>, + cwd: &AbsolutePathBuf, + home_dir: Option<&AbsolutePathBuf>, ) -> Vec { - skill_roots_with_home_dir(config_layer_stack, Path::new("."), home_dir, Vec::new()) + skill_roots_with_home_dir(Some(fs), config_layer_stack, cwd, home_dir, Vec::new()).await } #[cfg(test)] diff --git a/codex-rs/core-skills/src/loader_tests.rs b/codex-rs/core-skills/src/loader_tests.rs index 3702856306..a12f09f80f 100644 --- a/codex-rs/core-skills/src/loader_tests.rs +++ b/codex-rs/core-skills/src/loader_tests.rs @@ -4,18 +4,25 @@ use codex_config::ConfigLayerEntry; use codex_config::ConfigLayerStack; use codex_config::ConfigRequirements; use codex_config::ConfigRequirementsToml; +use codex_exec_server::LOCAL_FS; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_absolute_path::test_support::PathBufExt; +use codex_utils_absolute_path::test_support::PathExt; +use dunce::canonicalize as canonicalize_path; use pretty_assertions::assert_eq; +use std::fs; use std::path::Path; +use std::path::PathBuf; +use std::sync::Arc; use tempfile::TempDir; use toml::Value as TomlValue; const REPO_ROOT_CONFIG_DIR_NAME: &str = ".codex"; struct TestConfig { - cwd: PathBuf, + cwd: AbsolutePathBuf, config_layer_stack: ConfigLayerStack, } @@ -24,7 +31,7 @@ async fn make_config(codex_home: &TempDir) -> TestConfig { } fn config_file(path: PathBuf) -> 
AbsolutePathBuf { - AbsolutePathBuf::from_absolute_path(path).expect("config file path should be absolute") + path.abs() } fn project_layers_for_cwd(cwd: &Path) -> Vec { @@ -63,8 +70,7 @@ fn project_layers_for_cwd(cwd: &Path) -> Vec { dot_codex.is_dir().then(|| { ConfigLayerEntry::new( ConfigLayerSource::Project { - dot_codex_folder: AbsolutePathBuf::from_absolute_path(dot_codex) - .expect("project .codex path should be absolute"), + dot_codex_folder: dot_codex.abs(), }, TomlValue::Table(toml::map::Map::new()), ) @@ -99,8 +105,9 @@ async fn make_config_for_cwd(codex_home: &TempDir, cwd: PathBuf) -> TestConfig { ]; layers.extend(project_layers_for_cwd(&cwd)); + let cwd_abs = cwd.abs(); TestConfig { - cwd, + cwd: cwd_abs, config_layer_stack: ConfigLayerStack::new( layers, ConfigRequirements::default(), @@ -110,14 +117,18 @@ async fn make_config_for_cwd(codex_home: &TempDir, cwd: PathBuf) -> TestConfig { } } -fn load_skills_for_test(config: &TestConfig) -> SkillLoadOutcome { +async fn load_skills_for_test(config: &TestConfig) -> SkillLoadOutcome { // Keep unit tests hermetic by never scanning the real `$HOME/.agents/skills`. 
- super::load_skills_from_roots(super::skill_roots_with_home_dir( - &config.config_layer_stack, - &config.cwd, - /*home_dir*/ None, - Vec::new(), - )) + super::load_skills_from_roots( + super::skill_roots_from_layer_stack( + Arc::clone(&LOCAL_FS), + &config.config_layer_stack, + &config.cwd, + /*home_dir*/ None, + ) + .await, + ) + .await } fn mark_as_git_repo(dir: &Path) { @@ -126,12 +137,14 @@ fn mark_as_git_repo(dir: &Path) { fs::write(dir.join(".git"), "gitdir: fake\n").unwrap(); } -fn normalized(path: &Path) -> PathBuf { - canonicalize_path(path).unwrap_or_else(|_| path.to_path_buf()) +fn normalized(path: &Path) -> AbsolutePathBuf { + canonicalize_path(path) + .unwrap_or_else(|_| path.to_path_buf()) + .abs() } -#[test] -fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to_admin() +#[tokio::test] +async fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to_admin() -> anyhow::Result<()> { let tmp = tempfile::tempdir()?; @@ -142,8 +155,8 @@ fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to fs::create_dir_all(&user_folder)?; // The file path doesn't need to exist; it's only used to derive the config folder. 
- let system_file = AbsolutePathBuf::from_absolute_path(system_folder.join("config.toml"))?; - let user_file = AbsolutePathBuf::from_absolute_path(user_folder.join("config.toml"))?; + let system_file = system_folder.join("config.toml").abs(); + let user_file = user_folder.join("config.toml").abs(); let layers = vec![ ConfigLayerEntry::new( @@ -161,10 +174,17 @@ fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to ConfigRequirementsToml::default(), )?; - let got = skill_roots_from_layer_stack(&stack, Some(&home_folder)) - .into_iter() - .map(|root| (root.scope, root.path)) - .collect::>(); + let home_folder_abs = home_folder.abs(); + let got = skill_roots_from_layer_stack( + Arc::clone(&LOCAL_FS), + &stack, + &home_folder_abs, + Some(&home_folder_abs), + ) + .await + .into_iter() + .map(|root| (root.scope, root.path.to_path_buf())) + .collect::>(); assert_eq!( got, @@ -185,8 +205,8 @@ fn skill_roots_from_layer_stack_maps_user_to_user_and_system_cache_and_system_to Ok(()) } -#[test] -fn skill_roots_from_layer_stack_includes_disabled_project_layers() -> anyhow::Result<()> { +#[tokio::test] +async fn skill_roots_from_layer_stack_includes_disabled_project_layers() -> anyhow::Result<()> { let tmp = tempfile::tempdir()?; let home_folder = tmp.path().join("home"); @@ -197,8 +217,8 @@ fn skill_roots_from_layer_stack_includes_disabled_project_layers() -> anyhow::Re let dot_codex = project_root.join(".codex"); fs::create_dir_all(&dot_codex)?; - let user_file = AbsolutePathBuf::from_absolute_path(user_folder.join("config.toml"))?; - let project_dot_codex = AbsolutePathBuf::from_absolute_path(&dot_codex)?; + let user_file = user_folder.join("config.toml").abs(); + let project_dot_codex = dot_codex.abs(); let layers = vec![ ConfigLayerEntry::new( @@ -219,10 +239,18 @@ fn skill_roots_from_layer_stack_includes_disabled_project_layers() -> anyhow::Re ConfigRequirementsToml::default(), )?; - let got = skill_roots_from_layer_stack(&stack, 
Some(&home_folder)) - .into_iter() - .map(|root| (root.scope, root.path)) - .collect::>(); + let home_folder_abs = home_folder.abs(); + let project_root_abs = project_root.abs(); + let got = skill_roots_from_layer_stack( + Arc::clone(&LOCAL_FS), + &stack, + &project_root_abs, + Some(&home_folder_abs), + ) + .await + .into_iter() + .map(|root| (root.scope, root.path.to_path_buf())) + .collect::>(); assert_eq!( got, @@ -243,15 +271,15 @@ fn skill_roots_from_layer_stack_includes_disabled_project_layers() -> anyhow::Re Ok(()) } -#[test] -fn loads_skills_from_home_agents_dir_for_user_scope() -> anyhow::Result<()> { +#[tokio::test] +async fn loads_skills_from_home_agents_dir_for_user_scope() -> anyhow::Result<()> { let tmp = tempfile::tempdir()?; let home_folder = tmp.path().join("home"); let user_folder = home_folder.join("codex"); fs::create_dir_all(&user_folder)?; - let user_file = AbsolutePathBuf::from_absolute_path(user_folder.join("config.toml"))?; + let user_file = user_folder.join("config.toml").abs(); let layers = vec![ConfigLayerEntry::new( ConfigLayerSource::User { file: user_file }, TomlValue::Table(toml::map::Map::new()), @@ -269,7 +297,15 @@ fn loads_skills_from_home_agents_dir_for_user_scope() -> anyhow::Result<()> { "from home agents", ); - let outcome = load_skills_from_roots(skill_roots_from_layer_stack(&stack, Some(&home_folder))); + let home_folder_abs = home_folder.abs(); + let roots = skill_roots_from_layer_stack( + Arc::clone(&LOCAL_FS), + &stack, + &home_folder_abs, + Some(&home_folder_abs), + ) + .await; + let outcome = load_skills_from_roots(roots).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -383,7 +419,7 @@ async fn loads_skill_dependencies_metadata_from_yaml() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -461,7 +497,7 @@ interface: ); let cfg = make_config(&codex_home).await; - let 
outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -510,7 +546,7 @@ policy: ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -542,7 +578,7 @@ policy: {} ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -581,7 +617,7 @@ policy: ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -619,7 +655,7 @@ async fn accepts_icon_paths_under_assets_dir() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -666,7 +702,7 @@ async fn ignores_invalid_brand_color() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -712,7 +748,7 @@ async fn ignores_default_prompt_over_max_length() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -760,7 +796,7 @@ async fn drops_interface_when_icons_are_invalid() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -804,7 +840,7 @@ async fn loads_skills_via_symlinked_subdir_for_user_scope() { symlink_dir(shared.path(), &codex_home.path().join("skills/shared")); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; 
assert!( outcome.errors.is_empty(), @@ -839,7 +875,7 @@ async fn ignores_symlinked_skill_file_for_user_scope() { symlink_file(&shared_skill_path, &skill_dir.join(SKILLS_FILENAME)); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -863,7 +899,7 @@ async fn does_not_loop_on_symlink_cycle_for_user_scope() { let skill_path = write_skill_at(&cycle_dir, "demo", "cycle-skill", "still loads"); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -885,9 +921,9 @@ async fn does_not_loop_on_symlink_cycle_for_user_scope() { ); } -#[test] +#[tokio::test] #[cfg(unix)] -fn loads_skills_via_symlinked_subdir_for_admin_scope() { +async fn loads_skills_via_symlinked_subdir_for_admin_scope() { let admin_root = tempfile::tempdir().expect("tempdir"); let shared = tempfile::tempdir().expect("tempdir"); @@ -897,9 +933,11 @@ fn loads_skills_via_symlinked_subdir_for_admin_scope() { symlink_dir(shared.path(), &admin_root.path().join("shared")); let outcome = load_skills_from_roots([SkillRoot { - path: admin_root.path().to_path_buf(), + path: admin_root.path().abs(), scope: SkillScope::Admin, - }]); + file_system: Arc::clone(&LOCAL_FS), + }]) + .await; assert!( outcome.errors.is_empty(), @@ -938,7 +976,7 @@ async fn loads_skills_via_symlinked_subdir_for_repo_scope() { symlink_dir(shared.path(), &repo_skills_root.join("shared")); let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -973,9 +1011,11 @@ async fn system_scope_ignores_symlinked_subdir() { symlink_dir(shared.path(), &system_root.join("shared")); let outcome = load_skills_from_roots([SkillRoot { - path: system_root, + path: 
system_root.abs(), scope: SkillScope::System, - }]); + file_system: Arc::clone(&LOCAL_FS), + }]) + .await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1003,9 +1043,11 @@ async fn respects_max_scan_depth_for_user_scope() { let skills_root = codex_home.path().join("skills"); let outcome = load_skills_from_roots([SkillRoot { - path: skills_root, + path: skills_root.abs(), scope: SkillScope::User, - }]); + file_system: Arc::clone(&LOCAL_FS), + }]) + .await; assert!( outcome.errors.is_empty(), @@ -1033,7 +1075,7 @@ async fn loads_valid_skill() { let skill_path = write_skill(&codex_home, "demo", "demo-skill", "does things\ncarefully"); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1064,7 +1106,7 @@ async fn falls_back_to_directory_name_when_skill_name_is_missing() { ); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), @@ -1103,9 +1145,11 @@ async fn namespaces_plugin_skills_using_plugin_name() { .unwrap(); let outcome = load_skills_from_roots([SkillRoot { - path: plugin_root.join("skills"), + path: plugin_root.join("skills").abs(), scope: SkillScope::User, - }]); + file_system: Arc::clone(&LOCAL_FS), + }]) + .await; assert!( outcome.errors.is_empty(), @@ -1137,7 +1181,7 @@ async fn loads_short_description_from_metadata() { fs::write(&skill_path, contents).unwrap(); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1170,7 +1214,7 @@ async fn enforces_short_description_length_limits() { fs::write(skill_dir.join(SKILLS_FILENAME), contents).unwrap(); let cfg = make_config(&codex_home).await; - let outcome = 
load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert_eq!(outcome.skills.len(), 0); assert_eq!(outcome.errors.len(), 1); assert!( @@ -1199,7 +1243,7 @@ async fn skips_hidden_and_invalid() { fs::write(invalid_dir.join(SKILLS_FILENAME), "---\nname: bad").unwrap(); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert_eq!(outcome.skills.len(), 0); assert_eq!(outcome.errors.len(), 1); assert!( @@ -1217,7 +1261,7 @@ async fn enforces_length_limits() { write_skill(&codex_home, "max-len", "max-len", &max_desc); let cfg = make_config(&codex_home).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1227,7 +1271,7 @@ async fn enforces_length_limits() { let too_long_desc = "\u{1F4A1}".repeat(MAX_DESCRIPTION_LEN + 1); write_skill(&codex_home, "too-long", "too-long", &too_long_desc); - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.errors.len(), 1); assert!( @@ -1249,7 +1293,7 @@ async fn loads_skills_from_repo_root() { let skill_path = write_skill_at(&skills_root, "repo", "repo-skill", "from repo"); let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1284,7 +1328,7 @@ async fn loads_skills_from_agents_dir_without_codex_dir() { ); let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1336,7 +1380,7 @@ async fn loads_skills_from_all_codex_dirs_under_project_root() { let cfg = 
make_config_for_cwd(&codex_home, nested_dir).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1386,7 +1430,7 @@ async fn loads_skills_from_codex_dir_when_not_git_repo() { let cfg = make_config_for_cwd(&codex_home, work_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1415,14 +1459,17 @@ async fn deduplicates_by_path_preferring_first_root() { let outcome = load_skills_from_roots([ SkillRoot { - path: root.path().to_path_buf(), + path: root.path().abs(), scope: SkillScope::Repo, + file_system: Arc::clone(&LOCAL_FS), }, SkillRoot { - path: root.path().to_path_buf(), + path: root.path().abs(), scope: SkillScope::User, + file_system: Arc::clone(&LOCAL_FS), }, - ]); + ]) + .await; assert!( outcome.errors.is_empty(), @@ -1463,7 +1510,7 @@ async fn keeps_duplicate_names_from_repo_and_user() { let cfg = make_config_for_cwd(&codex_home, repo_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1526,16 +1573,15 @@ async fn keeps_duplicate_names_from_nested_codex_dirs() { ); let cfg = make_config_for_cwd(&codex_home, nested_dir).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); - let root_path = canonicalize_path(&root_skill_path).unwrap_or_else(|_| root_skill_path.clone()); - let nested_path = - canonicalize_path(&nested_skill_path).unwrap_or_else(|_| nested_skill_path.clone()); + let root_path = normalized(&root_skill_path); + let nested_path = normalized(&nested_skill_path); let (first_path, second_path, first_description, second_description) = if 
root_path <= nested_path { (root_path, nested_path, "from root", "from nested") @@ -1589,7 +1635,7 @@ async fn repo_skills_search_does_not_escape_repo_root() { let cfg = make_config_for_cwd(&codex_home, repo_dir).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1618,7 +1664,7 @@ async fn loads_skills_when_cwd_is_file_in_repo() { let cfg = make_config_for_cwd(&codex_home, file_path).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1658,7 +1704,7 @@ async fn non_git_repo_skills_search_does_not_walk_parents() { let cfg = make_config_for_cwd(&codex_home, nested_dir).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1676,7 +1722,7 @@ async fn loads_skills_from_system_cache_when_present() { let cfg = make_config_for_cwd(&codex_home, work_dir.path().to_path_buf()).await; - let outcome = load_skills_for_test(&cfg); + let outcome = load_skills_for_test(&cfg).await; assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", @@ -1702,10 +1748,16 @@ async fn skill_roots_include_admin_with_lowest_priority() { let codex_home = tempfile::tempdir().expect("tempdir"); let cfg = make_config(&codex_home).await; - let scopes: Vec = super::skill_roots(&cfg.config_layer_stack, &cfg.cwd, Vec::new()) - .into_iter() - .map(|root| root.scope) - .collect(); + let scopes: Vec = super::skill_roots( + Some(Arc::clone(&LOCAL_FS)), + &cfg.config_layer_stack, + &cfg.cwd, + Vec::new(), + ) + .await + .into_iter() + .map(|root| root.scope) + .collect(); let mut expected = vec![SkillScope::User, SkillScope::System]; if home_dir().is_some() { expected.insert(1, SkillScope::User); diff --git a/codex-rs/core-skills/src/manager.rs 
b/codex-rs/core-skills/src/manager.rs index cd3b427714..b7b7a4b64d 100644 --- a/codex-rs/core-skills/src/manager.rs +++ b/codex-rs/core-skills/src/manager.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::path::Path; -use std::path::PathBuf; use std::sync::Arc; use std::sync::RwLock; use codex_config::ConfigLayerStack; +use codex_exec_server::ExecutorFileSystem; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; +use codex_utils_absolute_path::AbsolutePathBuf; use tracing::info; use tracing::warn; @@ -25,16 +25,16 @@ use codex_config::SkillsConfig; #[derive(Debug, Clone)] pub struct SkillsLoadInput { - pub cwd: PathBuf, - pub effective_skill_roots: Vec, + pub cwd: AbsolutePathBuf, + pub effective_skill_roots: Vec, pub config_layer_stack: ConfigLayerStack, pub bundled_skills_enabled: bool, } impl SkillsLoadInput { pub fn new( - cwd: PathBuf, - effective_skill_roots: Vec, + cwd: AbsolutePathBuf, + effective_skill_roots: Vec, config_layer_stack: ConfigLayerStack, bundled_skills_enabled: bool, ) -> Self { @@ -48,19 +48,19 @@ impl SkillsLoadInput { } pub struct SkillsManager { - codex_home: PathBuf, + codex_home: AbsolutePathBuf, restriction_product: Option, - cache_by_cwd: RwLock>, + cache_by_cwd: RwLock>, cache_by_config: RwLock>, } impl SkillsManager { - pub fn new(codex_home: PathBuf, bundled_skills_enabled: bool) -> Self { + pub fn new(codex_home: AbsolutePathBuf, bundled_skills_enabled: bool) -> Self { Self::new_with_restriction_product(codex_home, bundled_skills_enabled, Some(Product::Codex)) } pub fn new_with_restriction_product( - codex_home: PathBuf, + codex_home: AbsolutePathBuf, bundled_skills_enabled: bool, restriction_product: Option, ) -> Self { @@ -86,15 +86,19 @@ impl SkillsManager { /// This path uses a cache keyed by the effective skill-relevant config state rather than just /// cwd so role-local and session-local skill overrides cannot bleed across sessions that happen /// to 
share a directory. - pub fn skills_for_config(&self, input: &SkillsLoadInput) -> SkillLoadOutcome { - let roots = self.skill_roots_for_config(input); + pub async fn skills_for_config( + &self, + input: &SkillsLoadInput, + fs: Option>, + ) -> SkillLoadOutcome { + let roots = self.skill_roots_for_config(input, fs).await; let skill_config_rules = skill_config_rules_from_stack(&input.config_layer_stack); let cache_key = config_skills_cache_key(&roots, &skill_config_rules); if let Some(outcome) = self.cached_outcome_for_config(&cache_key) { return outcome; } - let outcome = self.build_skill_outcome(roots, &skill_config_rules); + let outcome = self.build_skill_outcome(roots, &skill_config_rules).await; let mut cache = self .cache_by_config .write() @@ -103,12 +107,18 @@ impl SkillsManager { outcome } - pub fn skill_roots_for_config(&self, input: &SkillsLoadInput) -> Vec { + pub async fn skill_roots_for_config( + &self, + input: &SkillsLoadInput, + fs: Option>, + ) -> Vec { let mut roots = skill_roots( + fs, &input.config_layer_stack, - input.cwd.as_path(), + &input.cwd, input.effective_skill_roots.clone(), - ); + ) + .await; if !input.bundled_skills_enabled { roots.retain(|root| root.scope != SkillScope::System); } @@ -119,12 +129,9 @@ impl SkillsManager { &self, input: &SkillsLoadInput, force_reload: bool, + fs: Option>, ) -> SkillLoadOutcome { - if !force_reload && let Some(outcome) = self.cached_outcome_for_cwd(input.cwd.as_path()) { - return outcome; - } - - self.skills_for_cwd_with_extra_user_roots(input, force_reload, &[]) + self.skills_for_cwd_with_extra_user_roots(input, force_reload, &[], fs) .await } @@ -132,47 +139,57 @@ impl SkillsManager { &self, input: &SkillsLoadInput, force_reload: bool, - extra_user_roots: &[PathBuf], + extra_user_roots: &[AbsolutePathBuf], + fs: Option>, ) -> SkillLoadOutcome { - if !force_reload && let Some(outcome) = self.cached_outcome_for_cwd(input.cwd.as_path()) { + let use_cwd_cache = fs.is_some(); + if use_cwd_cache + && 
!force_reload + && let Some(outcome) = self.cached_outcome_for_cwd(&input.cwd) + { return outcome; } - let normalized_extra_user_roots = normalize_extra_user_roots(extra_user_roots); let mut roots = skill_roots( + fs.clone(), &input.config_layer_stack, - input.cwd.as_path(), + &input.cwd, input.effective_skill_roots.clone(), - ); + ) + .await; if !bundled_skills_enabled_from_stack(&input.config_layer_stack) { roots.retain(|root| root.scope != SkillScope::System); } - roots.extend( - normalized_extra_user_roots - .iter() - .cloned() - .map(|path| SkillRoot { - path, - scope: SkillScope::User, - }), - ); + if let Some(fs) = fs { + roots.extend( + normalize_extra_user_roots(extra_user_roots) + .into_iter() + .map(|path| SkillRoot { + path, + scope: SkillScope::User, + file_system: Arc::clone(&fs), + }), + ); + } let skill_config_rules = skill_config_rules_from_stack(&input.config_layer_stack); - let outcome = self.build_skill_outcome(roots, &skill_config_rules); - let mut cache = self - .cache_by_cwd - .write() - .unwrap_or_else(std::sync::PoisonError::into_inner); - cache.insert(input.cwd.clone(), outcome.clone()); + let outcome = self.build_skill_outcome(roots, &skill_config_rules).await; + if use_cwd_cache { + let mut cache = self + .cache_by_cwd + .write() + .unwrap_or_else(std::sync::PoisonError::into_inner); + cache.insert(input.cwd.clone(), outcome.clone()); + } outcome } - fn build_skill_outcome( + async fn build_skill_outcome( &self, roots: Vec, skill_config_rules: &SkillConfigRules, ) -> SkillLoadOutcome { let outcome = crate::filter_skill_load_outcome_for_product( - load_skills_from_roots(roots), + load_skills_from_roots(roots).await, self.restriction_product, ); let disabled_paths = resolve_disabled_skill_paths(&outcome.skills, skill_config_rules); @@ -202,7 +219,7 @@ impl SkillsManager { info!("skills cache cleared ({cleared} entries)"); } - fn cached_outcome_for_cwd(&self, cwd: &Path) -> Option { + fn cached_outcome_for_cwd(&self, cwd: &AbsolutePathBuf) 
-> Option { match self.cache_by_cwd.read() { Ok(cache) => cache.get(cwd).cloned(), Err(err) => err.into_inner().get(cwd).cloned(), @@ -222,7 +239,7 @@ impl SkillsManager { #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct ConfigSkillsCacheKey { - roots: Vec<(PathBuf, u8)>, + roots: Vec<(AbsolutePathBuf, u8)>, skill_config_rules: SkillConfigRules, } @@ -271,7 +288,7 @@ fn config_skills_cache_key( fn finalize_skill_outcome( mut outcome: SkillLoadOutcome, - disabled_paths: HashSet, + disabled_paths: HashSet, ) -> SkillLoadOutcome { outcome.disabled_paths = disabled_paths; let (by_scripts_dir, by_doc_path) = @@ -281,10 +298,10 @@ fn finalize_skill_outcome( outcome } -fn normalize_extra_user_roots(extra_user_roots: &[PathBuf]) -> Vec { - let mut normalized: Vec = extra_user_roots +fn normalize_extra_user_roots(extra_user_roots: &[AbsolutePathBuf]) -> Vec { + let mut normalized: Vec = extra_user_roots .iter() - .map(|path| dunce::canonicalize(path).unwrap_or_else(|_| path.clone())) + .map(|root| root.canonicalize().unwrap_or_else(|_| root.clone())) .collect(); normalized.sort_unstable(); normalized.dedup(); diff --git a/codex-rs/core-skills/src/manager_tests.rs b/codex-rs/core-skills/src/manager_tests.rs index 62218f7311..73800a51d0 100644 --- a/codex-rs/core-skills/src/manager_tests.rs +++ b/codex-rs/core-skills/src/manager_tests.rs @@ -7,11 +7,16 @@ use codex_config::CONFIG_TOML_FILE; use codex_config::ConfigLayerEntry; use codex_config::ConfigLayerStack; use codex_config::ConfigRequirementsToml; +use codex_exec_server::LOCAL_FS; use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_absolute_path::test_support::PathBufExt; +use codex_utils_absolute_path::test_support::PathExt; +use codex_utils_absolute_path::test_support::test_path_buf; use pretty_assertions::assert_eq; use std::collections::HashSet; use std::fs; use std::path::PathBuf; +use std::sync::Arc; use tempfile::TempDir; fn write_user_skill(codex_home: &TempDir, dir: &str, name: &str, description: 
&str) { @@ -57,11 +62,26 @@ fn test_skill(name: &str, path: PathBuf) -> SkillMetadata { interface: None, dependencies: None, policy: None, - path_to_skills_md: path, + path_to_skills_md: path + .abs() + .canonicalize() + .expect("skill path should canonicalize"), scope: SkillScope::User, } } +fn write_demo_skill(tempdir: &TempDir) -> PathBuf { + let skill_path = tempdir.path().join("skills").join("demo").join("SKILL.md"); + fs::create_dir_all(skill_path.parent().expect("skill path should have parent")) + .expect("create skill dir"); + fs::write( + &skill_path, + "---\nname: demo-skill\ndescription: demo description\n---\n\n# Body\n", + ) + .expect("write skill"); + skill_path +} + fn user_config_layer(codex_home: &TempDir, config_toml: &str) -> ConfigLayerEntry { let config_path = AbsolutePathBuf::try_from(codex_home.path().join(CONFIG_TOML_FILE)) .expect("user config path should be absolute"); @@ -118,19 +138,21 @@ enabled = {enabled} ) } -fn skills_for_config_with_stack( +async fn skills_for_config_with_stack( skills_manager: &SkillsManager, cwd: &TempDir, config_layer_stack: &ConfigLayerStack, - effective_skill_roots: &[PathBuf], + effective_skill_roots: &[AbsolutePathBuf], ) -> SkillLoadOutcome { let skills_input = SkillsLoadInput::new( - cwd.path().to_path_buf(), + cwd.path().abs(), effective_skill_roots.to_vec(), config_layer_stack.clone(), bundled_skills_enabled_from_stack(config_layer_stack), ); - skills_manager.skills_for_config(&skills_input) + skills_manager + .skills_for_config(&skills_input, Some(Arc::clone(&LOCAL_FS))) + .await } #[test] @@ -142,7 +164,7 @@ fn new_with_disabled_bundled_skills_removes_stale_cached_system_skills() { .expect("write stale system skill"); let _skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ false, ); @@ -158,12 +180,13 @@ async fn skills_for_config_reuses_cache_for_same_effective_config() { let cwd = tempfile::tempdir().expect("tempdir"); let 
config_layer_stack = config_stack(&codex_home, ""); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ true, ); write_user_skill(&codex_home, "a", "skill-a", "from a"); - let outcome1 = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]); + let outcome1 = + skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]).await; assert!( outcome1.skills.iter().any(|s| s.name == "skill-a"), "expected skill-a to be discovered" @@ -172,7 +195,8 @@ async fn skills_for_config_reuses_cache_for_same_effective_config() { // Write a new skill after the first call; the second call should reuse the config-aware cache // entry because the effective skill config is unchanged. write_user_skill(&codex_home, "b", "skill-b", "from b"); - let outcome2 = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]); + let outcome2 = + skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]).await; assert_eq!(outcome2.errors, outcome1.errors); assert_eq!(outcome2.skills, outcome1.skills); } @@ -197,9 +221,9 @@ async fn skills_for_config_disables_plugin_skills_by_name() { .parent() .and_then(std::path::Path::parent) .expect("plugin skill should live under a skills root") - .to_path_buf(); + .abs(); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ true, ); @@ -208,13 +232,16 @@ async fn skills_for_config_disables_plugin_skills_by_name() { &cwd, &config_layer_stack, &[plugin_skill_root], - ); + ) + .await; let skill = outcome .skills .iter() .find(|skill| skill.name == "sample:sample-search") .expect("plugin skill should load"); - let skill_path = dunce::canonicalize(skill_path).expect("skill path should canonicalize"); + let skill_path = dunce::canonicalize(skill_path) + .expect("skill path should canonicalize") + .abs(); 
assert_eq!(skill.path_to_skills_md, skill_path); assert!(outcome.disabled_paths.contains(&skill.path_to_skills_md)); @@ -233,15 +260,15 @@ async fn skills_for_cwd_reuses_cached_entry_even_when_entry_has_extra_roots() { let extra_root = tempfile::tempdir().expect("tempdir"); let config_layer_stack = config_stack(&codex_home, ""); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ true, ); - let _ = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]); + let _ = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]).await; write_user_skill(&extra_root, "x", "extra-skill", "from extra root"); - let extra_root_path = extra_root.path().to_path_buf(); + let extra_root_path = extra_root.path().abs(); let base_input = SkillsLoadInput::new( - cwd.path().to_path_buf(), + cwd.path().abs(), Vec::new(), config_layer_stack.clone(), bundled_skills_enabled_from_stack(&config_layer_stack), @@ -251,6 +278,7 @@ async fn skills_for_cwd_reuses_cached_entry_even_when_entry_has_extra_roots() { &base_input, /*force_reload*/ true, std::slice::from_ref(&extra_root_path), + Some(Arc::clone(&LOCAL_FS)), ) .await; assert!( @@ -269,18 +297,156 @@ async fn skills_for_cwd_reuses_cached_entry_even_when_entry_has_extra_roots() { // The cwd-only API returns the current cached entry for this cwd, even when that entry // was produced with extra roots. 
let base_input = SkillsLoadInput::new( - cwd.path().to_path_buf(), + cwd.path().abs(), Vec::new(), config_layer_stack.clone(), bundled_skills_enabled_from_stack(&config_layer_stack), ); let outcome_without_extra = skills_manager - .skills_for_cwd(&base_input, /*force_reload*/ false) + .skills_for_cwd( + &base_input, + /*force_reload*/ false, + Some(Arc::clone(&LOCAL_FS)), + ) .await; assert_eq!(outcome_without_extra.skills, outcome_with_extra.skills); assert_eq!(outcome_without_extra.errors, outcome_with_extra.errors); } +#[tokio::test] +async fn skills_for_cwd_loads_repo_user_and_extra_roots_with_local_fs() { + let codex_home = tempfile::tempdir().expect("tempdir"); + let cwd = tempfile::tempdir().expect("tempdir"); + let extra_root = tempfile::tempdir().expect("tempdir"); + let repo_dot_codex = cwd.path().join(".codex"); + fs::create_dir_all(&repo_dot_codex).expect("create repo config dir"); + + write_user_skill(&codex_home, "user", "user-skill", "from local user root"); + write_user_skill(&extra_root, "extra", "extra-skill", "from extra root"); + let repo_skill_dir = repo_dot_codex.join("skills/repo"); + fs::create_dir_all(&repo_skill_dir).expect("create repo skill dir"); + fs::write( + repo_skill_dir.join("SKILL.md"), + "---\nname: repo-skill\ndescription: from repo root\n---\n\n# Body\n", + ) + .expect("write repo skill"); + + let config_layer_stack = ConfigLayerStack::new( + vec![ + user_config_layer(&codex_home, ""), + ConfigLayerEntry::new( + ConfigLayerSource::Project { + dot_codex_folder: repo_dot_codex.abs(), + }, + toml::Value::Table(toml::map::Map::new()), + ), + ], + Default::default(), + ConfigRequirementsToml::default(), + ) + .expect("valid config layer stack"); + let skills_input = SkillsLoadInput::new( + cwd.path().abs(), + Vec::new(), + config_layer_stack.clone(), + bundled_skills_enabled_from_stack(&config_layer_stack), + ); + let skills_manager = SkillsManager::new( + codex_home.path().abs(), + /*bundled_skills_enabled*/ true, + ); + + let 
outcome = skills_manager + .skills_for_cwd_with_extra_user_roots( + &skills_input, + /*force_reload*/ true, + &[extra_root.path().abs()], + Some(Arc::clone(&LOCAL_FS)), + ) + .await; + + assert!( + outcome.errors.is_empty(), + "unexpected errors: {:?}", + outcome.errors + ); + let loaded_names = outcome + .skills + .iter() + .map(|skill| skill.name.as_str()) + .collect::>(); + assert!(loaded_names.contains("user-skill")); + assert!(loaded_names.contains("repo-skill")); + assert!(loaded_names.contains("extra-skill")); +} + +#[tokio::test] +async fn skills_for_cwd_without_fs_skips_repo_and_extra_roots() { + let codex_home = tempfile::tempdir().expect("tempdir"); + let cwd = tempfile::tempdir().expect("tempdir"); + let extra_root = tempfile::tempdir().expect("tempdir"); + let repo_dot_codex = cwd.path().join(".codex"); + fs::create_dir_all(&repo_dot_codex).expect("create repo config dir"); + + write_user_skill(&codex_home, "user", "user-skill", "from local user root"); + write_user_skill(&extra_root, "extra", "extra-skill", "from extra root"); + let repo_skill_dir = repo_dot_codex.join("skills/repo"); + fs::create_dir_all(&repo_skill_dir).expect("create repo skill dir"); + fs::write( + repo_skill_dir.join("SKILL.md"), + "---\nname: repo-skill\ndescription: from repo root\n---\n\n# Body\n", + ) + .expect("write repo skill"); + + let config_layer_stack = ConfigLayerStack::new( + vec![ + user_config_layer(&codex_home, ""), + ConfigLayerEntry::new( + ConfigLayerSource::Project { + dot_codex_folder: repo_dot_codex.abs(), + }, + toml::Value::Table(toml::map::Map::new()), + ), + ], + Default::default(), + ConfigRequirementsToml::default(), + ) + .expect("valid config layer stack"); + let skills_input = SkillsLoadInput::new( + cwd.path().abs(), + Vec::new(), + config_layer_stack.clone(), + bundled_skills_enabled_from_stack(&config_layer_stack), + ); + let skills_manager = SkillsManager::new( + codex_home.path().abs(), + /*bundled_skills_enabled*/ true, + ); + + let outcome = 
skills_manager + .skills_for_cwd_with_extra_user_roots( + &skills_input, + /*force_reload*/ true, + &[extra_root.path().abs()], + /*fs*/ None, + ) + .await; + + assert!( + outcome.errors.is_empty(), + "unexpected errors: {:?}", + outcome.errors + ); + let loaded_names = outcome + .skills + .iter() + .map(|skill| skill.name.as_str()) + .collect::>(); + assert!(loaded_names.contains("user-skill")); + assert!(!loaded_names.contains("repo-skill")); + assert!(!loaded_names.contains("extra-skill")); +} + #[tokio::test] async fn skills_for_config_excludes_bundled_skills_when_disabled_in_config() { let codex_home = tempfile::tempdir().expect("tempdir"); @@ -294,7 +460,7 @@ async fn skills_for_config_excludes_bundled_skills_when_disabled_in_config() { .expect("write bundled skill"); let config_layer_stack = config_stack(&codex_home, "[skills.bundled]\nenabled = false\n"); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ false, ); @@ -307,7 +473,8 @@ async fn skills_for_config_excludes_bundled_skills_when_disabled_in_config() { ) .expect("rewrite bundled skill"); - let outcome = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]); + let outcome = + skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]).await; assert!( outcome .skills @@ -330,17 +497,17 @@ async fn skills_for_cwd_with_extra_roots_only_refreshes_on_force_reload() { let extra_root_b = tempfile::tempdir().expect("tempdir"); let config_layer_stack = config_stack(&codex_home, ""); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ true, ); - let _ = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]); + let _ = skills_for_config_with_stack(&skills_manager, &cwd, &config_layer_stack, &[]).await; write_user_skill(&extra_root_a, "x", "extra-skill-a", "from extra root a"); 
write_user_skill(&extra_root_b, "x", "extra-skill-b", "from extra root b"); - let extra_root_a_path = extra_root_a.path().to_path_buf(); + let extra_root_a_path = extra_root_a.path().abs(); let base_input = SkillsLoadInput::new( - cwd.path().to_path_buf(), + cwd.path().abs(), Vec::new(), config_layer_stack.clone(), bundled_skills_enabled_from_stack(&config_layer_stack), @@ -350,6 +517,7 @@ async fn skills_for_cwd_with_extra_roots_only_refreshes_on_force_reload() { &base_input, /*force_reload*/ true, std::slice::from_ref(&extra_root_a_path), + Some(Arc::clone(&LOCAL_FS)), ) .await; assert!( @@ -365,12 +533,13 @@ async fn skills_for_cwd_with_extra_roots_only_refreshes_on_force_reload() { .all(|skill| skill.name != "extra-skill-b") ); - let extra_root_b_path = extra_root_b.path().to_path_buf(); + let extra_root_b_path = extra_root_b.path().abs(); let outcome_b = skills_manager .skills_for_cwd_with_extra_user_roots( &base_input, /*force_reload*/ false, std::slice::from_ref(&extra_root_b_path), + Some(Arc::clone(&LOCAL_FS)), ) .await; assert!( @@ -391,6 +560,7 @@ async fn skills_for_cwd_with_extra_roots_only_refreshes_on_force_reload() { &base_input, /*force_reload*/ true, std::slice::from_ref(&extra_root_b_path), + Some(Arc::clone(&LOCAL_FS)), ) .await; assert!( @@ -409,8 +579,8 @@ async fn skills_for_cwd_with_extra_roots_only_refreshes_on_force_reload() { #[test] fn normalize_extra_user_roots_is_stable_for_equivalent_inputs() { - let a = PathBuf::from("/tmp/a"); - let b = PathBuf::from("/tmp/b"); + let a = test_path_buf("/tmp/a").abs(); + let b = test_path_buf("/tmp/b").abs(); let first = normalize_extra_user_roots(&[a.clone(), b.clone(), a.clone()]); let second = normalize_extra_user_roots(&[b, a]); @@ -422,7 +592,7 @@ fn normalize_extra_user_roots_is_stable_for_equivalent_inputs() { #[test] fn disabled_paths_for_skills_allows_session_flags_to_override_user_layer() { let tempdir = tempfile::tempdir().expect("tempdir"); - let skill_path = 
tempdir.path().join("skills").join("demo").join("SKILL.md"); + let skill_path = write_demo_skill(&tempdir); let skill = test_skill("demo-skill", skill_path.clone()); let user_file = AbsolutePathBuf::try_from(tempdir.path().join("config.toml")) .expect("user config path should be absolute"); @@ -454,7 +624,7 @@ fn disabled_paths_for_skills_allows_session_flags_to_override_user_layer() { #[test] fn disabled_paths_for_skills_allows_session_flags_to_disable_user_enabled_skill() { let tempdir = tempfile::tempdir().expect("tempdir"); - let skill_path = tempdir.path().join("skills").join("demo").join("SKILL.md"); + let skill_path = write_demo_skill(&tempdir); let skill = test_skill("demo-skill", skill_path.clone()); let user_file = AbsolutePathBuf::try_from(tempdir.path().join("config.toml")) .expect("user config path should be absolute"); @@ -478,7 +648,10 @@ fn disabled_paths_for_skills_allows_session_flags_to_disable_user_enabled_skill( let skill_config_rules = skill_config_rules_from_stack(&stack); assert_eq!( resolve_disabled_skill_paths(&[skill], &skill_config_rules), - HashSet::from([skill_path]) + HashSet::from([skill_path + .abs() + .canonicalize() + .expect("skill path should canonicalize")]) ); } @@ -486,7 +659,7 @@ fn disabled_paths_for_skills_allows_session_flags_to_disable_user_enabled_skill( #[test] fn disabled_paths_for_skills_disables_matching_name_selectors() { let tempdir = tempfile::tempdir().expect("tempdir"); - let skill_path = tempdir.path().join("skills").join("demo").join("SKILL.md"); + let skill_path = write_demo_skill(&tempdir); let skill = test_skill("github:yeet", skill_path.clone()); let user_file = AbsolutePathBuf::try_from(tempdir.path().join("config.toml")) .expect("user config path should be absolute"); @@ -505,7 +678,10 @@ fn disabled_paths_for_skills_disables_matching_name_selectors() { let skill_config_rules = skill_config_rules_from_stack(&stack); assert_eq!( resolve_disabled_skill_paths(&[skill], &skill_config_rules), - 
HashSet::from([skill_path]) + HashSet::from([skill_path + .abs() + .canonicalize() + .expect("skill path should canonicalize")]) ); } @@ -513,7 +689,7 @@ fn disabled_paths_for_skills_disables_matching_name_selectors() { #[test] fn disabled_paths_for_skills_allows_name_selector_to_override_path_selector() { let tempdir = tempfile::tempdir().expect("tempdir"); - let skill_path = tempdir.path().join("skills").join("demo").join("SKILL.md"); + let skill_path = write_demo_skill(&tempdir); let skill = test_skill("github:yeet", skill_path.clone()); let user_file = AbsolutePathBuf::try_from(tempdir.path().join("config.toml")) .expect("user config path should be absolute"); @@ -560,18 +736,22 @@ async fn skills_for_config_ignores_cwd_cache_when_session_flags_reenable_skill() let child_stack = config_stack_with_session_flags(&codex_home, &disabled_skill_config, &enabled_skill_config); let skills_manager = SkillsManager::new( - codex_home.path().to_path_buf(), + codex_home.path().abs(), /*bundled_skills_enabled*/ true, ); let parent_input = SkillsLoadInput::new( - cwd.path().to_path_buf(), + cwd.path().abs(), Vec::new(), parent_stack.clone(), bundled_skills_enabled_from_stack(&parent_stack), ); let parent_outcome = skills_manager - .skills_for_cwd(&parent_input, /*force_reload*/ true) + .skills_for_cwd( + &parent_input, + /*force_reload*/ true, + Some(Arc::clone(&LOCAL_FS)), + ) .await; let parent_skill = parent_outcome .skills @@ -580,7 +760,8 @@ async fn skills_for_config_ignores_cwd_cache_when_session_flags_reenable_skill() .expect("demo skill should be discovered"); assert_eq!(parent_outcome.is_skill_enabled(parent_skill), false); - let child_outcome = skills_for_config_with_stack(&skills_manager, &cwd, &child_stack, &[]); + let child_outcome = + skills_for_config_with_stack(&skills_manager, &cwd, &child_stack, &[]).await; let child_skill = child_outcome .skills .iter() diff --git a/codex-rs/core-skills/src/mention_counts.rs b/codex-rs/core-skills/src/mention_counts.rs 
index a9b3da9d30..b7482ca36e 100644 --- a/codex-rs/core-skills/src/mention_counts.rs +++ b/codex-rs/core-skills/src/mention_counts.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::path::PathBuf; use super::SkillMetadata; +use codex_utils_absolute_path::AbsolutePathBuf; /// Counts how often each skill name appears (exact and ASCII-lowercase), excluding disabled paths. pub fn build_skill_name_counts( skills: &[SkillMetadata], - disabled_paths: &HashSet, + disabled_paths: &HashSet, ) -> (HashMap, HashMap) { let mut exact_counts: HashMap = HashMap::new(); let mut lower_counts: HashMap = HashMap::new(); diff --git a/codex-rs/core-skills/src/model.rs b/codex-rs/core-skills/src/model.rs index 319ca4e64e..eb9a6f132f 100644 --- a/codex-rs/core-skills/src/model.rs +++ b/codex-rs/core-skills/src/model.rs @@ -1,10 +1,12 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::path::PathBuf; +use std::fmt; use std::sync::Arc; +use codex_exec_server::ExecutorFileSystem; use codex_protocol::protocol::Product; use codex_protocol::protocol::SkillScope; +use codex_utils_absolute_path::AbsolutePathBuf; #[derive(Debug, Clone, PartialEq)] pub struct SkillMetadata { @@ -15,7 +17,7 @@ pub struct SkillMetadata { pub dependencies: Option, pub policy: Option, /// Path to the SKILLS.md file that declares this skill. 
- pub path_to_skills_md: PathBuf, + pub path_to_skills_md: AbsolutePathBuf, pub scope: SkillScope, } @@ -55,8 +57,8 @@ pub struct SkillPolicy { pub struct SkillInterface { pub display_name: Option, pub short_description: Option, - pub icon_small: Option, - pub icon_large: Option, + pub icon_small: Option, + pub icon_large: Option, pub brand_color: Option, pub default_prompt: Option, } @@ -78,7 +80,7 @@ pub struct SkillToolDependency { #[derive(Debug, Clone, PartialEq, Eq)] pub struct SkillError { - pub path: PathBuf, + pub path: AbsolutePathBuf, pub message: String, } @@ -86,9 +88,10 @@ pub struct SkillError { pub struct SkillLoadOutcome { pub skills: Vec, pub errors: Vec, - pub disabled_paths: HashSet, - pub(crate) implicit_skills_by_scripts_dir: Arc>, - pub(crate) implicit_skills_by_doc_path: Arc>, + pub disabled_paths: HashSet, + pub(crate) file_systems_by_skill_path: SkillFileSystemsByPath, + pub(crate) implicit_skills_by_scripts_dir: Arc>, + pub(crate) implicit_skills_by_doc_path: Arc>, } impl SkillLoadOutcome { @@ -113,6 +116,49 @@ impl SkillLoadOutcome { .iter() .map(|skill| (skill, self.is_skill_enabled(skill))) } + + pub(crate) fn file_system_for_skill( + &self, + skill: &SkillMetadata, + ) -> Option> { + self.file_systems_by_skill_path + .get(&skill.path_to_skills_md) + } +} + +#[derive(Clone, Default)] +pub(crate) struct SkillFileSystemsByPath { + values: Arc>>, +} + +impl SkillFileSystemsByPath { + pub(crate) fn new(values: HashMap>) -> Self { + Self { + values: Arc::new(values), + } + } + + fn get(&self, path: &AbsolutePathBuf) -> Option> { + self.values.get(path).map(Arc::clone) + } + + fn retain_paths(&mut self, paths: &HashSet) { + self.values = Arc::new( + self.values + .iter() + .filter(|(path, _)| paths.contains(*path)) + .map(|(path, fs)| (path.clone(), Arc::clone(fs))) + .collect(), + ); + } +} + +impl fmt::Debug for SkillFileSystemsByPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SkillFileSystemsByPath") 
+ .field("len", &self.values.len()) + .finish() + } } pub fn filter_skill_load_outcome_for_product( @@ -122,6 +168,14 @@ pub fn filter_skill_load_outcome_for_product( outcome .skills .retain(|skill| skill.matches_product_restriction_for_product(restriction_product)); + let retained_paths: HashSet = outcome + .skills + .iter() + .map(|skill| skill.path_to_skills_md.clone()) + .collect(); + outcome + .file_systems_by_skill_path + .retain_paths(&retained_paths); outcome.implicit_skills_by_scripts_dir = Arc::new( outcome .implicit_skills_by_scripts_dir diff --git a/codex-rs/core-skills/src/system.rs b/codex-rs/core-skills/src/system.rs index 394fe00c3b..5eec94c729 100644 --- a/codex-rs/core-skills/src/system.rs +++ b/codex-rs/core-skills/src/system.rs @@ -1,9 +1,8 @@ pub(crate) use codex_skills::install_system_skills; pub(crate) use codex_skills::system_cache_root_dir; -use std::path::Path; +use codex_utils_absolute_path::AbsolutePathBuf; -pub(crate) fn uninstall_system_skills(codex_home: &Path) { - let system_skills_dir = system_cache_root_dir(codex_home); - let _ = std::fs::remove_dir_all(&system_skills_dir); +pub(crate) fn uninstall_system_skills(codex_home: &AbsolutePathBuf) { + let _ = std::fs::remove_dir_all(system_cache_root_dir(codex_home)); } diff --git a/codex-rs/core/BUILD.bazel b/codex-rs/core/BUILD.bazel index ed01996273..434dc1f6a4 100644 --- a/codex-rs/core/BUILD.bazel +++ b/codex-rs/core/BUILD.bazel @@ -53,6 +53,7 @@ codex_rust_crate( "//codex-rs/linux-sandbox:codex-linux-sandbox", "//codex-rs/rmcp-client:test_stdio_server", "//codex-rs/rmcp-client:test_streamable_http_server", + "//codex-rs/responses-api-proxy:codex-responses-api-proxy", "//codex-rs/cli:codex", ], ) diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 55ce13afdc..a0d4259810 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -33,7 +33,9 @@ codex-async-utils = { workspace = true } codex-code-mode = { workspace = true } codex-connectors = { workspace 
= true } codex-config = { workspace = true } +codex-core-plugins = { workspace = true } codex-core-skills = { workspace = true } +crypto_box = { workspace = true } codex-exec-server = { workspace = true } codex-features = { workspace = true } codex-feedback = { workspace = true } @@ -41,6 +43,7 @@ codex-login = { workspace = true } codex-mcp = { workspace = true } codex-model-provider-info = { workspace = true } codex-models-manager = { workspace = true } +ed25519-dalek = { workspace = true } codex-shell-command = { workspace = true } codex-execpolicy = { workspace = true } codex-git-utils = { workspace = true } @@ -56,6 +59,7 @@ codex-rmcp-client = { workspace = true } codex-sandboxing = { workspace = true } codex-state = { workspace = true } codex-terminal-detection = { workspace = true } +codex-thread-store = { workspace = true } codex-tools = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-cache = { workspace = true } @@ -96,6 +100,7 @@ rmcp = { workspace = true, default-features = false, features = [ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha1 = { workspace = true } +sha2 = { workspace = true } shlex = { workspace = true } similar = { workspace = true } tempfile = { workspace = true } @@ -145,6 +150,7 @@ assert_cmd = { workspace = true } assert_matches = { workspace = true } codex-arg0 = { workspace = true } codex-otel = { workspace = true } +codex-test-binary-support = { workspace = true } codex-utils-cargo-bin = { workspace = true } core_test_support = { workspace = true } ctor = { workspace = true } diff --git a/codex-rs/core/README.md b/codex-rs/core/README.md index e1eab34935..2e311790d9 100644 --- a/codex-rs/core/README.md +++ b/codex-rs/core/README.md @@ -84,4 +84,6 @@ instead of running with weaker enforcement. ### All Platforms -Expects the binary containing `codex-core` to simulate the virtual `apply_patch` CLI when `arg1` is `--codex-run-as-apply-patch`. 
See the `codex-arg0` crate for details. +Expects the binary containing `codex-core` to simulate the virtual +`apply_patch` CLI when `arg1` is `--codex-run-as-apply-patch`. See the +`codex-arg0` crate for details. diff --git a/codex-rs/core/config.schema.json b/codex-rs/core/config.schema.json index bd74c46595..eb44f5c8fb 100644 --- a/codex-rs/core/config.schema.json +++ b/codex-rs/core/config.schema.json @@ -335,6 +335,9 @@ "apply_patch_freeform": { "type": "boolean" }, + "apply_patch_streaming_events": { + "type": "boolean" + }, "apps": { "type": "boolean" }, @@ -485,6 +488,9 @@ "steer": { "type": "boolean" }, + "telepathy": { + "type": "boolean" + }, "tool_call_mcp_elicitation": { "type": "boolean" }, @@ -497,6 +503,9 @@ "tui_app_server": { "type": "boolean" }, + "unavailable_dummy_tools": { + "type": "boolean" + }, "undo": { "type": "boolean" }, @@ -520,6 +529,9 @@ }, "web_search_request": { "type": "boolean" + }, + "workspace_dependencies": { + "type": "boolean" } }, "type": "object" @@ -664,6 +676,14 @@ ] }, "FilesystemPermissionsToml": { + "properties": { + "glob_scan_max_depth": { + "description": "Optional maximum depth for expanding unreadable glob patterns on platforms that snapshot glob matches before sandbox startup.", + "format": "uint", + "minimum": 1.0, + "type": "integer" + } + }, "type": "object" }, "ForcedLoginMethod": { @@ -770,6 +790,11 @@ "MarketplaceConfig": { "additionalProperties": false, "properties": { + "last_revision": { + "default": null, + "description": "Git revision Codex last successfully activated for this marketplace.", + "type": "string" + }, "last_updated": { "default": null, "description": "Last time Codex successfully added or refreshed this marketplace.", @@ -807,7 +832,8 @@ }, "MarketplaceSourceType": { "enum": [ - "git" + "git", + "local" ], "type": "string" }, @@ -845,7 +871,8 @@ "max_raw_memories_for_consolidation": { "description": "Maximum number of recent raw memories retained for global consolidation.", "format": 
"uint", - "minimum": 0.0, + "maximum": 4096.0, + "minimum": 1.0, "type": "integer" }, "max_rollout_age_days": { @@ -856,7 +883,8 @@ "max_rollouts_per_startup": { "description": "Maximum number of rollout candidates processed per pass.", "format": "uint", - "minimum": 0.0, + "maximum": 128.0, + "minimum": 1.0, "type": "integer" }, "max_unused_days": { @@ -1446,6 +1474,14 @@ "default": null, "type": "string" }, + "default_tools_approval_mode": { + "allOf": [ + { + "$ref": "#/definitions/AppToolApproval" + } + ], + "default": null + }, "disabled_tools": { "default": null, "items": { @@ -1485,6 +1521,10 @@ }, "type": "array" }, + "experimental_environment": { + "default": null, + "type": "string" + }, "http_headers": { "additionalProperties": { "type": "string" @@ -1522,6 +1562,10 @@ "format": "double", "type": "number" }, + "supports_parallel_tool_calls": { + "default": null, + "type": "boolean" + }, "tool_timeout_sec": { "default": null, "format": "double", @@ -2184,6 +2228,9 @@ "apply_patch_freeform": { "type": "boolean" }, + "apply_patch_streaming_events": { + "type": "boolean" + }, "apps": { "type": "boolean" }, @@ -2334,6 +2381,9 @@ "steer": { "type": "boolean" }, + "telepathy": { + "type": "boolean" + }, "tool_call_mcp_elicitation": { "type": "boolean" }, @@ -2346,6 +2396,9 @@ "tui_app_server": { "type": "boolean" }, + "unavailable_dummy_tools": { + "type": "boolean" + }, "undo": { "type": "boolean" }, @@ -2369,6 +2422,9 @@ }, "web_search_request": { "type": "boolean" + }, + "workspace_dependencies": { + "type": "boolean" } }, "type": "object" diff --git a/codex-rs/core/src/agent/control_tests.rs b/codex-rs/core/src/agent/control_tests.rs index d332853167..b444d8c67c 100644 --- a/codex-rs/core/src/agent/control_tests.rs +++ b/codex-rs/core/src/agent/control_tests.rs @@ -7,7 +7,6 @@ use crate::config::Config; use crate::config::ConfigBuilder; use crate::contextual_user_message::SUBAGENT_NOTIFICATION_OPEN_TAG; use assert_matches::assert_matches; -use chrono::Utc; 
use codex_features::Feature; use codex_login::CodexAuth; use codex_protocol::AgentPath; @@ -24,6 +23,9 @@ use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::TurnAbortedEvent; use codex_protocol::protocol::TurnCompleteEvent; use codex_protocol::protocol::TurnStartedEvent; +use codex_thread_store::ArchiveThreadParams; +use codex_thread_store::LocalThreadStore; +use codex_thread_store::ThreadStore; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::Duration; @@ -91,7 +93,7 @@ impl AgentControlHarness { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -186,7 +188,9 @@ async fn wait_for_subagent_notification(parent_thread: &Arc) -> boo sleep(Duration::from_millis(25)).await; } }; - timeout(Duration::from_secs(2), wait).await.is_ok() + // CI can take several seconds to schedule the detached completion watcher, + // especially on slower Windows runners. 
+ timeout(Duration::from_secs(10), wait).await.is_ok() } async fn persist_thread_for_tree_resume(thread: &Arc, message: &str) { @@ -905,7 +909,7 @@ async fn spawn_agent_respects_max_threads_limit() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -959,7 +963,7 @@ async fn spawn_agent_releases_slot_after_shutdown() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -1004,7 +1008,7 @@ async fn spawn_agent_limit_shared_across_clones() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -1051,7 +1055,7 @@ async fn resume_agent_respects_max_threads_limit() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -1109,7 +1113,7 @@ async fn resume_agent_releases_slot_after_resume_failure() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -1506,7 +1510,7 @@ async 
fn resume_thread_subagent_restores_stored_nickname_and_role() { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -1658,38 +1662,18 @@ async fn resume_agent_from_rollout_reads_archived_rollout_path() { .await .expect("child thread should exist"); persist_thread_for_tree_resume(&child_thread, "persist before archiving").await; - let rollout_path = child_thread - .rollout_path() - .expect("thread should have rollout path"); - let state_db = child_thread - .state_db() - .expect("thread should have state db handle"); - let _ = harness .control .shutdown_live_agent(child_thread_id) .await .expect("child shutdown should succeed"); - - let archived_root = harness - .config - .codex_home - .join(crate::ARCHIVED_SESSIONS_SUBDIR); - tokio::fs::create_dir_all(&archived_root) + let store = LocalThreadStore::new(codex_rollout::RolloutConfig::from_view(&harness.config)); + store + .archive_thread(ArchiveThreadParams { + thread_id: child_thread_id, + }) .await - .expect("archived root should exist"); - let archived_rollout_path = archived_root.join( - rollout_path - .file_name() - .expect("rollout file name should be present"), - ); - tokio::fs::rename(&rollout_path, &archived_rollout_path) - .await - .expect("rollout should move to archived path"); - state_db - .mark_archived(child_thread_id, archived_rollout_path.as_path(), Utc::now()) - .await - .expect("state db archive update should succeed"); + .expect("child thread should archive"); let resumed_thread_id = harness .control diff --git a/codex-rs/core/src/agent/role.rs b/codex-rs/core/src/agent/role.rs index 83c2b1843b..9569c02d71 100644 --- a/codex-rs/core/src/agent/role.rs +++ b/codex-rs/core/src/agent/role.rs @@ -18,6 +18,7 @@ use 
crate::config_loader::resolve_relative_paths_in_config_toml; use anyhow::anyhow; use codex_app_server_protocol::ConfigLayerSource; use codex_config::config_toml::ConfigToml; +use codex_exec_server::LOCAL_FS; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::path::Path; @@ -78,7 +79,8 @@ async fn apply_role_to_config_inner( role_layer_toml, preserve_current_profile, preserve_current_provider, - )?; + ) + .await?; Ok(()) } @@ -150,7 +152,7 @@ fn preservation_policy(config: &Config, role_layer_toml: &TomlValue) -> (bool, b mod reload { use super::*; - pub(super) fn build_next_config( + pub(super) async fn build_next_config( config: &Config, role_layer_toml: TomlValue, preserve_current_profile: bool, @@ -167,11 +169,13 @@ mod reload { } let mut next_config = Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), merged_config, reload_overrides(config, preserve_current_provider), config.codex_home.clone(), config_layer_stack, - )?; + ) + .await?; if preserve_current_profile { next_config.active_profile = config.active_profile.clone(); } diff --git a/codex-rs/core/src/agent/role_tests.rs b/codex-rs/core/src/agent/role_tests.rs index d68376ddd6..69aa10adb0 100644 --- a/codex-rs/core/src/agent/role_tests.rs +++ b/codex-rs/core/src/agent/role_tests.rs @@ -8,6 +8,7 @@ use crate::skills_load_input_from_config; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::Verbosity; use codex_protocol::openai_models::ReasoningEffort; +use codex_utils_absolute_path::test_support::PathExt; use pretty_assertions::assert_eq; use std::fs; use std::path::PathBuf; @@ -652,14 +653,17 @@ enabled = false .expect("custom role should apply"); let plugins_manager = Arc::new(PluginsManager::new(home.path().to_path_buf())); - let skills_manager = SkillsManager::new( - home.path().to_path_buf(), - /*bundled_skills_enabled*/ true, - ); - let plugin_outcome = plugins_manager.plugins_for_config(&config); + let skills_manager = + 
SkillsManager::new(home.path().abs(), /*bundled_skills_enabled*/ true); + let plugin_outcome = plugins_manager.plugins_for_config(&config).await; let effective_skill_roots = plugin_outcome.effective_skill_roots(); let skills_input = skills_load_input_from_config(&config, effective_skill_roots); - let outcome = skills_manager.skills_for_config(&skills_input); + let outcome = skills_manager + .skills_for_config( + &skills_input, + Some(Arc::clone(&codex_exec_server::LOCAL_FS)), + ) + .await; let skill = outcome .skills .iter() diff --git a/codex-rs/core/src/agent_identity.rs b/codex-rs/core/src/agent_identity.rs new file mode 100644 index 0000000000..cfcab507bb --- /dev/null +++ b/codex-rs/core/src/agent_identity.rs @@ -0,0 +1,810 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use anyhow::Result; +use base64::Engine as _; +use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use base64::engine::general_purpose::URL_SAFE_NO_PAD; +use chrono::SecondsFormat; +use chrono::Utc; +use codex_features::Feature; +use codex_login::AgentIdentityAuthRecord; +use codex_login::AuthManager; +use codex_login::CodexAuth; +use codex_login::default_client::create_client; +use codex_protocol::protocol::SessionSource; +use ed25519_dalek::SigningKey; +use ed25519_dalek::VerifyingKey; +use ed25519_dalek::pkcs8::DecodePrivateKey; +use ed25519_dalek::pkcs8::EncodePrivateKey; +use rand::TryRngCore; +use rand::rngs::OsRng; +use serde::Deserialize; +use serde::Serialize; +use tokio::sync::Mutex; +use tracing::debug; +use tracing::info; +use tracing::warn; + +mod task_registration; + +pub(crate) use task_registration::RegisteredAgentTask; + +use crate::config::Config; + +const AGENT_REGISTRATION_TIMEOUT: Duration = Duration::from_secs(15); +const AGENT_IDENTITY_BISCUIT_TIMEOUT: Duration = Duration::from_secs(15); + +#[derive(Clone)] +pub(crate) struct AgentIdentityManager { + auth_manager: Arc, + chatgpt_base_url: String, + feature_enabled: bool, + abom: 
AgentBillOfMaterials, + ensure_lock: Arc>, +} + +impl std::fmt::Debug for AgentIdentityManager { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("AgentIdentityManager") + .field("chatgpt_base_url", &self.chatgpt_base_url) + .field("feature_enabled", &self.feature_enabled) + .field("abom", &self.abom) + .finish_non_exhaustive() + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub(crate) struct StoredAgentIdentity { + pub(crate) binding_id: String, + pub(crate) chatgpt_account_id: String, + pub(crate) chatgpt_user_id: Option, + pub(crate) agent_runtime_id: String, + pub(crate) private_key_pkcs8_base64: String, + pub(crate) public_key_ssh: String, + pub(crate) registered_at: String, + pub(crate) abom: AgentBillOfMaterials, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub(crate) struct AgentBillOfMaterials { + pub(crate) agent_version: String, + pub(crate) agent_harness_id: String, + pub(crate) running_location: String, +} + +#[derive(Debug, Serialize)] +struct RegisterAgentRequest { + abom: AgentBillOfMaterials, + agent_public_key: String, + capabilities: Vec, +} + +#[derive(Debug, Deserialize)] +struct RegisterAgentResponse { + agent_runtime_id: String, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct AgentIdentityBinding { + binding_id: String, + chatgpt_account_id: String, + chatgpt_user_id: Option, + access_token: String, +} + +struct GeneratedAgentKeyMaterial { + private_key_pkcs8_base64: String, + public_key_ssh: String, +} + +impl AgentIdentityManager { + pub(crate) fn new( + config: &Config, + auth_manager: Arc, + session_source: SessionSource, + ) -> Self { + Self { + auth_manager, + chatgpt_base_url: config.chatgpt_base_url.clone(), + feature_enabled: config.features.enabled(Feature::UseAgentIdentity), + abom: build_abom(session_source), + ensure_lock: Arc::new(Mutex::new(())), + } + } + + pub(crate) fn is_enabled(&self) -> bool { + self.feature_enabled + } + + 
pub(crate) async fn ensure_registered_identity(&self) -> Result> { + if !self.feature_enabled { + return Ok(None); + } + + let Some((auth, binding)) = self.current_auth_binding().await else { + return Ok(None); + }; + + self.ensure_registered_identity_for_binding(&auth, &binding) + .await + .map(Some) + } + + async fn ensure_registered_identity_for_binding( + &self, + auth: &CodexAuth, + binding: &AgentIdentityBinding, + ) -> Result { + let _guard = self.ensure_lock.lock().await; + + if let Some(stored_identity) = self.load_stored_identity(auth, binding)? { + info!( + agent_runtime_id = %stored_identity.agent_runtime_id, + binding_id = %binding.binding_id, + "reusing stored agent identity" + ); + return Ok(stored_identity); + } + + let stored_identity = self.register_agent_identity(binding).await?; + self.store_identity(auth, &stored_identity)?; + Ok(stored_identity) + } + + pub(crate) async fn task_matches_current_binding(&self, task: &RegisteredAgentTask) -> bool { + if !self.feature_enabled { + return false; + } + + self.current_auth_binding() + .await + .is_some_and(|(_, binding)| task.matches_binding(&binding)) + } + + async fn current_auth_binding(&self) -> Option<(CodexAuth, AgentIdentityBinding)> { + let Some(auth) = self.auth_manager.auth().await else { + debug!("skipping agent identity flow because no auth is available"); + return None; + }; + + let binding = + AgentIdentityBinding::from_auth(&auth, self.auth_manager.forced_chatgpt_workspace_id()); + if binding.is_none() { + debug!("skipping agent identity flow because ChatGPT auth is unavailable"); + } + binding.map(|binding| (auth, binding)) + } + + async fn register_agent_identity( + &self, + binding: &AgentIdentityBinding, + ) -> Result { + let key_material = generate_agent_key_material()?; + let request_body = RegisterAgentRequest { + abom: self.abom.clone(), + agent_public_key: key_material.public_key_ssh.clone(), + capabilities: Vec::new(), + }; + + let url = 
agent_registration_url(&self.chatgpt_base_url); + let human_biscuit = self.mint_human_biscuit(binding, "POST", &url).await?; + let client = create_client(); + let response = client + .post(&url) + .header("X-OpenAI-Authorization", human_biscuit) + .json(&request_body) + .timeout(AGENT_REGISTRATION_TIMEOUT) + .send() + .await + .with_context(|| { + format!("failed to send agent identity registration request to {url}") + })?; + + if response.status().is_success() { + let response_body = response + .json::() + .await + .with_context(|| format!("failed to parse agent identity response from {url}"))?; + let stored_identity = StoredAgentIdentity { + binding_id: binding.binding_id.clone(), + chatgpt_account_id: binding.chatgpt_account_id.clone(), + chatgpt_user_id: binding.chatgpt_user_id.clone(), + agent_runtime_id: response_body.agent_runtime_id, + private_key_pkcs8_base64: key_material.private_key_pkcs8_base64, + public_key_ssh: key_material.public_key_ssh, + registered_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + abom: self.abom.clone(), + }; + info!( + agent_runtime_id = %stored_identity.agent_runtime_id, + binding_id = %binding.binding_id, + "registered agent identity" + ); + return Ok(stored_identity); + } + + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + anyhow::bail!("agent identity registration failed with status {status} from {url}: {body}") + } + + async fn mint_human_biscuit( + &self, + binding: &AgentIdentityBinding, + target_method: &str, + target_url: &str, + ) -> Result { + let url = agent_identity_biscuit_url(&self.chatgpt_base_url); + let request_id = agent_identity_request_id()?; + let client = create_client(); + let response = client + .get(&url) + .bearer_auth(&binding.access_token) + .header("X-Request-Id", request_id.clone()) + .header("X-Original-Method", target_method) + .header("X-Original-Url", target_url) + .timeout(AGENT_IDENTITY_BISCUIT_TIMEOUT) + .send() + .await + .with_context(|| 
format!("failed to send agent identity biscuit request to {url}"))?; + + if response.status().is_success() { + let human_biscuit = response + .headers() + .get("x-openai-authorization") + .context("agent identity biscuit response did not include x-openai-authorization")? + .to_str() + .context("agent identity biscuit response header was not valid UTF-8")? + .to_string(); + info!( + request_id = %request_id, + "minted human biscuit for agent identity registration" + ); + return Ok(human_biscuit); + } + + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + anyhow::bail!( + "agent identity biscuit minting failed with status {status} from {url}: {body}" + ) + } + + fn load_stored_identity( + &self, + auth: &CodexAuth, + binding: &AgentIdentityBinding, + ) -> Result> { + let Some(record) = auth.get_agent_identity(&binding.chatgpt_account_id) else { + return Ok(None); + }; + + let stored_identity = + match StoredAgentIdentity::from_auth_record(binding, record, self.abom.clone()) { + Ok(stored_identity) => stored_identity, + Err(error) => { + warn!( + binding_id = %binding.binding_id, + error = %error, + "stored agent identity is invalid; deleting cached value" + ); + auth.remove_agent_identity()?; + return Ok(None); + } + }; + + if !stored_identity.matches_binding(binding) { + warn!( + binding_id = %binding.binding_id, + "stored agent identity binding no longer matches current auth; deleting cached value" + ); + auth.remove_agent_identity()?; + return Ok(None); + } + + if let Err(error) = stored_identity.validate_key_material() { + warn!( + agent_runtime_id = %stored_identity.agent_runtime_id, + binding_id = %binding.binding_id, + error = %error, + "stored agent identity key material is invalid; deleting cached value" + ); + auth.remove_agent_identity()?; + return Ok(None); + } + + Ok(Some(stored_identity)) + } + + fn store_identity( + &self, + auth: &CodexAuth, + stored_identity: &StoredAgentIdentity, + ) -> Result<()> { + 
auth.set_agent_identity(stored_identity.to_auth_record())?; + Ok(()) + } + + #[cfg(test)] + fn new_for_tests( + auth_manager: Arc, + feature_enabled: bool, + chatgpt_base_url: String, + session_source: SessionSource, + ) -> Self { + Self { + auth_manager, + chatgpt_base_url, + feature_enabled, + abom: build_abom(session_source), + ensure_lock: Arc::new(Mutex::new(())), + } + } +} + +impl StoredAgentIdentity { + fn from_auth_record( + binding: &AgentIdentityBinding, + record: AgentIdentityAuthRecord, + abom: AgentBillOfMaterials, + ) -> Result { + if record.workspace_id != binding.chatgpt_account_id { + anyhow::bail!( + "stored agent identity workspace {:?} does not match current workspace {:?}", + record.workspace_id, + binding.chatgpt_account_id + ); + } + let signing_key = signing_key_from_private_key_pkcs8_base64(&record.agent_private_key)?; + Ok(Self { + binding_id: binding.binding_id.clone(), + chatgpt_account_id: binding.chatgpt_account_id.clone(), + chatgpt_user_id: record.chatgpt_user_id, + agent_runtime_id: record.agent_runtime_id, + private_key_pkcs8_base64: record.agent_private_key, + public_key_ssh: encode_ssh_ed25519_public_key(&signing_key.verifying_key()), + registered_at: record.registered_at, + abom, + }) + } + + fn to_auth_record(&self) -> AgentIdentityAuthRecord { + AgentIdentityAuthRecord { + workspace_id: self.chatgpt_account_id.clone(), + chatgpt_user_id: self.chatgpt_user_id.clone(), + agent_runtime_id: self.agent_runtime_id.clone(), + agent_private_key: self.private_key_pkcs8_base64.clone(), + registered_at: self.registered_at.clone(), + } + } + + fn matches_binding(&self, binding: &AgentIdentityBinding) -> bool { + binding.matches_parts( + &self.binding_id, + &self.chatgpt_account_id, + self.chatgpt_user_id.as_deref(), + ) + } + + fn validate_key_material(&self) -> Result<()> { + let signing_key = self.signing_key()?; + let derived_public_key = encode_ssh_ed25519_public_key(&signing_key.verifying_key()); + anyhow::ensure!( + 
self.public_key_ssh == derived_public_key, + "stored public key does not match the private key" + ); + Ok(()) + } + + pub(crate) fn signing_key(&self) -> Result { + signing_key_from_private_key_pkcs8_base64(&self.private_key_pkcs8_base64) + } +} + +impl AgentIdentityBinding { + fn matches_parts( + &self, + binding_id: &str, + chatgpt_account_id: &str, + chatgpt_user_id: Option<&str>, + ) -> bool { + binding_id == self.binding_id + && chatgpt_account_id == self.chatgpt_account_id + && match self.chatgpt_user_id.as_deref() { + Some(expected_user_id) => chatgpt_user_id == Some(expected_user_id), + None => true, + } + } + + fn from_auth(auth: &CodexAuth, forced_workspace_id: Option) -> Option { + if !auth.is_chatgpt_auth() { + return None; + } + + let token_data = auth.get_token_data().ok()?; + let resolved_account_id = + forced_workspace_id + .filter(|value| !value.is_empty()) + .or(token_data + .account_id + .clone() + .filter(|value| !value.is_empty()))?; + + Some(Self { + binding_id: format!("chatgpt-account-{resolved_account_id}"), + chatgpt_account_id: resolved_account_id, + chatgpt_user_id: token_data + .id_token + .chatgpt_user_id + .filter(|value| !value.is_empty()), + access_token: token_data.access_token, + }) + } +} + +fn build_abom(session_source: SessionSource) -> AgentBillOfMaterials { + AgentBillOfMaterials { + agent_version: env!("CARGO_PKG_VERSION").to_string(), + agent_harness_id: match &session_source { + SessionSource::VSCode => "codex-app".to_string(), + SessionSource::Cli + | SessionSource::Exec + | SessionSource::Mcp + | SessionSource::Custom(_) + | SessionSource::SubAgent(_) + | SessionSource::Unknown => "codex-cli".to_string(), + }, + running_location: format!("{}-{}", session_source, std::env::consts::OS), + } +} + +fn generate_agent_key_material() -> Result { + let mut secret_key_bytes = [0u8; 32]; + OsRng + .try_fill_bytes(&mut secret_key_bytes) + .context("failed to generate agent identity private key bytes")?; + let signing_key = 
SigningKey::from_bytes(&secret_key_bytes); + let private_key_pkcs8 = signing_key + .to_pkcs8_der() + .context("failed to encode agent identity private key as PKCS#8")?; + + Ok(GeneratedAgentKeyMaterial { + private_key_pkcs8_base64: BASE64_STANDARD.encode(private_key_pkcs8.as_bytes()), + public_key_ssh: encode_ssh_ed25519_public_key(&signing_key.verifying_key()), + }) +} + +fn encode_ssh_ed25519_public_key(verifying_key: &VerifyingKey) -> String { + let mut blob = Vec::with_capacity(4 + 11 + 4 + 32); + append_ssh_string(&mut blob, b"ssh-ed25519"); + append_ssh_string(&mut blob, verifying_key.as_bytes()); + format!("ssh-ed25519 {}", BASE64_STANDARD.encode(blob)) +} + +fn append_ssh_string(buf: &mut Vec, value: &[u8]) { + buf.extend_from_slice(&(value.len() as u32).to_be_bytes()); + buf.extend_from_slice(value); +} + +fn agent_registration_url(chatgpt_base_url: &str) -> String { + let trimmed = chatgpt_base_url.trim_end_matches('/'); + format!("{trimmed}/v1/agent/register") +} + +fn signing_key_from_private_key_pkcs8_base64(private_key_pkcs8_base64: &str) -> Result { + let private_key = BASE64_STANDARD + .decode(private_key_pkcs8_base64) + .context("stored agent identity private key is not valid base64")?; + SigningKey::from_pkcs8_der(&private_key) + .context("stored agent identity private key is not valid PKCS#8") +} + +fn agent_identity_biscuit_url(chatgpt_base_url: &str) -> String { + let trimmed = chatgpt_base_url.trim_end_matches('/'); + format!("{trimmed}/authenticate_app_v2") +} + +fn agent_identity_request_id() -> Result { + let mut request_id_bytes = [0u8; 16]; + OsRng + .try_fill_bytes(&mut request_id_bytes) + .context("failed to generate agent identity request id")?; + Ok(format!( + "codex-agent-identity-{}", + URL_SAFE_NO_PAD.encode(request_id_bytes) + )) +} + +#[cfg(test)] +mod tests { + use super::*; + + use base64::engine::general_purpose::URL_SAFE_NO_PAD; + use codex_app_server_protocol::AuthMode as ApiAuthMode; + use 
codex_login::AuthCredentialsStoreMode; + use codex_login::AuthDotJson; + use codex_login::save_auth; + use codex_login::token_data::IdTokenInfo; + use codex_login::token_data::TokenData; + use pretty_assertions::assert_eq; + use wiremock::Mock; + use wiremock::MockServer; + use wiremock::ResponseTemplate; + use wiremock::matchers::header; + use wiremock::matchers::method; + use wiremock::matchers::path; + + #[tokio::test] + async fn ensure_registered_identity_skips_when_feature_is_disabled() { + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-123", Some("user-123"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ false, + "https://chatgpt.com/backend-api/".to_string(), + SessionSource::Cli, + ); + + assert_eq!(manager.ensure_registered_identity().await.unwrap(), None); + } + + #[tokio::test] + async fn ensure_registered_identity_skips_for_api_key_auth() { + let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test-key")); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + "https://chatgpt.com/backend-api/".to_string(), + SessionSource::Cli, + ); + + assert_eq!(manager.ensure_registered_identity().await.unwrap(), None); + } + + #[tokio::test] + async fn ensure_registered_identity_registers_and_reuses_cached_identity() { + let server = MockServer::start().await; + let chatgpt_base_url = server.uri(); + mount_human_biscuit(&server, &chatgpt_base_url).await; + Mock::given(method("POST")) + .and(path("/v1/agent/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "agent_runtime_id": "agent_123", + }))) + .expect(1) + .mount(&server) + .await; + + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-123", Some("user-123"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + 
/*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + + let first = manager + .ensure_registered_identity() + .await + .unwrap() + .expect("identity should be registered"); + let second = manager + .ensure_registered_identity() + .await + .unwrap() + .expect("identity should be reused"); + + assert_eq!(first.agent_runtime_id, "agent_123"); + assert_eq!(first, second); + assert_eq!(first.abom.agent_harness_id, "codex-cli"); + assert_eq!(first.chatgpt_account_id, "account-123"); + assert_eq!(first.chatgpt_user_id.as_deref(), Some("user-123")); + } + + #[tokio::test] + async fn ensure_registered_identity_deletes_invalid_cached_identity_and_reregisters() { + let server = MockServer::start().await; + let chatgpt_base_url = server.uri(); + mount_human_biscuit(&server, &chatgpt_base_url).await; + Mock::given(method("POST")) + .and(path("/v1/agent/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "agent_runtime_id": "agent_456", + }))) + .expect(1) + .mount(&server) + .await; + + let auth = make_chatgpt_auth("account-123", Some("user-123")); + let auth_manager = AuthManager::from_auth_for_testing(auth.clone()); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + + let binding = + AgentIdentityBinding::from_auth(&auth, /*forced_workspace_id*/ None).expect("binding"); + auth.set_agent_identity(AgentIdentityAuthRecord { + workspace_id: "account-123".to_string(), + chatgpt_user_id: Some("user-123".to_string()), + agent_runtime_id: "agent_invalid".to_string(), + agent_private_key: "not-valid-base64".to_string(), + registered_at: "2026-01-01T00:00:00Z".to_string(), + }) + .expect("seed invalid identity"); + + let stored = manager + .ensure_registered_identity() + .await + .unwrap() + .expect("identity should be registered"); + + assert_eq!(stored.agent_runtime_id, 
"agent_456"); + let persisted = auth + .get_agent_identity(&binding.chatgpt_account_id) + .expect("stored identity"); + assert_eq!(persisted.agent_runtime_id, "agent_456"); + } + + #[tokio::test] + async fn ensure_registered_identity_deletes_different_user_identity_and_reregisters() { + let server = MockServer::start().await; + let chatgpt_base_url = server.uri(); + mount_human_biscuit(&server, &chatgpt_base_url).await; + Mock::given(method("POST")) + .and(path("/v1/agent/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "agent_runtime_id": "agent_new", + }))) + .expect(1) + .mount(&server) + .await; + + let auth = make_chatgpt_auth("account-123", Some("user-new")); + let stale_key = generate_agent_key_material().expect("key material"); + auth.set_agent_identity(AgentIdentityAuthRecord { + workspace_id: "account-123".to_string(), + chatgpt_user_id: Some("user-old".to_string()), + agent_runtime_id: "agent_old".to_string(), + agent_private_key: stale_key.private_key_pkcs8_base64, + registered_at: "2026-01-01T00:00:00Z".to_string(), + }) + .expect("seed stale identity"); + + let auth_manager = AuthManager::from_auth_for_testing(auth.clone()); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + + let stored = manager + .ensure_registered_identity() + .await + .unwrap() + .expect("identity should be registered"); + + assert_eq!(stored.agent_runtime_id, "agent_new"); + assert_eq!(stored.chatgpt_user_id.as_deref(), Some("user-new")); + let persisted = auth + .get_agent_identity("account-123") + .expect("stored identity"); + assert_eq!(persisted.agent_runtime_id, "agent_new"); + assert_eq!(persisted.chatgpt_user_id.as_deref(), Some("user-new")); + } + + #[tokio::test] + async fn ensure_registered_identity_uses_chatgpt_base_url() { + let server = MockServer::start().await; + let 
chatgpt_base_url = format!("{}/backend-api", server.uri()); + mount_human_biscuit(&server, &chatgpt_base_url).await; + Mock::given(method("POST")) + .and(path("/backend-api/v1/agent/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "agent_runtime_id": "agent_canonical", + }))) + .expect(1) + .mount(&server) + .await; + + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-123", Some("user-123"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + + let stored = manager + .ensure_registered_identity() + .await + .unwrap() + .expect("identity should be registered"); + assert_eq!(stored.agent_runtime_id, "agent_canonical"); + } + + async fn mount_human_biscuit(server: &MockServer, chatgpt_base_url: &str) { + let biscuit_url = agent_identity_biscuit_url(chatgpt_base_url); + let biscuit_path = reqwest::Url::parse(&biscuit_url) + .expect("biscuit URL parses") + .path() + .to_string(); + let target_url = agent_registration_url(chatgpt_base_url); + Mock::given(method("GET")) + .and(path(biscuit_path)) + .and(header("authorization", "Bearer access-token-account-123")) + .and(header("x-original-method", "POST")) + .and(header("x-original-url", target_url)) + .respond_with( + ResponseTemplate::new(200).insert_header("x-openai-authorization", "human-biscuit"), + ) + .expect(1) + .mount(server) + .await; + } + + #[test] + fn encode_ssh_ed25519_public_key_matches_expected_wire_shape() { + let key_material = generate_agent_key_material().expect("key material"); + let (_, encoded_blob) = key_material + .public_key_ssh + .split_once(' ') + .expect("public key contains scheme"); + let decoded = BASE64_STANDARD.decode(encoded_blob).expect("base64"); + + assert_eq!(&decoded[..4], 11u32.to_be_bytes().as_slice()); + assert_eq!(&decoded[4..15], b"ssh-ed25519"); + 
assert_eq!(&decoded[15..19], 32u32.to_be_bytes().as_slice()); + assert_eq!(decoded.len(), 51); + } + + fn make_chatgpt_auth(account_id: &str, user_id: Option<&str>) -> CodexAuth { + let tempdir = tempfile::tempdir().expect("tempdir"); + let auth_json = AuthDotJson { + auth_mode: Some(ApiAuthMode::Chatgpt), + openai_api_key: None, + tokens: Some(TokenData { + id_token: IdTokenInfo { + email: None, + chatgpt_plan_type: None, + chatgpt_user_id: user_id.map(ToOwned::to_owned), + chatgpt_account_id: Some(account_id.to_string()), + chatgpt_account_is_fedramp: false, + raw_jwt: fake_id_token(account_id, user_id), + }, + access_token: format!("access-token-{account_id}"), + refresh_token: "refresh-token".to_string(), + account_id: Some(account_id.to_string()), + }), + last_refresh: Some(Utc::now()), + agent_identity: None, + }; + save_auth(tempdir.path(), &auth_json, AuthCredentialsStoreMode::File).expect("save auth"); + CodexAuth::from_auth_storage(tempdir.path(), AuthCredentialsStoreMode::File) + .expect("load auth") + .expect("auth") + } + + fn fake_id_token(account_id: &str, user_id: Option<&str>) -> String { + let header = URL_SAFE_NO_PAD.encode(r#"{"alg":"none","typ":"JWT"}"#); + let payload = serde_json::json!({ + "https://api.openai.com/auth": { + "chatgpt_user_id": user_id, + "chatgpt_account_id": account_id, + } + }); + let payload = URL_SAFE_NO_PAD.encode(payload.to_string()); + format!("{header}.{payload}.signature") + } +} diff --git a/codex-rs/core/src/agent_identity/task_registration.rs b/codex-rs/core/src/agent_identity/task_registration.rs new file mode 100644 index 0000000000..4fc5d51282 --- /dev/null +++ b/codex-rs/core/src/agent_identity/task_registration.rs @@ -0,0 +1,470 @@ +use std::time::Duration; + +use anyhow::Context; +use anyhow::Result; +use crypto_box::SecretKey as Curve25519SecretKey; +use ed25519_dalek::Signer as _; +use serde::Deserialize; +use serde::Serialize; +use sha2::Digest as _; +use sha2::Sha512; +use tracing::info; + +use super::*; 
+ +const AGENT_TASK_REGISTRATION_TIMEOUT: Duration = Duration::from_secs(15); + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct RegisteredAgentTask { + pub(crate) binding_id: String, + pub(crate) chatgpt_account_id: String, + pub(crate) chatgpt_user_id: Option, + pub(crate) agent_runtime_id: String, + pub(crate) task_id: String, + pub(crate) registered_at: String, +} + +#[derive(Debug, Serialize)] +struct RegisterTaskRequest { + signature: String, + timestamp: String, +} + +#[derive(Debug, Deserialize)] +struct RegisterTaskResponse { + encrypted_task_id: String, +} + +impl AgentIdentityManager { + pub(crate) async fn register_task(&self) -> Result> { + if !self.feature_enabled { + return Ok(None); + } + + let Some((auth, binding)) = self.current_auth_binding().await else { + return Ok(None); + }; + + self.register_task_for_binding(auth, binding).await + } + + async fn register_task_for_binding( + &self, + auth: CodexAuth, + binding: AgentIdentityBinding, + ) -> Result> { + let stored_identity = self + .ensure_registered_identity_for_binding(&auth, &binding) + .await?; + + let timestamp = Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true); + let request_body = RegisterTaskRequest { + signature: sign_task_registration_payload(&stored_identity, ×tamp)?, + timestamp, + }; + + let client = create_client(); + let url = + agent_task_registration_url(&self.chatgpt_base_url, &stored_identity.agent_runtime_id); + let human_biscuit = self.mint_human_biscuit(&binding, "POST", &url).await?; + let response = client + .post(&url) + .header("X-OpenAI-Authorization", human_biscuit) + .json(&request_body) + .timeout(AGENT_TASK_REGISTRATION_TIMEOUT) + .send() + .await + .with_context(|| format!("failed to send agent task registration request to {url}"))?; + + if response.status().is_success() { + let response_body = response + .json::() + .await + .with_context(|| format!("failed to parse agent task response from {url}"))?; + let registered_task = RegisteredAgentTask { 
+ binding_id: stored_identity.binding_id.clone(), + chatgpt_account_id: stored_identity.chatgpt_account_id.clone(), + chatgpt_user_id: stored_identity.chatgpt_user_id.clone(), + agent_runtime_id: stored_identity.agent_runtime_id.clone(), + task_id: decrypt_task_id_response( + &stored_identity, + &response_body.encrypted_task_id, + )?, + registered_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + }; + info!( + agent_runtime_id = %registered_task.agent_runtime_id, + task_id = %registered_task.task_id, + "registered agent task" + ); + return Ok(Some(registered_task)); + } + + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + anyhow::bail!("agent task registration failed with status {status} from {url}: {body}") + } +} + +impl RegisteredAgentTask { + pub(super) fn matches_binding(&self, binding: &AgentIdentityBinding) -> bool { + binding.matches_parts( + &self.binding_id, + &self.chatgpt_account_id, + self.chatgpt_user_id.as_deref(), + ) + } + + pub(crate) fn has_same_binding(&self, other: &Self) -> bool { + self.binding_id == other.binding_id + && self.chatgpt_account_id == other.chatgpt_account_id + && self.chatgpt_user_id == other.chatgpt_user_id + } +} + +fn sign_task_registration_payload( + stored_identity: &StoredAgentIdentity, + timestamp: &str, +) -> Result { + let signing_key = stored_identity.signing_key()?; + let payload = format!("{}:{timestamp}", stored_identity.agent_runtime_id); + Ok(BASE64_STANDARD.encode(signing_key.sign(payload.as_bytes()).to_bytes())) +} + +fn decrypt_task_id_response( + stored_identity: &StoredAgentIdentity, + encrypted_task_id: &str, +) -> Result { + let signing_key = stored_identity.signing_key()?; + let ciphertext = BASE64_STANDARD + .decode(encrypted_task_id) + .context("encrypted task id is not valid base64")?; + let plaintext = curve25519_secret_key_from_signing_key(&signing_key) + .unseal(&ciphertext) + .map_err(|_| anyhow::anyhow!("failed to decrypt encrypted task id"))?; + 
String::from_utf8(plaintext).context("decrypted task id is not valid UTF-8") +} + +fn curve25519_secret_key_from_signing_key(signing_key: &SigningKey) -> Curve25519SecretKey { + let digest = Sha512::digest(signing_key.to_bytes()); + let mut secret_key = [0u8; 32]; + secret_key.copy_from_slice(&digest[..32]); + secret_key[0] &= 248; + secret_key[31] &= 127; + secret_key[31] |= 64; + Curve25519SecretKey::from(secret_key) +} + +fn agent_task_registration_url(chatgpt_base_url: &str, agent_runtime_id: &str) -> String { + let trimmed = chatgpt_base_url.trim_end_matches('/'); + format!("{trimmed}/v1/agent/{agent_runtime_id}/task/register") +} + +#[cfg(test)] +mod tests { + use base64::engine::general_purpose::URL_SAFE_NO_PAD; + use codex_app_server_protocol::AuthMode as ApiAuthMode; + use codex_login::AuthCredentialsStoreMode; + use codex_login::AuthDotJson; + use codex_login::save_auth; + use codex_login::token_data::IdTokenInfo; + use codex_login::token_data::TokenData; + use pretty_assertions::assert_eq; + use wiremock::Mock; + use wiremock::MockServer; + use wiremock::ResponseTemplate; + use wiremock::matchers::header; + use wiremock::matchers::method; + use wiremock::matchers::path; + + use super::*; + + #[tokio::test] + async fn register_task_skips_when_feature_is_disabled() { + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-123", Some("user-123"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ false, + "https://chatgpt.com/backend-api/".to_string(), + SessionSource::Cli, + ); + + assert_eq!(manager.register_task().await.unwrap(), None); + } + + #[tokio::test] + async fn register_task_skips_for_api_key_auth() { + let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test-key")); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + "https://chatgpt.com/backend-api/".to_string(), + SessionSource::Cli, + ); + + 
assert_eq!(manager.register_task().await.unwrap(), None); + } + + #[tokio::test] + async fn register_task_registers_and_decrypts_plaintext_task_id() { + let server = MockServer::start().await; + let chatgpt_base_url = server.uri(); + mount_human_biscuit(&server, &chatgpt_base_url, "agent-123").await; + let auth = make_chatgpt_auth("account-123", Some("user-123")); + let auth_manager = AuthManager::from_auth_for_testing(auth.clone()); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + let stored_identity = seed_stored_identity(&manager, &auth, "agent-123", "account-123"); + let encrypted_task_id = + encrypt_task_id_for_identity(&stored_identity, "task_123").expect("task ciphertext"); + + Mock::given(method("POST")) + .and(path("/v1/agent/agent-123/task/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "encrypted_task_id": encrypted_task_id, + }))) + .expect(1) + .mount(&server) + .await; + + let task = manager + .register_task() + .await + .unwrap() + .expect("task should be registered"); + + assert_eq!( + task, + RegisteredAgentTask { + binding_id: "chatgpt-account-account-123".to_string(), + chatgpt_account_id: "account-123".to_string(), + chatgpt_user_id: Some("user-123".to_string()), + agent_runtime_id: "agent-123".to_string(), + task_id: "task_123".to_string(), + registered_at: task.registered_at.clone(), + } + ); + } + + #[tokio::test] + async fn register_task_uses_chatgpt_base_url() { + let server = MockServer::start().await; + let chatgpt_base_url = format!("{}/backend-api", server.uri()); + mount_human_biscuit(&server, &chatgpt_base_url, "agent-fallback").await; + let auth = make_chatgpt_auth("account-123", Some("user-123")); + let auth_manager = AuthManager::from_auth_for_testing(auth.clone()); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + 
/*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + let stored_identity = + seed_stored_identity(&manager, &auth, "agent-fallback", "account-123"); + let encrypted_task_id = encrypt_task_id_for_identity(&stored_identity, "task_fallback") + .expect("task ciphertext"); + + Mock::given(method("POST")) + .and(path("/backend-api/v1/agent/agent-fallback/task/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "encrypted_task_id": encrypted_task_id, + }))) + .expect(1) + .mount(&server) + .await; + + let task = manager + .register_task() + .await + .unwrap() + .expect("task should be registered"); + + assert_eq!(task.agent_runtime_id, "agent-fallback"); + assert_eq!(task.task_id, "task_fallback"); + } + + #[tokio::test] + async fn register_task_for_binding_keeps_one_auth_snapshot() { + let server = MockServer::start().await; + let chatgpt_base_url = server.uri(); + mount_human_biscuit(&server, &chatgpt_base_url, "agent-123").await; + let binding_auth = make_chatgpt_auth("account-123", Some("user-123")); + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-456", Some("user-456"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + chatgpt_base_url, + SessionSource::Cli, + ); + let stored_identity = + seed_stored_identity(&manager, &binding_auth, "agent-123", "account-123"); + let encrypted_task_id = + encrypt_task_id_for_identity(&stored_identity, "task_123").expect("task ciphertext"); + let binding = + AgentIdentityBinding::from_auth(&binding_auth, /*forced_workspace_id*/ None) + .expect("binding"); + + Mock::given(method("POST")) + .and(path("/v1/agent/agent-123/task/register")) + .and(header("x-openai-authorization", "human-biscuit")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "encrypted_task_id": encrypted_task_id, + }))) + .expect(1) + 
.mount(&server) + .await; + + let task = manager + .register_task_for_binding(binding_auth, binding) + .await + .unwrap() + .expect("task should be registered"); + + assert_eq!( + task, + RegisteredAgentTask { + binding_id: "chatgpt-account-account-123".to_string(), + chatgpt_account_id: "account-123".to_string(), + chatgpt_user_id: Some("user-123".to_string()), + agent_runtime_id: "agent-123".to_string(), + task_id: "task_123".to_string(), + registered_at: task.registered_at.clone(), + } + ); + } + + #[tokio::test] + async fn task_matches_current_binding_rejects_stale_auth_binding() { + let auth_manager = + AuthManager::from_auth_for_testing(make_chatgpt_auth("account-456", Some("user-456"))); + let manager = AgentIdentityManager::new_for_tests( + auth_manager, + /*feature_enabled*/ true, + "https://chatgpt.com/backend-api/".to_string(), + SessionSource::Cli, + ); + let task = RegisteredAgentTask { + binding_id: "chatgpt-account-account-123".to_string(), + chatgpt_account_id: "account-123".to_string(), + chatgpt_user_id: Some("user-123".to_string()), + agent_runtime_id: "agent-123".to_string(), + task_id: "task_123".to_string(), + registered_at: "2026-03-23T12:00:00Z".to_string(), + }; + + assert!(!manager.task_matches_current_binding(&task).await); + } + + async fn mount_human_biscuit( + server: &MockServer, + chatgpt_base_url: &str, + agent_runtime_id: &str, + ) { + let biscuit_url = agent_identity_biscuit_url(chatgpt_base_url); + let biscuit_path = reqwest::Url::parse(&biscuit_url) + .expect("biscuit URL parses") + .path() + .to_string(); + let target_url = agent_task_registration_url(chatgpt_base_url, agent_runtime_id); + Mock::given(method("GET")) + .and(path(biscuit_path)) + .and(header("authorization", "Bearer access-token-account-123")) + .and(header("x-original-method", "POST")) + .and(header("x-original-url", target_url)) + .respond_with( + ResponseTemplate::new(200).insert_header("x-openai-authorization", "human-biscuit"), + ) + .expect(1) + 
.mount(server) + .await; + } + + fn seed_stored_identity( + manager: &AgentIdentityManager, + auth: &CodexAuth, + agent_runtime_id: &str, + account_id: &str, + ) -> StoredAgentIdentity { + let key_material = generate_agent_key_material().expect("key material"); + let binding = + AgentIdentityBinding::from_auth(auth, /*forced_workspace_id*/ None).expect("binding"); + let stored_identity = StoredAgentIdentity { + binding_id: binding.binding_id, + chatgpt_account_id: account_id.to_string(), + chatgpt_user_id: Some("user-123".to_string()), + agent_runtime_id: agent_runtime_id.to_string(), + private_key_pkcs8_base64: key_material.private_key_pkcs8_base64, + public_key_ssh: key_material.public_key_ssh, + registered_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + abom: manager.abom.clone(), + }; + manager + .store_identity(auth, &stored_identity) + .expect("store identity"); + let persisted = auth + .get_agent_identity(account_id) + .expect("persisted identity"); + assert_eq!(persisted.agent_runtime_id, agent_runtime_id); + stored_identity + } + + fn encrypt_task_id_for_identity( + stored_identity: &StoredAgentIdentity, + task_id: &str, + ) -> Result { + let mut rng = crypto_box::aead::OsRng; + let public_key = + curve25519_secret_key_from_signing_key(&stored_identity.signing_key()?).public_key(); + let ciphertext = public_key + .seal(&mut rng, task_id.as_bytes()) + .map_err(|_| anyhow::anyhow!("failed to encrypt test task id"))?; + Ok(BASE64_STANDARD.encode(ciphertext)) + } + + fn make_chatgpt_auth(account_id: &str, user_id: Option<&str>) -> CodexAuth { + let tempdir = tempfile::tempdir().expect("tempdir"); + let auth_json = AuthDotJson { + auth_mode: Some(ApiAuthMode::Chatgpt), + openai_api_key: None, + tokens: Some(TokenData { + id_token: IdTokenInfo { + email: None, + chatgpt_plan_type: None, + chatgpt_user_id: user_id.map(ToOwned::to_owned), + chatgpt_account_id: Some(account_id.to_string()), + chatgpt_account_is_fedramp: false, + raw_jwt: 
fake_id_token(account_id, user_id), + }, + access_token: format!("access-token-{account_id}"), + refresh_token: "refresh-token".to_string(), + account_id: Some(account_id.to_string()), + }), + last_refresh: Some(Utc::now()), + agent_identity: None, + }; + save_auth(tempdir.path(), &auth_json, AuthCredentialsStoreMode::File).expect("save auth"); + CodexAuth::from_auth_storage(tempdir.path(), AuthCredentialsStoreMode::File) + .expect("load auth") + .expect("auth") + } + + fn fake_id_token(account_id: &str, user_id: Option<&str>) -> String { + let header = URL_SAFE_NO_PAD.encode(r#"{"alg":"none","typ":"JWT"}"#); + let payload = serde_json::json!({ + "https://api.openai.com/auth": { + "chatgpt_user_id": user_id, + "chatgpt_account_id": account_id, + } + }); + let payload = URL_SAFE_NO_PAD.encode(payload.to_string()); + format!("{header}.{payload}.signature") + } +} diff --git a/codex-rs/core/src/agents_md.rs b/codex-rs/core/src/agents_md.rs new file mode 100644 index 0000000000..a1a883e839 --- /dev/null +++ b/codex-rs/core/src/agents_md.rs @@ -0,0 +1,367 @@ +//! AGENTS.md discovery and user instruction assembly. +//! +//! Project-level documentation is primarily stored in files named `AGENTS.md`. +//! Additional fallback filenames can be configured via `project_doc_fallback_filenames`. +//! We include the concatenation of all files found along the path from the +//! project root to the current working directory as follows: +//! +//! 1. Determine the project root by walking upwards from the current working +//! directory until a configured `project_root_markers` entry is found. +//! When `project_root_markers` is unset, the default marker list is used +//! (`.git`). If no marker is found, only the current working directory is +//! considered. An empty marker list disables parent traversal. +//! 2. Collect every `AGENTS.md` found from the project root down to the +//! current working directory (inclusive) and concatenate their contents in +//! that order. +//! 3. 
We do **not** walk past the project root. + +use crate::config::Config; +use crate::config_loader::ConfigLayerStackOrdering; +use crate::config_loader::default_project_root_markers; +use crate::config_loader::merge_toml_values; +use crate::config_loader::project_root_markers_from_config; +use codex_app_server_protocol::ConfigLayerSource; +use codex_exec_server::Environment; +use codex_exec_server::ExecutorFileSystem; +use codex_features::Feature; +use codex_utils_absolute_path::AbsolutePathBuf; +use dunce::canonicalize as normalize_path; +use std::io; +use toml::Value as TomlValue; +use tracing::error; + +pub(crate) const HIERARCHICAL_AGENTS_MESSAGE: &str = + include_str!("../hierarchical_agents_message.md"); + +/// Default filename scanned for AGENTS.md instructions. +pub const DEFAULT_AGENTS_MD_FILENAME: &str = "AGENTS.md"; +/// Preferred local override for AGENTS.md instructions. +pub const LOCAL_AGENTS_MD_FILENAME: &str = "AGENTS.override.md"; + +/// When both `Config::instructions` and AGENTS.md docs are present, they will +/// be concatenated with the following separator. +const AGENTS_MD_SEPARATOR: &str = "\n\n--- project-doc ---\n\n"; + +fn render_js_repl_instructions(config: &Config) -> Option { + if !config.features.enabled(Feature::JsRepl) { + return None; + } + + let mut section = String::from("## JavaScript REPL (Node)\n"); + section.push_str( + "- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n", + ); + section.push_str("- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). 
Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n"); + section.push_str( + "- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n", + ); + section.push_str("- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n"); + section.push_str("- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n"); + section.push_str("- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n"); + section.push_str("- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n"); + section.push_str("- Raw MCP image blocks can request the same behavior by returning `_meta: { \"codex/imageDetail\": \"original\" }` on the image content item.\n"); + section.push_str("- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n"); + section.push_str("- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n"); + section.push_str("- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n"); + section.push_str("- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. 
Reset the kernel with `js_repl_reset` only when you need a clean state.\n"); + section.push_str("- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n"); + + if config.features.enabled(Feature::JsReplToolsOnly) { + section.push_str("- Do not call tools directly; use `js_repl` + `codex.tool(...)` for all tool calls, including shell commands.\n"); + section + .push_str("- MCP tools (if any) can also be called by name via `codex.tool(...)`.\n"); + } + + section.push_str("- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."); + + Some(section) +} + +/// Resolves AGENTS.md files into model-visible user instructions and source +/// paths. 
+pub struct AgentsMdManager<'a> { + config: &'a Config, +} + +pub(crate) struct LoadedAgentsMd { + pub(crate) contents: String, + pub(crate) path: AbsolutePathBuf, +} + +impl<'a> AgentsMdManager<'a> { + pub fn new(config: &'a Config) -> Self { + Self { config } + } + + pub(crate) fn load_global_instructions( + codex_dir: Option<&AbsolutePathBuf>, + ) -> Option { + let base = codex_dir?; + for candidate in [LOCAL_AGENTS_MD_FILENAME, DEFAULT_AGENTS_MD_FILENAME] { + let path = base.join(candidate); + if let Ok(contents) = std::fs::read_to_string(&path) { + let trimmed = contents.trim(); + if !trimmed.is_empty() { + return Some(LoadedAgentsMd { + contents: trimmed.to_string(), + path, + }); + } + } + } + None + } + + /// Combines configured user instructions and AGENTS.md content into a + /// single model-visible instruction string. + pub(crate) async fn user_instructions( + &self, + environment: Option<&Environment>, + ) -> Option { + let fs = environment?.get_filesystem(); + self.user_instructions_with_fs(fs.as_ref()).await + } + + pub(crate) async fn user_instructions_with_fs( + &self, + fs: &dyn ExecutorFileSystem, + ) -> Option { + let agents_md_docs = self.read_agents_md(fs).await; + + let mut output = String::new(); + + if let Some(instructions) = self.config.user_instructions.clone() { + output.push_str(&instructions); + } + + match agents_md_docs { + Ok(Some(docs)) => { + if !output.is_empty() { + output.push_str(AGENTS_MD_SEPARATOR); + } + output.push_str(&docs); + } + Ok(None) => {} + Err(e) => { + error!("error trying to find AGENTS.md docs: {e:#}"); + } + }; + + if let Some(js_repl_section) = render_js_repl_instructions(self.config) { + if !output.is_empty() { + output.push_str("\n\n"); + } + output.push_str(&js_repl_section); + } + + if self.config.features.enabled(Feature::ChildAgentsMd) { + if !output.is_empty() { + output.push_str("\n\n"); + } + output.push_str(HIERARCHICAL_AGENTS_MESSAGE); + } + + if !output.is_empty() { + Some(output) + } else { + 
None + } + } + + /// Returns all instruction source files included in the current config. + pub async fn instruction_sources(&self, fs: &dyn ExecutorFileSystem) -> Vec { + let mut paths = Self::load_global_instructions(Some(&self.config.codex_home)) + .map(|loaded| vec![loaded.path]) + .unwrap_or_default(); + match self.agents_md_paths(fs).await { + Ok(agents_md_paths) => paths.extend(agents_md_paths), + Err(err) => { + tracing::warn!(error = %err, "failed to discover AGENTS.md docs for instruction sources"); + } + } + paths + } + + /// Attempt to locate and load AGENTS.md documentation. + /// + /// On success returns `Ok(Some(contents))` where `contents` is the + /// concatenation of all discovered docs. If no documentation file is found + /// the function returns `Ok(None)`. Unexpected I/O failures bubble up as + /// `Err` so callers can decide how to handle them. + async fn read_agents_md(&self, fs: &dyn ExecutorFileSystem) -> io::Result> { + let max_total = self.config.project_doc_max_bytes; + + if max_total == 0 { + return Ok(None); + } + + let paths = self.agents_md_paths(fs).await?; + if paths.is_empty() { + return Ok(None); + } + + let mut remaining: u64 = max_total as u64; + let mut parts: Vec = Vec::new(); + + for p in paths { + if remaining == 0 { + break; + } + + match fs.get_metadata(&p, /*sandbox*/ None).await { + Ok(metadata) if !metadata.is_file => continue, + Ok(_) => {} + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), + } + + let mut data = match fs.read_file(&p, /*sandbox*/ None).await { + Ok(data) => data, + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), + }; + let size = data.len() as u64; + if size > remaining { + data.truncate(remaining as usize); + } + + if size > remaining { + tracing::warn!( + "Project doc `{}` exceeds remaining budget ({} bytes) - truncating.", + p.display(), + remaining, + ); + } + + let text = 
String::from_utf8_lossy(&data).to_string(); + if !text.trim().is_empty() { + parts.push(text); + remaining = remaining.saturating_sub(data.len() as u64); + } + } + + if parts.is_empty() { + Ok(None) + } else { + Ok(Some(parts.join("\n\n"))) + } + } + + /// Discover the list of AGENTS.md files using the same search rules as + /// `read_agents_md`, but return the file paths instead of concatenated + /// contents. The list is ordered from project root to the current working + /// directory (inclusive). Symlinks are allowed. When `project_doc_max_bytes` + /// is zero, returns an empty list. + async fn agents_md_paths( + &self, + fs: &dyn ExecutorFileSystem, + ) -> io::Result> { + if self.config.project_doc_max_bytes == 0 { + return Ok(Vec::new()); + } + + let mut dir = self.config.cwd.clone(); + if let Ok(canon) = normalize_path(&dir) { + dir = AbsolutePathBuf::try_from(canon)?; + } + + let mut merged = TomlValue::Table(toml::map::Map::new()); + for layer in self.config.config_layer_stack.get_layers( + ConfigLayerStackOrdering::LowestPrecedenceFirst, + /*include_disabled*/ false, + ) { + if matches!(layer.name, ConfigLayerSource::Project { .. 
}) { + continue; + } + merge_toml_values(&mut merged, &layer.config); + } + let project_root_markers = match project_root_markers_from_config(&merged) { + Ok(Some(markers)) => markers, + Ok(None) => default_project_root_markers(), + Err(err) => { + tracing::warn!("invalid project_root_markers: {err}"); + default_project_root_markers() + } + }; + let mut project_root = None; + if !project_root_markers.is_empty() { + for ancestor in dir.ancestors() { + for marker in &project_root_markers { + let marker_path = ancestor.join(marker); + let marker_exists = match fs.get_metadata(&marker_path, /*sandbox*/ None).await + { + Ok(_) => true, + Err(err) if err.kind() == io::ErrorKind::NotFound => false, + Err(err) => return Err(err), + }; + if marker_exists { + project_root = Some(ancestor.clone()); + break; + } + } + if project_root.is_some() { + break; + } + } + } + + let search_dirs: Vec = if let Some(root) = project_root { + let mut dirs = Vec::new(); + let mut cursor = dir.clone(); + loop { + dirs.push(cursor.clone()); + if cursor == root { + break; + } + let Some(parent) = cursor.parent() else { + break; + }; + cursor = parent; + } + dirs.reverse(); + dirs + } else { + vec![dir] + }; + + let mut found: Vec = Vec::new(); + let candidate_filenames = self.candidate_filenames(); + for d in search_dirs { + for name in &candidate_filenames { + let candidate = d.join(name); + match fs.get_metadata(&candidate, /*sandbox*/ None).await { + Ok(md) if md.is_file => { + found.push(candidate); + break; + } + Ok(_) => {} + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), + } + } + } + + Ok(found) + } + + fn candidate_filenames(&self) -> Vec<&str> { + let mut names: Vec<&str> = + Vec::with_capacity(2 + self.config.project_doc_fallback_filenames.len()); + names.push(LOCAL_AGENTS_MD_FILENAME); + names.push(DEFAULT_AGENTS_MD_FILENAME); + for candidate in &self.config.project_doc_fallback_filenames { + let candidate = candidate.as_str(); + if 
candidate.is_empty() { + continue; + } + if !names.contains(&candidate) { + names.push(candidate); + } + } + names + } +} + +#[cfg(test)] +#[path = "agents_md_tests.rs"] +mod tests; diff --git a/codex-rs/core/src/project_doc_tests.rs b/codex-rs/core/src/agents_md_tests.rs similarity index 61% rename from codex-rs/core/src/project_doc_tests.rs rename to codex-rs/core/src/agents_md_tests.rs index c8caf2ff9a..012724b43e 100644 --- a/codex-rs/core/src/project_doc_tests.rs +++ b/codex-rs/core/src/agents_md_tests.rs @@ -11,11 +11,15 @@ use std::path::PathBuf; use tempfile::TempDir; async fn get_user_instructions(config: &Config) -> Option { - super::get_user_instructions_with_fs(config, LOCAL_FS.as_ref()).await + AgentsMdManager::new(config) + .user_instructions_with_fs(LOCAL_FS.as_ref()) + .await } -async fn discover_project_doc_paths(config: &Config) -> std::io::Result> { - super::discover_project_doc_paths(config, LOCAL_FS.as_ref()).await +async fn agents_md_paths(config: &Config) -> std::io::Result> { + AgentsMdManager::new(config) + .agents_md_paths(LOCAL_FS.as_ref()) + .await } /// Helper that returns a `Config` pointing at `root` and using `limit` as @@ -101,7 +105,9 @@ async fn no_environment_returns_none() { let tmp = tempfile::tempdir().expect("tempdir"); let config = make_config(&tmp, /*limit*/ 4096, Some("user instructions")).await; - let res = super::get_user_instructions(&config, /*environment*/ None).await; + let res = AgentsMdManager::new(&config) + .user_instructions(/*environment*/ None) + .await; assert_eq!(res, None); } @@ -187,10 +193,9 @@ async fn zero_byte_limit_disables_discovery() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "something").unwrap(); - let discovery = - discover_project_doc_paths(&make_config(&tmp, /*limit*/ 0, /*instructions*/ None).await) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&make_config(&tmp, /*limit*/ 0, /*instructions*/ None).await) + .await + 
.expect("discover paths"); assert_eq!(discovery, Vec::::new()); } @@ -205,7 +210,7 @@ async fn js_repl_instructions_are_appended_when_enabled() { let res = get_user_instructions(&cfg) .await .expect("js_repl instructions expected"); - let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. 
Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."; + let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. 
Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Raw MCP image blocks can request the same behavior by returning `_meta: { \"codex/imageDetail\": \"original\" }` on the image content item.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. 
Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."; assert_eq!(res, expected); } @@ -224,33 +229,14 @@ async fn js_repl_tools_only_instructions_are_feature_gated() { let res = get_user_instructions(&cfg) .await .expect("js_repl instructions expected"); - let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. 
Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. 
For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Do not call tools directly; use `js_repl` + `codex.tool(...)` for all tool calls, including shell commands.\n- MCP tools (if any) can also be called by name via `codex.tool(...)`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."; + let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. 
Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. 
Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Raw MCP image blocks can request the same behavior by returning `_meta: { \"codex/imageDetail\": \"original\" }` on the image content item.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. 
Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Do not call tools directly; use `js_repl` + `codex.tool(...)` for all tool calls, including shell commands.\n- MCP tools (if any) can also be called by name via `codex.tool(...)`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."; assert_eq!(res, expected); } -#[tokio::test] -async fn js_repl_image_detail_original_does_not_change_instructions() { - let tmp = tempfile::tempdir().expect("tempdir"); - let mut cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; - let mut features = cfg.features.get().clone(); - features - .enable(Feature::JsRepl) - .enable(Feature::ImageDetailOriginal); - cfg.features - .set(features) - .expect("test config should allow js_repl image detail settings"); - - let res = get_user_instructions(&cfg) - .await - .expect("js_repl instructions expected"); - let expected = "## JavaScript REPL (Node)\n- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). 
Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n- `codex.tool` executes a normal tool call and resolves to the raw tool output object. Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. 
Smaller uploads are faster and less likely to hit size limits.\n- Top-level bindings persist across cells. If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. 
Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."; - assert_eq!(res, expected); -} - -/// When both system instructions *and* a project doc are present the two +/// When both system instructions and AGENTS.md docs are present the two /// should be concatenated with the separator. #[tokio::test] -async fn merges_existing_instructions_with_project_doc() { +async fn merges_existing_instructions_with_agents_md() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "proj doc").unwrap(); @@ -260,12 +246,12 @@ async fn merges_existing_instructions_with_project_doc() { .await .expect("should produce a combined instruction string"); - let expected = format!("{INSTRUCTIONS}{PROJECT_DOC_SEPARATOR}{}", "proj doc"); + let expected = format!("{INSTRUCTIONS}{AGENTS_MD_SEPARATOR}{}", "proj doc"); assert_eq!(res, expected); } -/// If there are existing system instructions but the project doc is +/// If there are existing system instructions but AGENTS.md docs are /// missing we expect the original instructions to be returned unchanged. 
#[tokio::test] async fn keeps_existing_instructions_when_doc_missing() { @@ -326,9 +312,7 @@ async fn project_root_markers_are_honored_for_agents_discovery() { .await; cfg.cwd = nested.abs(); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); let expected_parent = AbsolutePathBuf::try_from( dunce::canonicalize(root.path().join("AGENTS.md")).expect("canonical parent doc path"), ) @@ -345,12 +329,33 @@ async fn project_root_markers_are_honored_for_agents_discovery() { assert_eq!(res, "parent doc\n\nchild doc"); } +#[tokio::test] +async fn instruction_sources_include_global_before_agents_md_docs() { + let tmp = tempfile::tempdir().expect("tempdir"); + fs::write(tmp.path().join("AGENTS.md"), "project doc").unwrap(); + + let cfg = make_config(&tmp, /*limit*/ 4096, Some("global doc")).await; + let global_agents = cfg.codex_home.join(DEFAULT_AGENTS_MD_FILENAME); + fs::create_dir_all(&cfg.codex_home).unwrap(); + fs::write(&global_agents, "global doc").unwrap(); + + let sources = AgentsMdManager::new(&cfg) + .instruction_sources(LOCAL_FS.as_ref()) + .await; + let project_agents = AbsolutePathBuf::try_from( + dunce::canonicalize(cfg.cwd.join("AGENTS.md")).expect("canonical project doc path"), + ) + .expect("absolute project doc path"); + + assert_eq!(sources, vec![global_agents, project_agents]); +} + /// AGENTS.override.md is preferred over AGENTS.md when both are present. 
#[tokio::test] async fn agents_local_md_preferred() { let tmp = tempfile::tempdir().expect("tempdir"); - fs::write(tmp.path().join(DEFAULT_PROJECT_DOC_FILENAME), "versioned").unwrap(); - fs::write(tmp.path().join(LOCAL_PROJECT_DOC_FILENAME), "local").unwrap(); + fs::write(tmp.path().join(DEFAULT_AGENTS_MD_FILENAME), "versioned").unwrap(); + fs::write(tmp.path().join(LOCAL_AGENTS_MD_FILENAME), "local").unwrap(); let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; @@ -360,13 +365,11 @@ async fn agents_local_md_preferred() { assert_eq!(res, "local"); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); assert_eq!(discovery.len(), 1); assert_eq!( discovery[0].file_name().unwrap().to_string_lossy(), - LOCAL_PROJECT_DOC_FILENAME + LOCAL_AGENTS_MD_FILENAME ); } @@ -412,16 +415,14 @@ async fn agents_md_preferred_over_fallbacks() { assert_eq!(res, "primary"); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); assert_eq!(discovery.len(), 1); assert!( discovery[0] .file_name() .unwrap() .to_string_lossy() - .eq(DEFAULT_PROJECT_DOC_FILENAME) + .eq(DEFAULT_AGENTS_MD_FILENAME) ); } @@ -435,9 +436,7 @@ async fn agents_md_directory_is_ignored() { let res = get_user_instructions(&cfg).await; assert_eq!(res, None); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); assert_eq!(discovery, Vec::::new()); } @@ -460,17 +459,15 @@ async fn agents_md_special_file_is_ignored() { let res = get_user_instructions(&cfg).await; assert_eq!(res, None); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); assert_eq!(discovery, Vec::::new()); } 
#[tokio::test] async fn override_directory_falls_back_to_agents_md_file() { let tmp = tempfile::tempdir().expect("tempdir"); - fs::create_dir(tmp.path().join(LOCAL_PROJECT_DOC_FILENAME)).unwrap(); - fs::write(tmp.path().join(DEFAULT_PROJECT_DOC_FILENAME), "primary").unwrap(); + fs::create_dir(tmp.path().join(LOCAL_AGENTS_MD_FILENAME)).unwrap(); + fs::write(tmp.path().join(DEFAULT_AGENTS_MD_FILENAME), "primary").unwrap(); let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; @@ -479,27 +476,25 @@ async fn override_directory_falls_back_to_agents_md_file() { .expect("AGENTS.md should be used when override is a directory"); assert_eq!(res, "primary"); - let discovery = discover_project_doc_paths(&cfg) - .await - .expect("discover paths"); + let discovery = agents_md_paths(&cfg).await.expect("discover paths"); assert_eq!(discovery.len(), 1); assert_eq!( discovery[0] .file_name() .expect("file name") .to_string_lossy(), - DEFAULT_PROJECT_DOC_FILENAME + DEFAULT_AGENTS_MD_FILENAME ); } #[tokio::test] -async fn skills_are_not_appended_to_project_doc() { +async fn skills_are_not_appended_to_agents_md() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "base doc").unwrap(); let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; create_skill( - cfg.codex_home.clone(), + cfg.codex_home.to_path_buf(), "pdf-processing", "extract from pdfs", ); @@ -523,7 +518,7 @@ async fn apps_feature_does_not_emit_user_instructions_by_itself() { } #[tokio::test] -async fn apps_feature_does_not_append_to_project_doc_user_instructions() { +async fn apps_feature_does_not_append_to_agents_md_user_instructions() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "base doc").unwrap(); diff --git a/codex-rs/core/src/apply_patch.rs b/codex-rs/core/src/apply_patch.rs index 0810d319b8..1bce68a988 100644 --- a/codex-rs/core/src/apply_patch.rs +++ b/codex-rs/core/src/apply_patch.rs @@ 
-18,16 +18,13 @@ pub(crate) enum InternalApplyPatchInvocation { /// The `apply_patch` call was approved, either automatically because it /// appears that it should be allowed based on the user's sandbox policy - /// *or* because the user explicitly approved it. In either case, we use - /// exec with [`codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1`] to realize - /// the `apply_patch` call, - /// but [`ApplyPatchExec::auto_approved`] is used to determine the sandbox - /// used with the `exec()`. - DelegateToExec(ApplyPatchExec), + /// *or* because the user explicitly approved it. The runtime realizes the + /// patch through the selected environment filesystem. + DelegateToRuntime(ApplyPatchRuntimeInvocation), } #[derive(Debug)] -pub(crate) struct ApplyPatchExec { +pub(crate) struct ApplyPatchRuntimeInvocation { pub(crate) action: ApplyPatchAction, pub(crate) auto_approved: bool, pub(crate) exec_approval_requirement: ExecApprovalRequirement, @@ -49,7 +46,7 @@ pub(crate) async fn apply_patch( SafetyCheck::AutoApprove { user_explicitly_approved, .. - } => InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { + } => InternalApplyPatchInvocation::DelegateToRuntime(ApplyPatchRuntimeInvocation { action, auto_approved: !user_explicitly_approved, exec_approval_requirement: ExecApprovalRequirement::Skip { @@ -61,7 +58,7 @@ pub(crate) async fn apply_patch( // Delegate the approval prompt (including cached approvals) to the // tool runtime, consistent with how shell/unified_exec approvals // are orchestrator-driven. 
- InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { + InternalApplyPatchInvocation::DelegateToRuntime(ApplyPatchRuntimeInvocation { action, auto_approved: false, exec_approval_requirement: ExecApprovalRequirement::NeedsApproval { diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index 16f743943a..bd83c81f0d 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -32,6 +32,7 @@ use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use codex_api::ApiError; +use codex_api::AuthProvider; use codex_api::CompactClient as ApiCompactClient; use codex_api::CompactionInput as ApiCompactionInput; use codex_api::Compression; @@ -83,7 +84,6 @@ use futures::StreamExt; use http::HeaderMap as ApiHeaderMap; use http::HeaderValue; use http::StatusCode as HttpStatusCode; -use http::header::AUTHORIZATION; use reqwest::StatusCode; use std::time::Duration; use std::time::Instant; @@ -277,16 +277,7 @@ pub(crate) struct RealtimeWebrtcCallStart { /// `api.openai.com` sideband path. 
fn sideband_websocket_auth_headers(api_auth: &CoreAuthProvider) -> ApiHeaderMap { let mut headers = ApiHeaderMap::new(); - if let Some(token) = api_auth.token.as_ref() - && let Ok(value) = HeaderValue::from_str(&format!("Bearer {token}")) - { - headers.insert(AUTHORIZATION, value); - } - if let Some(account_id) = api_auth.account_id.as_ref() - && let Ok(value) = HeaderValue::from_str(account_id) - { - headers.insert("ChatGPT-Account-ID", value); - } + api_auth.add_auth_headers(&mut headers); headers } diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 33bcd014b2..83f0628e4c 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -1,11 +1,9 @@ use std::collections::HashMap; use std::collections::HashSet; use std::fmt::Debug; -use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicU64; -use std::time::Duration; use std::time::SystemTime; use std::time::UNIX_EPOCH; @@ -15,70 +13,46 @@ use crate::agent::Mailbox; use crate::agent::MailboxReceiver; use crate::agent::agent_status_from_event; use crate::agent::status::is_final; +use crate::agent_identity::AgentIdentityManager; +use crate::agent_identity::RegisteredAgentTask; use crate::apps::render_apps_section; use crate::commit_attribution::commit_message_trailer_instruction; use crate::compact; -use crate::compact::InitialContextInjection; -use crate::compact::run_inline_auto_compact_task; -use crate::compact::should_use_remote_compact_task; -use crate::compact_remote::run_inline_remote_auto_compact_task; use crate::config::ManagedFeatures; use crate::connectors; use crate::exec_policy::ExecPolicyManager; use crate::installation_id::resolve_installation_id; -use crate::mcp_tool_exposure::build_mcp_tool_exposure; use crate::parse_turn_item; use crate::path_utils::normalize_for_native_workdir; use crate::realtime_conversation::RealtimeConversationManager; -use crate::realtime_conversation::handle_audio as handle_realtime_conversation_audio; 
-use crate::realtime_conversation::handle_close as handle_realtime_conversation_close; -use crate::realtime_conversation::handle_start as handle_realtime_conversation_start; -use crate::realtime_conversation::handle_text as handle_realtime_conversation_text; use crate::render_skills_section; use crate::rollout::find_thread_name_by_id; use crate::session_prefix::format_subagent_notification_message; use crate::skills_load_input_from_config; -use crate::stream_events_utils::HandleOutputCtx; -use crate::stream_events_utils::handle_non_tool_response_item; -use crate::stream_events_utils::handle_output_item_done; -use crate::stream_events_utils::last_assistant_message_from_item; -use crate::stream_events_utils::raw_assistant_output_text_from_item; -use crate::stream_events_utils::record_completed_response_item; use crate::turn_metadata::TurnMetadataState; -use crate::util::error_or_panic; use async_channel::Receiver; use async_channel::Sender; use chrono::Local; use chrono::Utc; use codex_analytics::AnalyticsEventsClient; -use codex_analytics::AppInvocation; -use codex_analytics::CompactionPhase; -use codex_analytics::CompactionReason; -use codex_analytics::InvocationType; use codex_analytics::SubAgentThreadStartedInput; -use codex_analytics::build_track_events_context; use codex_app_server_protocol::AuthMode; use codex_app_server_protocol::McpServerElicitationRequest; use codex_app_server_protocol::McpServerElicitationRequestParams; use codex_config::types::OAuthCredentialsStoreMode; use codex_exec_server::Environment; use codex_exec_server::EnvironmentManager; +use codex_exec_server::FileSystemSandboxContext; use codex_features::FEATURES; use codex_features::Feature; use codex_features::unstable_features_warning_event; -use codex_hooks::HookEvent; -use codex_hooks::HookEventAfterAgent; -use codex_hooks::HookPayload; -use codex_hooks::HookResult; use codex_hooks::Hooks; use codex_hooks::HooksConfig; use codex_login::AuthManager; use codex_login::CodexAuth; use 
codex_login::auth_env_telemetry::collect_auth_env_telemetry; use codex_login::default_client::originator; -use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_mcp::McpConnectionManager; -use codex_mcp::SandboxState; use codex_mcp::ToolInfo; use codex_mcp::codex_apps_tools_cache_key; #[cfg(test)] @@ -92,6 +66,7 @@ use codex_otel::current_span_trace_id; use codex_otel::current_span_w3c_trace_context; use codex_otel::set_parent_from_w3c_trace_context; use codex_protocol::ThreadId; +use codex_protocol::ToolName; use codex_protocol::approvals::ElicitationRequestEvent; use codex_protocol::approvals::ExecPolicyAmendment; use codex_protocol::approvals::NetworkPolicyAmendment; @@ -102,10 +77,8 @@ use codex_protocol::config_types::Settings; use codex_protocol::config_types::WebSearchMode; use codex_protocol::dynamic_tools::DynamicToolResponse; use codex_protocol::dynamic_tools::DynamicToolSpec; -use codex_protocol::items::PlanItem; use codex_protocol::items::TurnItem; use codex_protocol::items::UserMessageItem; -use codex_protocol::items::build_hook_prompt_message; use codex_protocol::mcp::CallToolResult; use codex_protocol::models::BaseInstructions; use codex_protocol::models::PermissionProfile; @@ -135,20 +108,15 @@ use codex_protocol::request_permissions::RequestPermissionsResponse; use codex_protocol::request_user_input::RequestUserInputArgs; use codex_protocol::request_user_input::RequestUserInputResponse; use codex_rmcp_client::ElicitationResponse; +use codex_rollout::RolloutConfig; use codex_rollout::state_db; use codex_shell_command::parse_command::parse_command; use codex_terminal_detection::user_agent; -use codex_tools::filter_tool_suggest_discoverable_tools_for_client; +use codex_thread_store::LocalThreadStore; use codex_utils_output_truncation::TruncationPolicy; -use codex_utils_stream_parser::AssistantTextChunk; -use codex_utils_stream_parser::AssistantTextStreamParser; -use codex_utils_stream_parser::ProposedPlanSegment; -use 
codex_utils_stream_parser::extract_proposed_plan_text; -use codex_utils_stream_parser::strip_citations; use futures::future::BoxFuture; use futures::future::Shared; use futures::prelude::*; -use futures::stream::FuturesOrdered; use rmcp::model::ListResourceTemplatesResult; use rmcp::model::ListResourcesResult; use rmcp::model::PaginatedRequestParams; @@ -165,21 +133,14 @@ use tokio_util::sync::CancellationToken; use toml::Value as TomlValue; use tracing::Instrument; use tracing::debug; -use tracing::debug_span; use tracing::error; -use tracing::field; use tracing::info; use tracing::info_span; use tracing::instrument; -use tracing::trace; -use tracing::trace_span; use tracing::warn; use uuid::Uuid; use crate::client::ModelClient; -use crate::client::ModelClientSession; -use crate::client_common::Prompt; -use crate::client_common::ResponseEvent; use crate::codex_thread::ThreadConfigSnapshot; use crate::compact::collect_user_messages; use crate::config::Config; @@ -191,6 +152,7 @@ use crate::config::resolve_web_search_mode_for_turn; use crate::context_manager::ContextManager; use crate::context_manager::TotalTokenUsageBreakdown; use crate::environment_context::EnvironmentContext; +use crate::thread_rollout_truncation::initial_history_has_prior_user_turns; use codex_config::CONFIG_TOML_FILE; use codex_config::types::McpServerConfig; use codex_config::types::ShellEnvironmentPolicy; @@ -200,7 +162,36 @@ use codex_protocol::error::Result as CodexResult; #[cfg(test)] use codex_protocol::exec_output::StreamOutput; +mod handlers; +mod mcp; +mod review; mod rollout_reconstruction; +mod session; +mod turn; +mod turn_context; +#[cfg(test)] +use self::handlers::submission_dispatch_span; +use self::handlers::submission_loop; +use self::review::spawn_review_thread; +pub(crate) use self::session::AppServerClientMetadata; +pub(crate) use self::session::Session; +pub(crate) use self::session::SessionConfiguration; +pub(crate) use self::session::SessionSettingsUpdate; +#[cfg(test)] 
+use self::turn::AssistantMessageStreamParsers; +pub(crate) use self::turn::build_prompt; +pub(crate) use self::turn::built_tools; +#[cfg(test)] +use self::turn::collect_explicit_app_ids_from_skill_items; +#[cfg(test)] +use self::turn::explicitly_enabled_connectors_missing_from_tools; +#[cfg(test)] +use self::turn::filter_connectors_for_input; +pub(crate) use self::turn::get_last_assistant_message_from_turn; +use self::turn::realtime_text_for_event; +pub(crate) use self::turn::run_turn; +pub(crate) use self::turn_context::TurnContext; +pub(crate) use self::turn_context::TurnSkillsContext; #[cfg(test)] mod rollout_reconstruction_tests; @@ -256,40 +247,18 @@ pub(crate) struct PreviousTurnSettings { } use crate::SkillError; -use crate::SkillInjections; use crate::SkillLoadOutcome; use crate::SkillMetadata; use crate::SkillsManager; -use crate::build_skill_injections; -use crate::collect_env_var_dependencies; -use crate::collect_explicit_skill_mentions; +use crate::agents_md::AgentsMdManager; use crate::exec_policy::ExecPolicyUpdateError; -use crate::feedback_tags; use crate::guardian::GuardianReviewSessionManager; -use crate::hook_runtime::PendingInputHookDisposition; -use crate::hook_runtime::inspect_pending_input; -use crate::hook_runtime::record_additional_contexts; -use crate::hook_runtime::record_pending_input; -use crate::hook_runtime::run_pending_session_start_hooks; -use crate::hook_runtime::run_user_prompt_submit_hooks; -use crate::injection::ToolMentionKind; -use crate::injection::app_id_from_path; -use crate::injection::tool_kind_for_path; use crate::instructions::UserInstructions; use crate::mcp::McpManager; -use crate::mcp_skill_dependencies::maybe_prompt_and_install_mcp_dependencies; use crate::memories; -use crate::mentions::build_connector_slug_counts; -use crate::mentions::build_skill_name_counts; -use crate::mentions::collect_explicit_app_ids; -use crate::mentions::collect_explicit_plugin_mentions; -use 
crate::mentions::collect_tool_mentions_from_messages; use crate::network_policy_decision::execpolicy_network_rule_amendment; use crate::plugins::PluginsManager; -use crate::plugins::build_plugin_injections; use crate::plugins::render_plugins_section; -use crate::project_doc::get_user_instructions; -use crate::resolve_skill_dependencies_for_turn; use crate::rollout::RolloutRecorder; use crate::rollout::RolloutRecorderParams; use crate::rollout::map_session_init_error; @@ -304,28 +273,26 @@ use crate::state::ActiveTurn; use crate::state::MailboxDeliveryPhase; use crate::state::SessionServices; use crate::state::SessionState; +#[cfg(test)] +use crate::stream_events_utils::HandleOutputCtx; +#[cfg(test)] +use crate::stream_events_utils::handle_output_item_done; use crate::tasks::GhostSnapshotTask; use crate::tasks::ReviewTask; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; -use crate::tools::ToolRouter; -use crate::tools::context::SharedTurnDiffTracker; use crate::tools::js_repl::JsReplHandle; use crate::tools::js_repl::resolve_compatible_node; use crate::tools::network_approval::NetworkApprovalService; use crate::tools::network_approval::build_blocked_request_observer; use crate::tools::network_approval::build_network_policy_decider; +#[cfg(test)] use crate::tools::parallel::ToolCallRuntime; -use crate::tools::router::ToolRouterParams; use crate::tools::sandboxing::ApprovalStore; -use crate::turn_diff_tracker::TurnDiffTracker; use crate::turn_timing::TurnTimingState; use crate::turn_timing::record_turn_ttfm_metric; -use crate::turn_timing::record_turn_ttft_metric; use crate::unified_exec::UnifiedExecProcessManager; -use crate::util::backoff; use crate::windows_sandbox::WindowsSandboxLevelExt; -use codex_async_utils::OrCancelExt; use codex_git_utils::get_git_repo_root; use codex_mcp::compute_auth_statuses; use codex_mcp::with_codex_apps_mcp; @@ -339,12 +306,9 @@ use codex_protocol::config_types::ServiceTier; use 
codex_protocol::config_types::WindowsSandboxLevel; use codex_protocol::models::ContentItem; use codex_protocol::models::DeveloperInstructions; -use codex_protocol::models::MessagePhase; use codex_protocol::models::ResponseInputItem; use codex_protocol::models::ResponseItem; use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; -use codex_protocol::protocol::AgentMessageContentDeltaEvent; -use codex_protocol::protocol::AgentReasoningSectionBreakEvent; use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::BackgroundEventEvent; @@ -362,10 +326,7 @@ use codex_protocol::protocol::ModelRerouteReason; use codex_protocol::protocol::NetworkApprovalContext; use codex_protocol::protocol::NonSteerableTurnKind; use codex_protocol::protocol::Op; -use codex_protocol::protocol::PlanDeltaEvent; use codex_protocol::protocol::RateLimitSnapshot; -use codex_protocol::protocol::ReasoningContentDeltaEvent; -use codex_protocol::protocol::ReasoningRawContentDeltaEvent; use codex_protocol::protocol::RequestUserInputEvent; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::SandboxPolicy; @@ -381,7 +342,6 @@ use codex_protocol::protocol::Submission; use codex_protocol::protocol::TokenCountEvent; use codex_protocol::protocol::TokenUsage; use codex_protocol::protocol::TokenUsageInfo; -use codex_protocol::protocol::TurnDiffEvent; use codex_protocol::protocol::WarningEvent; use codex_protocol::user_input::UserInput; use codex_tools::ToolsConfig; @@ -389,13 +349,8 @@ use codex_tools::ToolsConfigParams; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_readiness::Readiness; use codex_utils_readiness::ReadinessFlag; - -fn image_generation_tool_auth_allowed(auth_manager: Option<&AuthManager>) -> bool { - matches!( - auth_manager.and_then(AuthManager::auth_mode), - Some(AuthMode::Chatgpt) - ) -} +#[cfg(test)] +use 
codex_utils_stream_parser::ProposedPlanSegment; /// The high-level interface to the Codex system. /// It operates as a queue pair where you send submissions and receive events. @@ -446,7 +401,6 @@ pub(crate) const INITIAL_SUBMIT_ID: &str = ""; pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 512; const CYBER_VERIFY_URL: &str = "https://chatgpt.com/cyber"; const CYBER_SAFETY_URL: &str = "https://developers.openai.com/codex/concepts/cyber-safety"; -const EXPLICIT_APPS_READY_TIMEOUT: Duration = Duration::from_secs(3); impl Codex { /// Spawn a new [`Codex`] and initialize the session. pub(crate) async fn spawn(args: CodexSpawnArgs) -> CodexResult { @@ -498,10 +452,17 @@ impl Codex { let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let (tx_event, rx_event) = async_channel::unbounded(); - let plugin_outcome = plugins_manager.plugins_for_config(&config); + let environment = environment_manager + .current() + .await + .map_err(|err| CodexErr::Fatal(format!("failed to create environment: {err}")))?; + let fs = environment + .as_ref() + .map(|environment| environment.get_filesystem()); + let plugin_outcome = plugins_manager.plugins_for_config(&config).await; let effective_skill_roots = plugin_outcome.effective_skill_roots(); let skills_input = skills_load_input_from_config(&config, effective_skill_roots); - let loaded_skills = skills_manager.skills_for_config(&skills_input); + let loaded_skills = skills_manager.skills_for_config(&skills_input, fs).await; for err in &loaded_skills.errors { error!( @@ -546,11 +507,9 @@ impl Codex { config.startup_warnings.push(message); } - let environment = environment_manager - .current() - .await - .map_err(|err| CodexErr::Fatal(format!("failed to create environment: {err}")))?; - let user_instructions = get_user_instructions(&config, environment.as_deref()).await; + let user_instructions = AgentsMdManager::new(&config) + .user_instructions(environment.as_deref()) + .await; let exec_policy = if 
crate::guardian::is_guardian_reviewer_source(&session_source) { // Guardian review should rely on the built-in shell safety checks, @@ -744,6 +703,17 @@ impl Codex { Ok(()) } + /// Persist a thread-level memory mode update for the active session. + /// + /// This is a local-only operation that updates rollout metadata directly + /// and does not involve the model. + pub async fn set_thread_memory_mode( + &self, + mode: codex_protocol::protocol::ThreadMemoryMode, + ) -> anyhow::Result<()> { + handlers::persist_thread_memory_mode_update(&self.session, mode).await + } + pub async fn shutdown_and_wait(&self) -> CodexResult<()> { let session_loop_termination = self.session_loop_termination.clone(); match self.submit(Op::Shutdown).await { @@ -822,291 +792,9 @@ pub(crate) fn session_loop_termination_from_handle( .shared() } -/// Context for an initialized model agent -/// -/// A session has at most 1 running task at a time, and can be interrupted by user input. -pub(crate) struct Session { - pub(crate) conversation_id: ThreadId, - tx_event: Sender, - agent_status: watch::Sender, - out_of_band_elicitation_paused: watch::Sender, - state: Mutex, - /// Serializes rebuild/apply cycles for the running proxy; each cycle - /// rebuilds from the current SessionState while holding this lock. - managed_network_proxy_refresh_lock: Mutex<()>, - /// The set of enabled features should be invariant for the lifetime of the - /// session. - features: ManagedFeatures, - pending_mcp_server_refresh_config: Mutex>, - pub(crate) conversation: Arc, - pub(crate) active_turn: Mutex>, - mailbox: Mailbox, - mailbox_rx: Mutex, - idle_pending_input: Mutex>, // TODO (jif) merge with mailbox! 
- pub(crate) guardian_review_session: GuardianReviewSessionManager, - pub(crate) services: SessionServices, - js_repl: Arc, - next_internal_sub_id: AtomicU64, -} - -#[derive(Clone, Debug)] -pub(crate) struct TurnSkillsContext { - pub(crate) outcome: Arc, - pub(crate) implicit_invocation_seen_skills: Arc>>, -} - -impl TurnSkillsContext { - pub(crate) fn new(outcome: Arc) -> Self { - Self { - outcome, - implicit_invocation_seen_skills: Arc::new(Mutex::new(HashSet::new())), - } - } -} - -/// The context needed for a single turn of the thread. -#[derive(Debug)] -pub(crate) struct TurnContext { - pub(crate) sub_id: String, - pub(crate) trace_id: Option, - pub(crate) realtime_active: bool, - pub(crate) config: Arc, - pub(crate) auth_manager: Option>, - pub(crate) model_info: ModelInfo, - pub(crate) session_telemetry: SessionTelemetry, - pub(crate) provider: ModelProviderInfo, - pub(crate) reasoning_effort: Option, - pub(crate) reasoning_summary: ReasoningSummaryConfig, - pub(crate) session_source: SessionSource, - pub(crate) environment: Option>, - /// The session's absolute working directory. All relative paths provided - /// by the model as well as sandbox policies are resolved against this path - /// instead of `std::env::current_dir()`. 
- pub(crate) cwd: AbsolutePathBuf, - pub(crate) current_date: Option, - pub(crate) timezone: Option, - pub(crate) app_server_client_name: Option, - pub(crate) developer_instructions: Option, - pub(crate) compact_prompt: Option, - pub(crate) user_instructions: Option, - pub(crate) collaboration_mode: CollaborationMode, - pub(crate) personality: Option, - pub(crate) approval_policy: Constrained, - pub(crate) sandbox_policy: Constrained, - pub(crate) file_system_sandbox_policy: FileSystemSandboxPolicy, - pub(crate) network_sandbox_policy: NetworkSandboxPolicy, - pub(crate) network: Option, - pub(crate) windows_sandbox_level: WindowsSandboxLevel, - pub(crate) shell_environment_policy: ShellEnvironmentPolicy, - pub(crate) tools_config: ToolsConfig, - pub(crate) features: ManagedFeatures, - pub(crate) ghost_snapshot: GhostSnapshotConfig, - pub(crate) final_output_json_schema: Option, - pub(crate) codex_self_exe: Option, - pub(crate) codex_linux_sandbox_exe: Option, - pub(crate) tool_call_gate: Arc, - pub(crate) truncation_policy: TruncationPolicy, - pub(crate) js_repl: Arc, - pub(crate) dynamic_tools: Vec, - pub(crate) turn_metadata_state: Arc, - pub(crate) turn_skills: TurnSkillsContext, - pub(crate) turn_timing_state: Arc, -} -impl TurnContext { - pub(crate) fn model_context_window(&self) -> Option { - let effective_context_window_percent = self.model_info.effective_context_window_percent; - self.model_info.context_window.map(|context_window| { - context_window.saturating_mul(effective_context_window_percent) / 100 - }) - } - - pub(crate) fn apps_enabled(&self) -> bool { - let is_chatgpt_auth = self - .auth_manager - .as_deref() - .and_then(AuthManager::auth_cached) - .as_ref() - .is_some_and(CodexAuth::is_chatgpt_auth); - self.features.apps_enabled_for_auth(is_chatgpt_auth) - } - - pub(crate) async fn with_model(&self, model: String, models_manager: &ModelsManager) -> Self { - let mut config = (*self.config).clone(); - config.model = Some(model.clone()); - let 
model_info = models_manager - .get_model_info(model.as_str(), &config.to_models_manager_config()) - .await; - let truncation_policy = model_info.truncation_policy.into(); - let supported_reasoning_levels = model_info - .supported_reasoning_levels - .iter() - .map(|preset| preset.effort) - .collect::>(); - let reasoning_effort = if let Some(current_reasoning_effort) = self.reasoning_effort { - if supported_reasoning_levels.contains(¤t_reasoning_effort) { - Some(current_reasoning_effort) - } else { - supported_reasoning_levels - .get(supported_reasoning_levels.len().saturating_sub(1) / 2) - .copied() - .or(model_info.default_reasoning_level) - } - } else { - supported_reasoning_levels - .get(supported_reasoning_levels.len().saturating_sub(1) / 2) - .copied() - .or(model_info.default_reasoning_level) - }; - config.model_reasoning_effort = reasoning_effort; - - let collaboration_mode = self.collaboration_mode.with_updates( - Some(model.clone()), - Some(reasoning_effort), - /*developer_instructions*/ None, - ); - let features = self.features.clone(); - let tools_config = ToolsConfig::new(&ToolsConfigParams { - model_info: &model_info, - available_models: &models_manager - .list_models(RefreshStrategy::OnlineIfUncached) - .await, - features: &features, - image_generation_tool_auth_allowed: image_generation_tool_auth_allowed( - self.auth_manager.as_deref(), - ), - web_search_mode: self.tools_config.web_search_mode, - session_source: self.session_source.clone(), - sandbox_policy: self.sandbox_policy.get(), - windows_sandbox_level: self.windows_sandbox_level, - }) - .with_unified_exec_shell_mode(self.tools_config.unified_exec_shell_mode.clone()) - .with_web_search_config(self.tools_config.web_search_config.clone()) - .with_allow_login_shell(self.tools_config.allow_login_shell) - .with_has_environment(self.tools_config.has_environment) - .with_spawn_agent_usage_hint(config.multi_agent_v2.usage_hint_enabled) - 
.with_spawn_agent_usage_hint_text(config.multi_agent_v2.usage_hint_text.clone()) - .with_hide_spawn_agent_metadata(config.multi_agent_v2.hide_spawn_agent_metadata) - .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( - &config.agent_roles, - )); - - Self { - sub_id: self.sub_id.clone(), - trace_id: self.trace_id.clone(), - realtime_active: self.realtime_active, - config: Arc::new(config), - auth_manager: self.auth_manager.clone(), - model_info: model_info.clone(), - session_telemetry: self - .session_telemetry - .clone() - .with_model(model.as_str(), model_info.slug.as_str()), - provider: self.provider.clone(), - reasoning_effort, - reasoning_summary: self.reasoning_summary, - session_source: self.session_source.clone(), - environment: self.environment.clone(), - cwd: self.cwd.clone(), - current_date: self.current_date.clone(), - timezone: self.timezone.clone(), - app_server_client_name: self.app_server_client_name.clone(), - developer_instructions: self.developer_instructions.clone(), - compact_prompt: self.compact_prompt.clone(), - user_instructions: self.user_instructions.clone(), - collaboration_mode, - personality: self.personality, - approval_policy: self.approval_policy.clone(), - sandbox_policy: self.sandbox_policy.clone(), - file_system_sandbox_policy: self.file_system_sandbox_policy.clone(), - network_sandbox_policy: self.network_sandbox_policy, - network: self.network.clone(), - windows_sandbox_level: self.windows_sandbox_level, - shell_environment_policy: self.shell_environment_policy.clone(), - tools_config, - features, - ghost_snapshot: self.ghost_snapshot.clone(), - final_output_json_schema: self.final_output_json_schema.clone(), - codex_self_exe: self.codex_self_exe.clone(), - codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(), - tool_call_gate: Arc::new(ReadinessFlag::new()), - truncation_policy, - js_repl: Arc::clone(&self.js_repl), - dynamic_tools: self.dynamic_tools.clone(), - turn_metadata_state: 
self.turn_metadata_state.clone(), - turn_skills: self.turn_skills.clone(), - turn_timing_state: Arc::clone(&self.turn_timing_state), - } - } - - pub(crate) fn resolve_path(&self, path: Option) -> AbsolutePathBuf { - path.as_ref() - .map_or_else(|| self.cwd.clone(), |path| self.cwd.join(path)) - } - - pub(crate) fn compact_prompt(&self) -> &str { - self.compact_prompt - .as_deref() - .unwrap_or(compact::SUMMARIZATION_PROMPT) - } - - pub(crate) fn to_turn_context_item(&self) -> TurnContextItem { - TurnContextItem { - turn_id: Some(self.sub_id.clone()), - trace_id: self.trace_id.clone(), - cwd: self.cwd.to_path_buf(), - current_date: self.current_date.clone(), - timezone: self.timezone.clone(), - approval_policy: self.approval_policy.value(), - sandbox_policy: self.sandbox_policy.get().clone(), - network: self.turn_context_network_item(), - model: self.model_info.slug.clone(), - personality: self.personality, - collaboration_mode: Some(self.collaboration_mode.clone()), - realtime_active: Some(self.realtime_active), - effort: self.reasoning_effort, - summary: self.reasoning_summary, - user_instructions: self.user_instructions.clone(), - developer_instructions: self.developer_instructions.clone(), - final_output_json_schema: self.final_output_json_schema.clone(), - truncation_policy: Some(self.truncation_policy), - } - } - - fn turn_context_network_item(&self) -> Option { - let network = self - .config - .config_layer_stack - .requirements() - .network - .as_ref()?; - Some(TurnContextNetworkItem { - allowed_domains: network - .domains - .as_ref() - .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) - .unwrap_or_default(), - denied_domains: network - .domains - .as_ref() - .and_then(codex_config::NetworkDomainPermissionsToml::denied_domains) - .unwrap_or_default(), - }) - } -} - -fn local_time_context() -> (String, String) { - match iana_time_zone::get_timezone() { - Ok(timezone) => (Local::now().format("%Y-%m-%d").to_string(), timezone), - Err(_) => 
( - Utc::now().format("%Y-%m-%d").to_string(), - "Etc/UTC".to_string(), - ), - } -} - async fn thread_title_from_state_db( state_db: Option<&state_db::StateDbHandle>, - codex_home: &Path, + codex_home: &AbsolutePathBuf, conversation_id: ThreadId, ) -> Option { if let Some(metadata) = state_db @@ -1124,176 +812,6 @@ async fn thread_title_from_state_db( .flatten() } -#[derive(Clone)] -pub(crate) struct SessionConfiguration { - /// Provider identifier ("openai", "openrouter", ...). - provider: ModelProviderInfo, - - collaboration_mode: CollaborationMode, - model_reasoning_summary: Option, - service_tier: Option, - - /// Developer instructions that supplement the base instructions. - developer_instructions: Option, - - /// Model instructions that are appended to the base instructions. - user_instructions: Option, - - /// Personality preference for the model. - personality: Option, - - /// Base instructions for the session. - base_instructions: String, - - /// Compact prompt override. - compact_prompt: Option, - - /// When to escalate for approval for execution - approval_policy: Constrained, - approvals_reviewer: ApprovalsReviewer, - /// How to sandbox commands executed in the system - sandbox_policy: Constrained, - file_system_sandbox_policy: FileSystemSandboxPolicy, - network_sandbox_policy: NetworkSandboxPolicy, - windows_sandbox_level: WindowsSandboxLevel, - - /// Absolute working directory that should be treated as the *root* of the - /// session. All relative paths supplied by the model as well as the - /// execution sandbox are resolved against this directory **instead** of - /// the process-wide current working directory. - cwd: AbsolutePathBuf, - /// Directory containing all Codex state for this session. - codex_home: PathBuf, - /// Optional user-facing name for the thread, updated during the session. - thread_name: Option, - - // TODO(pakrym): Remove config from here - original_config_do_not_use: Arc, - /// Optional service name tag for session metrics. 
- metrics_service_name: Option, - app_server_client_name: Option, - app_server_client_version: Option, - /// Source of the session (cli, vscode, exec, mcp, ...) - session_source: SessionSource, - dynamic_tools: Vec, - persist_extended_history: bool, - inherited_shell_snapshot: Option>, - user_shell_override: Option, -} - -impl SessionConfiguration { - pub(crate) fn codex_home(&self) -> &PathBuf { - &self.codex_home - } - - fn thread_config_snapshot(&self) -> ThreadConfigSnapshot { - ThreadConfigSnapshot { - model: self.collaboration_mode.model().to_string(), - model_provider_id: self.original_config_do_not_use.model_provider_id.clone(), - service_tier: self.service_tier, - approval_policy: self.approval_policy.value(), - approvals_reviewer: self.approvals_reviewer, - sandbox_policy: self.sandbox_policy.get().clone(), - cwd: self.cwd.to_path_buf(), - ephemeral: self.original_config_do_not_use.ephemeral, - reasoning_effort: self.collaboration_mode.reasoning_effort(), - personality: self.personality, - session_source: self.session_source.clone(), - } - } - - pub(crate) fn apply(&self, updates: &SessionSettingsUpdate) -> ConstraintResult { - let mut next_configuration = self.clone(); - let file_system_policy_matches_legacy = self.file_system_sandbox_policy - == FileSystemSandboxPolicy::from_legacy_sandbox_policy( - self.sandbox_policy.get(), - &self.cwd, - ); - if let Some(collaboration_mode) = updates.collaboration_mode.clone() { - next_configuration.collaboration_mode = collaboration_mode; - } - if let Some(summary) = updates.reasoning_summary { - next_configuration.model_reasoning_summary = Some(summary); - } - if let Some(service_tier) = updates.service_tier { - next_configuration.service_tier = service_tier; - } - if let Some(personality) = updates.personality { - next_configuration.personality = Some(personality); - } - if let Some(approval_policy) = updates.approval_policy { - next_configuration.approval_policy.set(approval_policy)?; - } - if let 
Some(approvals_reviewer) = updates.approvals_reviewer { - next_configuration.approvals_reviewer = approvals_reviewer; - } - let mut sandbox_policy_changed = false; - if let Some(sandbox_policy) = updates.sandbox_policy.clone() { - next_configuration.sandbox_policy.set(sandbox_policy)?; - next_configuration.network_sandbox_policy = - NetworkSandboxPolicy::from(next_configuration.sandbox_policy.get()); - sandbox_policy_changed = true; - } - if let Some(windows_sandbox_level) = updates.windows_sandbox_level { - next_configuration.windows_sandbox_level = windows_sandbox_level; - } - - let absolute_cwd = updates - .cwd - .as_ref() - .map(|cwd| { - AbsolutePathBuf::relative_to_current_dir(normalize_for_native_workdir( - cwd.as_path(), - )) - .unwrap_or_else(|e| { - warn!("failed to normalize update cwd: {cwd:?}: {e}"); - self.cwd.clone() - }) - }) - .unwrap_or_else(|| self.cwd.clone()); - - let cwd_changed = absolute_cwd.as_path() != self.cwd.as_path(); - next_configuration.cwd = absolute_cwd; - if sandbox_policy_changed || (cwd_changed && file_system_policy_matches_legacy) { - // Preserve richer split policies across cwd-only updates; only - // rederive when the session is already using the legacy bridge. 
- next_configuration.file_system_sandbox_policy = - FileSystemSandboxPolicy::from_legacy_sandbox_policy( - next_configuration.sandbox_policy.get(), - &next_configuration.cwd, - ); - } - if let Some(app_server_client_name) = updates.app_server_client_name.clone() { - next_configuration.app_server_client_name = Some(app_server_client_name); - } - if let Some(app_server_client_version) = updates.app_server_client_version.clone() { - next_configuration.app_server_client_version = Some(app_server_client_version); - } - Ok(next_configuration) - } -} - -#[derive(Default, Clone)] -pub(crate) struct SessionSettingsUpdate { - pub(crate) cwd: Option, - pub(crate) approval_policy: Option, - pub(crate) approvals_reviewer: Option, - pub(crate) sandbox_policy: Option, - pub(crate) windows_sandbox_level: Option, - pub(crate) collaboration_mode: Option, - pub(crate) reasoning_summary: Option, - pub(crate) service_tier: Option>, - pub(crate) final_output_json_schema: Option>, - pub(crate) personality: Option, - pub(crate) app_server_client_name: Option, - pub(crate) app_server_client_version: Option, -} - -pub(crate) struct AppServerClientMetadata { - pub(crate) client_name: Option, - pub(crate) client_version: Option, -} - impl Session { pub(crate) async fn app_server_client_metadata(&self) -> AppServerClientMetadata { let state = self.state.lock().await; @@ -1306,6 +824,10 @@ impl Session { } } + fn managed_network_proxy_active_for_sandbox_policy(sandbox_policy: &SandboxPolicy) -> bool { + !matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) + } + /// Builds the `x-codex-beta-features` header value for this session. /// /// `ModelClient` is session-scoped and intentionally does not depend on the full `Config`, so @@ -1413,39 +935,7 @@ impl Session { } } - /// Don't expand the number of mutated arguments on config. We are in the process of getting rid of it. 
- pub(crate) fn build_per_turn_config(session_configuration: &SessionConfiguration) -> Config { - // todo(aibrahim): store this state somewhere else so we don't need to mut config - let config = session_configuration.original_config_do_not_use.clone(); - let mut per_turn_config = (*config).clone(); - per_turn_config.cwd = session_configuration.cwd.clone(); - per_turn_config.model_reasoning_effort = - session_configuration.collaboration_mode.reasoning_effort(); - per_turn_config.model_reasoning_summary = session_configuration.model_reasoning_summary; - per_turn_config.service_tier = session_configuration.service_tier; - per_turn_config.personality = session_configuration.personality; - per_turn_config.approvals_reviewer = session_configuration.approvals_reviewer; - let resolved_web_search_mode = resolve_web_search_mode_for_turn( - &per_turn_config.web_search_mode, - session_configuration.sandbox_policy.get(), - ); - if let Err(err) = per_turn_config - .web_search_mode - .set(resolved_web_search_mode) - { - let fallback_value = per_turn_config.web_search_mode.value(); - tracing::warn!( - error = %err, - ?resolved_web_search_mode, - ?fallback_value, - "resolved web_search_mode is disallowed by requirements; keeping constrained value" - ); - } - per_turn_config.features = config.features.clone(); - per_turn_config - } - - pub(crate) async fn codex_home(&self) -> PathBuf { + pub(crate) async fn codex_home(&self) -> AbsolutePathBuf { let state = self.state.lock().await; state.session_configuration.codex_home().clone() } @@ -1481,745 +971,137 @@ impl Session { }); } - #[allow(clippy::too_many_arguments)] - fn make_turn_context( - conversation_id: ThreadId, - auth_manager: Option>, - session_telemetry: &SessionTelemetry, - provider: ModelProviderInfo, - session_configuration: &SessionConfiguration, - user_shell: &shell::Shell, - shell_zsh_path: Option<&PathBuf>, - main_execve_wrapper_exe: Option<&PathBuf>, - per_turn_config: Config, - model_info: ModelInfo, - 
models_manager: &ModelsManager, - network: Option, - environment: Option>, - sub_id: String, - js_repl: Arc, - skills_outcome: Arc, - ) -> TurnContext { - let reasoning_effort = session_configuration.collaboration_mode.reasoning_effort(); - let reasoning_summary = session_configuration - .model_reasoning_summary - .unwrap_or(model_info.default_reasoning_summary); - let session_telemetry = session_telemetry.clone().with_model( - session_configuration.collaboration_mode.model(), - model_info.slug.as_str(), - ); - let session_source = session_configuration.session_source.clone(); - let image_generation_tool_auth_allowed = - image_generation_tool_auth_allowed(auth_manager.as_deref()); - let auth_manager_for_context = auth_manager; - let provider_for_context = provider; - let session_telemetry_for_context = session_telemetry; - let tools_config = ToolsConfig::new(&ToolsConfigParams { - model_info: &model_info, - available_models: &models_manager.try_list_models().unwrap_or_default(), - features: &per_turn_config.features, - image_generation_tool_auth_allowed, - web_search_mode: Some(per_turn_config.web_search_mode.value()), - session_source: session_source.clone(), - sandbox_policy: session_configuration.sandbox_policy.get(), - windows_sandbox_level: session_configuration.windows_sandbox_level, - }) - .with_unified_exec_shell_mode_for_session( - crate::tools::spec::tool_user_shell_type(user_shell), - shell_zsh_path, - main_execve_wrapper_exe, - ) - .with_web_search_config(per_turn_config.web_search_config.clone()) - .with_allow_login_shell(per_turn_config.permissions.allow_login_shell) - .with_has_environment(environment.is_some()) - .with_spawn_agent_usage_hint(per_turn_config.multi_agent_v2.usage_hint_enabled) - .with_spawn_agent_usage_hint_text(per_turn_config.multi_agent_v2.usage_hint_text.clone()) - .with_hide_spawn_agent_metadata(per_turn_config.multi_agent_v2.hide_spawn_agent_metadata) - .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( - 
&per_turn_config.agent_roles, - )); - - let cwd = session_configuration.cwd.clone(); - - let per_turn_config = Arc::new(per_turn_config); - let turn_metadata_state = Arc::new(TurnMetadataState::new( - conversation_id.to_string(), - sub_id.clone(), - cwd.to_path_buf(), - session_configuration.sandbox_policy.get(), - session_configuration.windows_sandbox_level, - )); - let (current_date, timezone) = local_time_context(); - TurnContext { - sub_id, - trace_id: current_span_trace_id(), - realtime_active: false, - config: per_turn_config.clone(), - auth_manager: auth_manager_for_context, - model_info: model_info.clone(), - session_telemetry: session_telemetry_for_context, - provider: provider_for_context, - reasoning_effort, - reasoning_summary, - session_source, - environment, - cwd, - current_date: Some(current_date), - timezone: Some(timezone), - app_server_client_name: session_configuration.app_server_client_name.clone(), - developer_instructions: session_configuration.developer_instructions.clone(), - compact_prompt: session_configuration.compact_prompt.clone(), - user_instructions: session_configuration.user_instructions.clone(), - collaboration_mode: session_configuration.collaboration_mode.clone(), - personality: session_configuration.personality, - approval_policy: session_configuration.approval_policy.clone(), - sandbox_policy: session_configuration.sandbox_policy.clone(), - file_system_sandbox_policy: session_configuration.file_system_sandbox_policy.clone(), - network_sandbox_policy: session_configuration.network_sandbox_policy, - network, - windows_sandbox_level: session_configuration.windows_sandbox_level, - shell_environment_policy: per_turn_config.permissions.shell_environment_policy.clone(), - tools_config, - features: per_turn_config.features.clone(), - ghost_snapshot: per_turn_config.ghost_snapshot.clone(), - final_output_json_schema: None, - codex_self_exe: per_turn_config.codex_self_exe.clone(), - codex_linux_sandbox_exe: 
per_turn_config.codex_linux_sandbox_exe.clone(), - tool_call_gate: Arc::new(ReadinessFlag::new()), - truncation_policy: model_info.truncation_policy.into(), - js_repl, - dynamic_tools: session_configuration.dynamic_tools.clone(), - turn_metadata_state, - turn_skills: TurnSkillsContext::new(skills_outcome), - turn_timing_state: Arc::new(TurnTimingState::default()), + fn start_agent_identity_registration(self: &Arc) { + if !self.services.agent_identity_manager.is_enabled() { + return; } + + let weak_sess = Arc::downgrade(self); + let mut auth_state_rx = self.services.auth_manager.subscribe_auth_state(); + tokio::spawn(async move { + loop { + let Some(sess) = weak_sess.upgrade() else { + return; + }; + match sess + .services + .agent_identity_manager + .ensure_registered_identity() + .await + { + Ok(Some(_)) => return, + Ok(None) => { + drop(sess); + if auth_state_rx.changed().await.is_err() { + return; + } + } + Err(error) => { + sess.fail_agent_identity_registration(error).await; + return; + } + } + } + }); } - #[instrument(name = "session_init", level = "info", skip_all)] - #[allow(clippy::too_many_arguments)] - async fn new( - mut session_configuration: SessionConfiguration, - config: Arc, - auth_manager: Arc, - models_manager: Arc, - exec_policy: Arc, - tx_event: Sender, - agent_status: watch::Sender, - initial_history: InitialHistory, - session_source: SessionSource, - skills_manager: Arc, - plugins_manager: Arc, - mcp_manager: Arc, - skills_watcher: Arc, - agent_control: AgentControl, - environment: Option>, - analytics_events_client: Option, - ) -> anyhow::Result> { - debug!( - "Configuring session: model={}; provider={:?}", - session_configuration.collaboration_mode.model(), - session_configuration.provider + async fn fail_agent_identity_registration(self: &Arc, error: anyhow::Error) { + warn!(error = %error, "agent identity registration failed"); + let message = format!( + "Agent identity registration failed while `features.use_agent_identity` is enabled: 
{error}" ); - let forked_from_id = initial_history.forked_from_id(); - - let (conversation_id, rollout_params) = match &initial_history { - InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => { - let conversation_id = ThreadId::default(); - ( - conversation_id, - RolloutRecorderParams::new( - conversation_id, - forked_from_id, - session_source, - BaseInstructions { - text: session_configuration.base_instructions.clone(), - }, - session_configuration.dynamic_tools.clone(), - if session_configuration.persist_extended_history { - EventPersistenceMode::Extended - } else { - EventPersistenceMode::Limited - }, - ), - ) - } - InitialHistory::Resumed(resumed_history) => ( - resumed_history.conversation_id, - RolloutRecorderParams::resume( - resumed_history.rollout_path.clone(), - if session_configuration.persist_extended_history { - EventPersistenceMode::Extended - } else { - EventPersistenceMode::Limited - }, - ), - ), - }; - let window_generation = match &initial_history { - InitialHistory::Resumed(resumed_history) => u64::try_from( - resumed_history - .history - .iter() - .filter(|item| matches!(item, RolloutItem::Compacted(_))) - .count(), - ) - .unwrap_or(u64::MAX), - InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => 0, - }; - let state_builder = match &initial_history { - InitialHistory::Resumed(resumed) => metadata::builder_from_items( - resumed.history.as_slice(), - resumed.rollout_path.as_path(), - ), - InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => None, - }; - - // Kick off independent async setup tasks in parallel to reduce startup latency. 
- // - // - initialize RolloutRecorder with new or resumed session info - // - perform default shell discovery - // - load history metadata (skipped for subagents) - let rollout_fut = async { - if config.ephemeral { - Ok::<_, anyhow::Error>((None, None)) - } else { - let state_db_ctx = state_db::init(&config).await; - let rollout_recorder = RolloutRecorder::new( - &config, - rollout_params, - state_db_ctx.clone(), - state_builder.clone(), - ) - .await?; - Ok((Some(rollout_recorder), state_db_ctx)) - } - } - .instrument(info_span!( - "session_init.rollout", - otel.name = "session_init.rollout", - session_init.ephemeral = config.ephemeral, - )); - - let is_subagent = matches!( - session_configuration.session_source, - SessionSource::SubAgent(_) - ); - let history_meta_fut = async { - if is_subagent { - (0, 0) - } else { - crate::message_history::history_metadata(&config).await - } - } - .instrument(info_span!( - "session_init.history_metadata", - otel.name = "session_init.history_metadata", - session_init.is_subagent = is_subagent, - )); - let auth_manager_clone = Arc::clone(&auth_manager); - let config_for_mcp = Arc::clone(&config); - let mcp_manager_for_mcp = Arc::clone(&mcp_manager); - let auth_and_mcp_fut = async move { - let auth = auth_manager_clone.auth().await; - let mcp_servers = mcp_manager_for_mcp.effective_servers(&config_for_mcp, auth.as_ref()); - let auth_statuses = compute_auth_statuses( - mcp_servers.iter(), - config_for_mcp.mcp_oauth_credentials_store_mode, - ) - .await; - (auth, mcp_servers, auth_statuses) - } - .instrument(info_span!( - "session_init.auth_mcp", - otel.name = "session_init.auth_mcp", - )); - - // Join all independent futures. 
- let ( - rollout_recorder_and_state_db, - (history_log_id, history_entry_count), - (auth, mcp_servers, auth_statuses), - ) = tokio::join!(rollout_fut, history_meta_fut, auth_and_mcp_fut); - - let (rollout_recorder, state_db_ctx) = rollout_recorder_and_state_db.map_err(|e| { - error!("failed to initialize rollout recorder: {e:#}"); - e - })?; - let rollout_path = rollout_recorder - .as_ref() - .map(|rec| rec.rollout_path().to_path_buf()); - - let mut post_session_configured_events = Vec::::new(); - - for usage in config.features.legacy_feature_usages() { - post_session_configured_events.push(Event { - id: INITIAL_SUBMIT_ID.to_owned(), - msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { - summary: usage.summary.clone(), - details: usage.details.clone(), - }), - }); - } - if crate::config::uses_deprecated_instructions_file(&config.config_layer_stack) { - post_session_configured_events.push(Event { - id: INITIAL_SUBMIT_ID.to_owned(), - msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { - summary: "`experimental_instructions_file` is deprecated and ignored. Use `model_instructions_file` instead." - .to_string(), - details: Some( - "Move the setting to `model_instructions_file` in config.toml (or under a profile) to load instructions from a file." 
- .to_string(), - ), - }), - }); - } - for message in &config.startup_warnings { - post_session_configured_events.push(Event { - id: "".to_owned(), - msg: EventMsg::Warning(WarningEvent { - message: message.clone(), - }), - }); - } - let config_path = config.codex_home.join(CONFIG_TOML_FILE); - if let Some(event) = unstable_features_warning_event( - config - .config_layer_stack - .effective_config() - .get("features") - .and_then(TomlValue::as_table), - config.suppress_unstable_features_warning, - &config.features, - &config_path.display().to_string(), - ) { - post_session_configured_events.push(event); - } - if config.permissions.approval_policy.value() == AskForApproval::OnFailure { - post_session_configured_events.push(Event { - id: "".to_owned(), - msg: EventMsg::Warning(WarningEvent { - message: "`on-failure` approval policy is deprecated and will be removed in a future release. Use `on-request` for interactive approvals or `never` for non-interactive runs.".to_string(), - }), - }); - } - - let auth = auth.as_ref(); - let auth_mode = auth.map(CodexAuth::auth_mode).map(TelemetryAuthMode::from); - let account_id = auth.and_then(CodexAuth::get_account_id); - let account_email = auth.and_then(CodexAuth::get_account_email); - let originator = originator().value; - let terminal_type = user_agent(); - let session_model = session_configuration.collaboration_mode.model().to_string(); - let auth_env_telemetry = collect_auth_env_telemetry( - &session_configuration.provider, - auth_manager.codex_api_key_env_enabled(), - ); - let mut session_telemetry = SessionTelemetry::new( - conversation_id, - session_model.as_str(), - session_model.as_str(), - account_id.clone(), - account_email.clone(), - auth_mode, - originator.clone(), - config.otel.log_user_prompt, - terminal_type.clone(), - session_configuration.session_source.clone(), - ) - .with_auth_env(auth_env_telemetry.to_otel_metadata()); - if let Some(service_name) = session_configuration.metrics_service_name.as_deref() { 
- session_telemetry = session_telemetry.with_metrics_service_name(service_name); - } - let network_proxy_audit_metadata = NetworkProxyAuditMetadata { - conversation_id: Some(conversation_id.to_string()), - app_version: Some(env!("CARGO_PKG_VERSION").to_string()), - user_account_id: account_id, - auth_mode: auth_mode.map(|mode| mode.to_string()), - originator: Some(originator), - user_email: account_email, - terminal_type: Some(terminal_type), - model: Some(session_model.clone()), - slug: Some(session_model), - }; - config.features.emit_metrics(&session_telemetry); - session_telemetry.counter( - THREAD_STARTED_METRIC, - /*inc*/ 1, - &[( - "is_git", - if get_git_repo_root(&session_configuration.cwd).is_some() { - "true" - } else { - "false" - }, - )], - ); - - session_telemetry.conversation_starts( - config.model_provider.name.as_str(), - session_configuration.collaboration_mode.reasoning_effort(), - config - .model_reasoning_summary - .unwrap_or(ReasoningSummaryConfig::Auto), - config.model_context_window, - config.model_auto_compact_token_limit, - config.permissions.approval_policy.value(), - config.permissions.sandbox_policy.get().clone(), - mcp_servers.keys().map(String::as_str).collect(), - config.active_profile.clone(), - ); - - let use_zsh_fork_shell = config.features.enabled(Feature::ShellZshFork); - let mut default_shell = if let Some(user_shell_override) = - session_configuration.user_shell_override.clone() - { - user_shell_override - } else if use_zsh_fork_shell { - let zsh_path = config.zsh_path.as_ref().ok_or_else(|| { - anyhow::anyhow!( - "zsh fork feature enabled, but `zsh_path` is not configured; set `zsh_path` in config.toml" - ) - })?; - let zsh_path = zsh_path.to_path_buf(); - shell::get_shell(shell::ShellType::Zsh, Some(&zsh_path)).ok_or_else(|| { - anyhow::anyhow!( - "zsh fork feature enabled, but zsh_path `{}` is not usable; set `zsh_path` to a valid zsh executable", - zsh_path.display() - ) - })? 
- } else { - shell::default_user_shell() - }; - // Create the mutable state for the Session. - let shell_snapshot_tx = if config.features.enabled(Feature::ShellSnapshot) { - if let Some(snapshot) = session_configuration.inherited_shell_snapshot.clone() { - let (tx, rx) = watch::channel(Some(snapshot)); - default_shell.shell_snapshot = rx; - tx - } else { - ShellSnapshot::start_snapshotting( - config.codex_home.clone(), - conversation_id, - session_configuration.cwd.to_path_buf(), - &mut default_shell, - session_telemetry.clone(), - ) - } - } else { - let (tx, rx) = watch::channel(None); - default_shell.shell_snapshot = rx; - tx - }; - let thread_name = - thread_title_from_state_db(state_db_ctx.as_ref(), &config.codex_home, conversation_id) - .instrument(info_span!( - "session_init.thread_name_lookup", - otel.name = "session_init.thread_name_lookup", - )) - .await; - session_configuration.thread_name = thread_name.clone(); - let state = SessionState::new(session_configuration.clone()); - let managed_network_requirements_enabled = config.managed_network_requirements_enabled(); - let network_approval = Arc::new(NetworkApprovalService::default()); - // The managed proxy can call back into core for allowlist-miss decisions. 
- let network_policy_decider_session = if managed_network_requirements_enabled { - config - .permissions - .network - .as_ref() - .map(|_| Arc::new(RwLock::new(std::sync::Weak::::new()))) - } else { - None - }; - let blocked_request_observer = if managed_network_requirements_enabled { - config - .permissions - .network - .as_ref() - .map(|_| build_blocked_request_observer(Arc::clone(&network_approval))) - } else { - None - }; - let network_policy_decider = - network_policy_decider_session - .as_ref() - .map(|network_policy_decider_session| { - build_network_policy_decider( - Arc::clone(&network_approval), - Arc::clone(network_policy_decider_session), - ) - }); - let (network_proxy, session_network_proxy) = - if let Some(spec) = config.permissions.network.as_ref() { - let current_exec_policy = exec_policy.current(); - let (network_proxy, session_network_proxy) = Self::start_managed_network_proxy( - spec, - current_exec_policy.as_ref(), - config.permissions.sandbox_policy.get(), - network_policy_decider.as_ref().map(Arc::clone), - blocked_request_observer.as_ref().map(Arc::clone), - managed_network_requirements_enabled, - network_proxy_audit_metadata, - ) - .instrument(info_span!( - "session_init.network_proxy", - otel.name = "session_init.network_proxy", - session_init.managed_network_requirements_enabled = - managed_network_requirements_enabled, - )) - .await?; - (Some(network_proxy), Some(session_network_proxy)) - } else { - (None, None) - }; - - let mut hook_shell_argv = - default_shell.derive_exec_args("", /*use_login_shell*/ false); - let hook_shell_program = hook_shell_argv.remove(0); - let _ = hook_shell_argv.pop(); - let hooks = Hooks::new(HooksConfig { - legacy_notify_argv: config.notify.clone(), - feature_enabled: config.features.enabled(Feature::CodexHooks), - config_layer_stack: Some(config.config_layer_stack.clone()), - shell_program: Some(hook_shell_program), - shell_args: hook_shell_argv, - }); - for warning in hooks.startup_warnings() { - 
post_session_configured_events.push(Event { - id: INITIAL_SUBMIT_ID.to_owned(), - msg: EventMsg::Warning(WarningEvent { - message: warning.clone(), - }), - }); - } - - let installation_id = resolve_installation_id(&config.codex_home).await?; - let analytics_events_client = analytics_events_client.unwrap_or_else(|| { - AnalyticsEventsClient::new( - Arc::clone(&auth_manager), - config.chatgpt_base_url.trim_end_matches('/').to_string(), - config.analytics_enabled, - ) - }); - let services = SessionServices { - // Initialize the MCP connection manager with an uninitialized - // instance. It will be replaced with one created via - // McpConnectionManager::new() once all its constructor args are - // available. This also ensures `SessionConfigured` is emitted - // before any MCP-related events. It is reasonable to consider - // changing this to use Option or OnceCell, though the current - // setup is straightforward enough and performs well. - mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::new_uninitialized( - &config.permissions.approval_policy, - &config.permissions.sandbox_policy, - ))), - mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()), - unified_exec_manager: UnifiedExecProcessManager::new( - config.background_terminal_max_timeout, - ), - shell_zsh_path: config.zsh_path.clone(), - main_execve_wrapper_exe: config.main_execve_wrapper_exe.clone(), - analytics_events_client, - hooks, - rollout: Mutex::new(rollout_recorder), - user_shell: Arc::new(default_shell), - shell_snapshot_tx, - show_raw_agent_reasoning: config.show_raw_agent_reasoning, - exec_policy, - auth_manager: Arc::clone(&auth_manager), - session_telemetry, - models_manager: Arc::clone(&models_manager), - tool_approvals: Mutex::new(ApprovalStore::default()), - guardian_rejections: Mutex::new(HashMap::new()), - skills_manager, - plugins_manager: Arc::clone(&plugins_manager), - mcp_manager: Arc::clone(&mcp_manager), - skills_watcher, - agent_control, - network_proxy, - 
network_approval: Arc::clone(&network_approval), - state_db: state_db_ctx.clone(), - model_client: ModelClient::new( - Some(Arc::clone(&auth_manager)), - conversation_id, - installation_id, - session_configuration.provider.clone(), - session_configuration.session_source.clone(), - config.model_verbosity, - config.features.enabled(Feature::EnableRequestCompression), - config.features.enabled(Feature::RuntimeMetrics), - Self::build_model_client_beta_features_header(config.as_ref()), - ), - code_mode_service: crate::tools::code_mode::CodeModeService::new( - config.js_repl_node_path.clone(), - ), - environment, - }; - services - .model_client - .set_window_generation(window_generation); - let js_repl = Arc::new(JsReplHandle::with_node_path( - config.js_repl_node_path.clone(), - config.js_repl_node_module_dirs.clone(), - )); - let (out_of_band_elicitation_paused, _out_of_band_elicitation_paused_rx) = - watch::channel(false); - - let (mailbox, mailbox_rx) = Mailbox::new(); - let sess = Arc::new(Session { - conversation_id, - tx_event: tx_event.clone(), - agent_status, - out_of_band_elicitation_paused, - state: Mutex::new(state), - managed_network_proxy_refresh_lock: Mutex::new(()), - features: config.features.clone(), - pending_mcp_server_refresh_config: Mutex::new(None), - conversation: Arc::new(RealtimeConversationManager::new()), - active_turn: Mutex::new(None), - mailbox, - mailbox_rx: Mutex::new(mailbox_rx), - idle_pending_input: Mutex::new(Vec::new()), - guardian_review_session: GuardianReviewSessionManager::default(), - services, - js_repl, - next_internal_sub_id: AtomicU64::new(0), - }); - if let Some(network_policy_decider_session) = network_policy_decider_session { - let mut guard = network_policy_decider_session.write().await; - *guard = Arc::downgrade(&sess); - } - // Dispatch the SessionConfiguredEvent first and then report any errors. - // If resuming, include converted initial messages in the payload so UIs can render them immediately. 
- let initial_messages = initial_history.get_event_msgs(); - let events = std::iter::once(Event { - id: INITIAL_SUBMIT_ID.to_owned(), - msg: EventMsg::SessionConfigured(SessionConfiguredEvent { - session_id: conversation_id, - forked_from_id, - thread_name: session_configuration.thread_name.clone(), - model: session_configuration.collaboration_mode.model().to_string(), - model_provider_id: config.model_provider_id.clone(), - service_tier: session_configuration.service_tier, - approval_policy: session_configuration.approval_policy.value(), - approvals_reviewer: session_configuration.approvals_reviewer, - sandbox_policy: session_configuration.sandbox_policy.get().clone(), - cwd: session_configuration.cwd.to_path_buf(), - reasoning_effort: session_configuration.collaboration_mode.reasoning_effort(), - history_log_id, - history_entry_count, - initial_messages, - network_proxy: session_network_proxy, - rollout_path, + self.send_event_raw(Event { + id: self.next_internal_sub_id(), + msg: EventMsg::Error(ErrorEvent { + message, + codex_error_info: Some(CodexErrorInfo::Other), }), }) - .chain(post_session_configured_events.into_iter()); - for event in events { - sess.send_event_raw(event).await; - } - - // Start the watcher after SessionConfigured so it cannot emit earlier events. - sess.start_skills_watcher_listener(); - // Construct sandbox_state before MCP startup so it can be sent to each - // MCP server immediately after it becomes ready (avoiding blocking). 
- let sandbox_state = SandboxState { - sandbox_policy: session_configuration.sandbox_policy.get().clone(), - codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), - sandbox_cwd: session_configuration.cwd.to_path_buf(), - use_legacy_landlock: config.features.use_legacy_landlock(), - }; - let mut required_mcp_servers: Vec = mcp_servers - .iter() - .filter(|(_, server)| server.enabled && server.required) - .map(|(name, _)| name.clone()) - .collect(); - required_mcp_servers.sort(); - let enabled_mcp_server_count = mcp_servers.values().filter(|server| server.enabled).count(); - let required_mcp_server_count = required_mcp_servers.len(); - let tool_plugin_provenance = mcp_manager.tool_plugin_provenance(config.as_ref()); - { - let mut cancel_guard = sess.services.mcp_startup_cancellation_token.lock().await; - cancel_guard.cancel(); - *cancel_guard = CancellationToken::new(); - } - let (mcp_connection_manager, cancel_token) = McpConnectionManager::new( - &mcp_servers, - config.mcp_oauth_credentials_store_mode, - auth_statuses.clone(), - &session_configuration.approval_policy, - INITIAL_SUBMIT_ID.to_owned(), - tx_event.clone(), - sandbox_state, - config.codex_home.clone(), - codex_apps_tools_cache_key(auth), - tool_plugin_provenance, - ) - .instrument(info_span!( - "session_init.mcp_manager_init", - otel.name = "session_init.mcp_manager_init", - session_init.enabled_mcp_server_count = enabled_mcp_server_count, - session_init.required_mcp_server_count = required_mcp_server_count, - )) .await; - { - let mut manager_guard = sess.services.mcp_connection_manager.write().await; - *manager_guard = mcp_connection_manager; - } - { - let mut cancel_guard = sess.services.mcp_startup_cancellation_token.lock().await; - if cancel_guard.is_cancelled() { - cancel_token.cancel(); - } - *cancel_guard = cancel_token; - } - if !required_mcp_servers.is_empty() { - let failures = sess - .services - .mcp_connection_manager - .read() - .await - 
.required_startup_failures(&required_mcp_servers) - .instrument(info_span!( - "session_init.required_mcp_wait", - otel.name = "session_init.required_mcp_wait", - session_init.required_mcp_server_count = required_mcp_server_count, - )) - .await; - if !failures.is_empty() { - let details = failures - .iter() - .map(|failure| format!("{}: {}", failure.server, failure.error)) - .collect::>() - .join("; "); - return Err(anyhow::anyhow!( - "required MCP servers failed to initialize: {details}" - )); - } - } - sess.schedule_startup_prewarm(session_configuration.base_instructions.clone()) - .await; - let session_start_source = match &initial_history { - InitialHistory::Resumed(_) => codex_hooks::SessionStartSource::Resume, - InitialHistory::New | InitialHistory::Forked(_) => { - codex_hooks::SessionStartSource::Startup - } - InitialHistory::Cleared => codex_hooks::SessionStartSource::Clear, - }; + } - // record_initial_history can emit events. We record only after the SessionConfiguredEvent is emitted. 
- sess.record_initial_history(initial_history).await; + async fn cached_agent_task_for_current_binding(&self) -> Option { + let agent_task = { + let state = self.state.lock().await; + state.agent_task() + }?; + + if self + .services + .agent_identity_manager + .task_matches_current_binding(&agent_task) + .await { - let mut state = sess.state.lock().await; - state.set_pending_session_start_source(Some(session_start_source)); + debug!( + agent_runtime_id = %agent_task.agent_runtime_id, + task_id = %agent_task.task_id, + "reusing cached agent task" + ); + return Some(agent_task); } - memories::start_memories_startup_task( - &sess, - Arc::clone(&config), - &session_configuration.session_source, + debug!( + agent_runtime_id = %agent_task.agent_runtime_id, + task_id = %agent_task.task_id, + "discarding cached agent task because auth binding changed" ); + let mut state = self.state.lock().await; + if state.agent_task().as_ref() == Some(&agent_task) { + state.clear_agent_task(); + } + None + } - Ok(sess) + async fn ensure_agent_task_registered(&self) -> anyhow::Result> { + if let Some(agent_task) = self.cached_agent_task_for_current_binding().await { + return Ok(Some(agent_task)); + } + + for _ in 0..2 { + let Some(agent_task) = self.services.agent_identity_manager.register_task().await? 
+ else { + return Ok(None); + }; + + if !self + .services + .agent_identity_manager + .task_matches_current_binding(&agent_task) + .await + { + debug!( + agent_runtime_id = %agent_task.agent_runtime_id, + task_id = %agent_task.task_id, + "discarding newly registered agent task because auth binding changed" + ); + continue; + } + + { + let mut state = self.state.lock().await; + if let Some(existing_agent_task) = state.agent_task() { + if existing_agent_task.has_same_binding(&agent_task) { + return Ok(Some(existing_agent_task)); + } + debug!( + agent_runtime_id = %existing_agent_task.agent_runtime_id, + task_id = %existing_agent_task.task_id, + "replacing cached agent task because auth binding changed" + ); + } + state.set_agent_task(agent_task.clone()); + } + + info!( + thread_id = %self.conversation_id, + agent_runtime_id = %agent_task.agent_runtime_id, + task_id = %agent_task.task_id, + "registered agent task for thread" + ); + return Ok(Some(agent_task)); + } + + Ok(None) } pub(crate) fn get_tx_event(&self) -> Sender { @@ -2299,6 +1181,17 @@ impl Session { state.token_info().map(|info| info.total_token_usage) } + /// Returns the complete token usage snapshot currently cached for this session. + /// + /// Resume and fork reconstruction seed this state from the last persisted rollout + /// `TokenCount` event. Callers that need to replay restored usage to a client + /// should use this accessor instead of `total_token_usage`, because the app-server + /// notification includes both total and last-turn usage. 
+ pub(crate) async fn token_usage_info(&self) -> Option { + let state = self.state.lock().await; + state.token_info() + } + pub(crate) async fn get_estimated_token_count( &self, turn_context: &TurnContext, @@ -2344,6 +1237,11 @@ impl Session { SessionSource::SubAgent(_) ) }; + let has_prior_user_turns = initial_history_has_prior_user_turns(&conversation_history); + { + let mut state = self.state.lock().await; + state.set_next_turn_is_first(!has_prior_user_turns); + } match conversation_history { InitialHistory::New | InitialHistory::Cleared => { // Defer initial context insertion until the first real turn starts so @@ -2458,9 +1356,9 @@ impl Session { fn maybe_refresh_shell_snapshot_for_cwd( &self, - previous_cwd: &Path, - next_cwd: &Path, - codex_home: &Path, + previous_cwd: &AbsolutePathBuf, + next_cwd: &AbsolutePathBuf, + codex_home: &AbsolutePathBuf, session_source: &SessionSource, ) { if previous_cwd == next_cwd { @@ -2479,9 +1377,9 @@ impl Session { } ShellSnapshot::refresh_snapshot( - codex_home.to_path_buf(), + codex_home.clone(), self.conversation_id, - next_cwd.to_path_buf(), + next_cwd.clone(), self.services.user_shell.as_ref().clone(), self.services.shell_snapshot_tx.clone(), self.services.session_telemetry.clone(), @@ -2525,173 +1423,6 @@ impl Session { } } - pub(crate) async fn new_turn_with_sub_id( - &self, - sub_id: String, - updates: SessionSettingsUpdate, - ) -> ConstraintResult> { - let ( - session_configuration, - sandbox_policy_changed, - previous_cwd, - codex_home, - session_source, - ) = { - let mut state = self.state.lock().await; - match state.session_configuration.clone().apply(&updates) { - Ok(next) => { - let previous_cwd = state.session_configuration.cwd.clone(); - let sandbox_policy_changed = - state.session_configuration.sandbox_policy != next.sandbox_policy; - let codex_home = next.codex_home.clone(); - let session_source = next.session_source.clone(); - state.session_configuration = next.clone(); - ( - next, - 
sandbox_policy_changed, - previous_cwd, - codex_home, - session_source, - ) - } - Err(err) => { - drop(state); - self.send_event_raw(Event { - id: sub_id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: err.to_string(), - codex_error_info: Some(CodexErrorInfo::BadRequest), - }), - }) - .await; - return Err(err); - } - } - }; - - self.maybe_refresh_shell_snapshot_for_cwd( - &previous_cwd, - &session_configuration.cwd, - &codex_home, - &session_source, - ); - - Ok(self - .new_turn_from_configuration( - sub_id, - session_configuration, - updates.final_output_json_schema, - sandbox_policy_changed, - ) - .await) - } - - async fn new_turn_from_configuration( - &self, - sub_id: String, - session_configuration: SessionConfiguration, - final_output_json_schema: Option>, - sandbox_policy_changed: bool, - ) -> Arc { - let per_turn_config = Self::build_per_turn_config(&session_configuration); - { - let mcp_connection_manager = self.services.mcp_connection_manager.read().await; - mcp_connection_manager.set_approval_policy(&session_configuration.approval_policy); - mcp_connection_manager - .set_sandbox_policy(per_turn_config.permissions.sandbox_policy.get()); - } - - if sandbox_policy_changed { - self.refresh_managed_network_proxy_for_current_sandbox_policy() - .await; - let sandbox_state = SandboxState { - sandbox_policy: per_turn_config.permissions.sandbox_policy.get().clone(), - codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(), - sandbox_cwd: per_turn_config.cwd.to_path_buf(), - use_legacy_landlock: per_turn_config.features.use_legacy_landlock(), - }; - if let Err(e) = self - .services - .mcp_connection_manager - .read() - .await - .notify_sandbox_state_change(&sandbox_state) - .await - { - warn!("Failed to notify sandbox state change to MCP servers: {e:#}"); - } - } - - let model_info = self - .services - .models_manager - .get_model_info( - session_configuration.collaboration_mode.model(), - &per_turn_config.to_models_manager_config(), - ) - 
.await; - let plugin_outcome = self - .services - .plugins_manager - .plugins_for_config(&per_turn_config); - let effective_skill_roots = plugin_outcome.effective_skill_roots(); - let skills_input = skills_load_input_from_config(&per_turn_config, effective_skill_roots); - let skills_outcome = Arc::new( - self.services - .skills_manager - .skills_for_config(&skills_input), - ); - let mut turn_context: TurnContext = Self::make_turn_context( - self.conversation_id, - Some(Arc::clone(&self.services.auth_manager)), - &self.services.session_telemetry, - session_configuration.provider.clone(), - &session_configuration, - self.services.user_shell.as_ref(), - self.services.shell_zsh_path.as_ref(), - self.services.main_execve_wrapper_exe.as_ref(), - per_turn_config, - model_info, - &self.services.models_manager, - self.services - .network_proxy - .as_ref() - .map(StartedNetworkProxy::proxy), - self.services.environment.clone(), - sub_id, - Arc::clone(&self.js_repl), - skills_outcome, - ); - turn_context.realtime_active = self.conversation.running_state().await.is_some(); - - if let Some(final_schema) = final_output_json_schema { - turn_context.final_output_json_schema = final_schema; - } - let turn_context = Arc::new(turn_context); - turn_context.turn_metadata_state.spawn_git_enrichment_task(); - turn_context - } - - pub(crate) async fn maybe_emit_unknown_model_warning_for_turn(&self, tc: &TurnContext) { - if tc.model_info.used_fallback_model_metadata { - self.send_event( - tc, - EventMsg::Warning(WarningEvent { - message: format!( - "Model metadata for `{}` not found. 
Defaulting to fallback metadata; this can degrade performance and cause issues.", - tc.model_info.slug - ), - }), - ) - .await; - } - } - - pub(crate) async fn new_default_turn(&self) -> Arc { - self.new_default_turn_with_sub_id(self.next_internal_sub_id()) - .await - } - pub(crate) async fn set_session_startup_prewarm( &self, startup_prewarm: SessionStartupPrewarmHandle, @@ -2744,14 +1475,6 @@ impl Session { } }; - let config_toml_path = match AbsolutePathBuf::try_from(config_toml_path) { - Ok(path) => path, - Err(err) => { - warn!("failed to resolve user config path while reloading layer: {err}"); - return; - } - }; - let mut state = self.state.lock().await; let mut config = (*state.session_configuration.original_config_do_not_use).clone(); config.config_layer_stack = config @@ -2762,20 +1485,6 @@ impl Session { self.services.plugins_manager.clear_cache(); } - pub(crate) async fn new_default_turn_with_sub_id(&self, sub_id: String) -> Arc { - let session_configuration = { - let state = self.state.lock().await; - state.session_configuration.clone() - }; - self.new_turn_from_configuration( - sub_id, - session_configuration, - /*final_output_json_schema*/ None, - /*sandbox_policy_changed*/ false, - ) - .await - } - async fn build_settings_update_items( &self, reference_context_item: Option<&TurnContextItem>, @@ -3149,7 +1858,7 @@ impl Session { call_id: String, approval_id: Option, command: Vec, - cwd: PathBuf, + cwd: AbsolutePathBuf, reason: Option, network_approval_context: Option, proposed_execpolicy_amendment: Option, @@ -3337,85 +2046,6 @@ impl Session { rx_response.await.ok() } - pub async fn request_mcp_server_elicitation( - &self, - turn_context: &TurnContext, - request_id: RequestId, - params: McpServerElicitationRequestParams, - ) -> Option { - let server_name = params.server_name.clone(); - let request = match params.request { - McpServerElicitationRequest::Form { - meta, - message, - requested_schema, - } => { - let requested_schema = match 
serde_json::to_value(requested_schema) { - Ok(requested_schema) => requested_schema, - Err(err) => { - warn!( - "failed to serialize MCP elicitation schema for server_name: {server_name}, request_id: {request_id}: {err:#}" - ); - return None; - } - }; - codex_protocol::approvals::ElicitationRequest::Form { - meta, - message, - requested_schema, - } - } - McpServerElicitationRequest::Url { - meta, - message, - url, - elicitation_id, - } => codex_protocol::approvals::ElicitationRequest::Url { - meta, - message, - url, - elicitation_id, - }, - }; - - let (tx_response, rx_response) = oneshot::channel(); - let prev_entry = { - let mut active = self.active_turn.lock().await; - match active.as_mut() { - Some(at) => { - let mut ts = at.turn_state.lock().await; - ts.insert_pending_elicitation( - server_name.clone(), - request_id.clone(), - tx_response, - ) - } - None => None, - } - }; - if prev_entry.is_some() { - warn!( - "Overwriting existing pending elicitation for server_name: {server_name}, request_id: {request_id}" - ); - } - let id = match request_id { - rmcp::model::NumberOrString::String(value) => { - codex_protocol::mcp::RequestId::String(value.to_string()) - } - rmcp::model::NumberOrString::Number(value) => { - codex_protocol::mcp::RequestId::Integer(value) - } - }; - let event = EventMsg::ElicitationRequest(ElicitationRequestEvent { - turn_id: params.turn_id, - server_name, - id, - request, - }); - self.send_event(turn_context, event).await; - rx_response.await.ok() - } - pub async fn notify_user_input_response( &self, sub_id: &str, @@ -3536,37 +2166,6 @@ impl Session { } } - pub async fn resolve_elicitation( - &self, - server_name: String, - id: RequestId, - response: ElicitationResponse, - ) -> anyhow::Result<()> { - let entry = { - let mut active = self.active_turn.lock().await; - match active.as_mut() { - Some(at) => { - let mut ts = at.turn_state.lock().await; - ts.remove_pending_elicitation(&server_name, &id) - } - None => None, - } - }; - if let 
Some(tx_response) = entry { - tx_response - .send(response) - .map_err(|e| anyhow::anyhow!("failed to send elicitation response: {e:?}"))?; - return Ok(()); - } - - self.services - .mcp_connection_manager - .read() - .await - .resolve_elicitation(server_name, id, response) - .await - } - /// Records input items: always append to conversation history and /// persist these response items to rollout. pub(crate) async fn record_conversation_items( @@ -3827,7 +2426,8 @@ impl Session { let loaded_plugins = self .services .plugins_manager - .plugins_for_config(&turn_context.config); + .plugins_for_config(&turn_context.config) + .await; if let Some(plugin_section) = render_plugins_section(loaded_plugins.capability_summaries()) { developer_sections.push(plugin_section); @@ -4363,73 +2963,6 @@ impl Session { self.mailbox_rx.lock().await.has_pending() } - pub async fn list_resources( - &self, - server: &str, - params: Option, - ) -> anyhow::Result { - self.services - .mcp_connection_manager - .read() - .await - .list_resources(server, params) - .await - } - - pub async fn list_resource_templates( - &self, - server: &str, - params: Option, - ) -> anyhow::Result { - self.services - .mcp_connection_manager - .read() - .await - .list_resource_templates(server, params) - .await - } - - pub async fn read_resource( - &self, - server: &str, - params: ReadResourceRequestParams, - ) -> anyhow::Result { - self.services - .mcp_connection_manager - .read() - .await - .read_resource(server, params) - .await - } - - pub async fn call_tool( - &self, - server: &str, - tool: &str, - arguments: Option, - meta: Option, - ) -> anyhow::Result { - self.services - .mcp_connection_manager - .read() - .await - .call_tool(server, tool, arguments, meta) - .await - } - - pub(crate) async fn resolve_mcp_tool_info( - &self, - name: &str, - namespace: Option<&str>, - ) -> Option { - self.services - .mcp_connection_manager - .read() - .await - .resolve_tool_info(name, namespace) - .await - } - pub async fn 
interrupt_task(self: &Arc) { info!("interrupt received: abort current task, if any"); let has_active_turn = { self.active_turn.lock().await.is_some() }; @@ -4468,120 +3001,9 @@ impl Session { state.take_pending_session_start_source() } - async fn refresh_mcp_servers_inner( - &self, - turn_context: &TurnContext, - mcp_servers: HashMap, - store_mode: OAuthCredentialsStoreMode, - ) { - let auth = self.services.auth_manager.auth().await; - let config = self.get_config().await; - let mcp_config = config.to_mcp_config(self.services.plugins_manager.as_ref()); - let tool_plugin_provenance = self - .services - .mcp_manager - .tool_plugin_provenance(config.as_ref()); - let mcp_servers = with_codex_apps_mcp(mcp_servers, auth.as_ref(), &mcp_config); - let auth_statuses = compute_auth_statuses(mcp_servers.iter(), store_mode).await; - let sandbox_state = SandboxState { - sandbox_policy: turn_context.sandbox_policy.get().clone(), - codex_linux_sandbox_exe: turn_context.codex_linux_sandbox_exe.clone(), - sandbox_cwd: turn_context.cwd.to_path_buf(), - use_legacy_landlock: turn_context.features.use_legacy_landlock(), - }; - { - let mut guard = self.services.mcp_startup_cancellation_token.lock().await; - guard.cancel(); - *guard = CancellationToken::new(); - } - let (refreshed_manager, cancel_token) = McpConnectionManager::new( - &mcp_servers, - store_mode, - auth_statuses, - &turn_context.config.permissions.approval_policy, - turn_context.sub_id.clone(), - self.get_tx_event(), - sandbox_state, - config.codex_home.clone(), - codex_apps_tools_cache_key(auth.as_ref()), - tool_plugin_provenance, - ) - .await; - { - let mut guard = self.services.mcp_startup_cancellation_token.lock().await; - if guard.is_cancelled() { - cancel_token.cancel(); - } - *guard = cancel_token; - } - - let mut manager = self.services.mcp_connection_manager.write().await; - *manager = refreshed_manager; - } - - async fn refresh_mcp_servers_if_requested(&self, turn_context: &TurnContext) { - let refresh_config = { 
self.pending_mcp_server_refresh_config.lock().await.take() }; - let Some(refresh_config) = refresh_config else { - return; - }; - - let McpServerRefreshConfig { - mcp_servers, - mcp_oauth_credentials_store_mode, - } = refresh_config; - - let mcp_servers = - match serde_json::from_value::>(mcp_servers) { - Ok(servers) => servers, - Err(err) => { - warn!("failed to parse MCP server refresh config: {err}"); - return; - } - }; - let store_mode = match serde_json::from_value::( - mcp_oauth_credentials_store_mode, - ) { - Ok(mode) => mode, - Err(err) => { - warn!("failed to parse MCP OAuth refresh config: {err}"); - return; - } - }; - - self.refresh_mcp_servers_inner(turn_context, mcp_servers, store_mode) - .await; - } - - pub(crate) async fn refresh_mcp_servers_now( - &self, - turn_context: &TurnContext, - mcp_servers: HashMap, - store_mode: OAuthCredentialsStoreMode, - ) { - self.refresh_mcp_servers_inner(turn_context, mcp_servers, store_mode) - .await; - } - - #[cfg(test)] - async fn mcp_startup_cancellation_token(&self) -> CancellationToken { - self.services - .mcp_startup_cancellation_token - .lock() - .await - .clone() - } - fn show_raw_agent_reasoning(&self) -> bool { self.services.show_raw_agent_reasoning } - - async fn cancel_mcp_startup(&self) { - self.services - .mcp_startup_cancellation_token - .lock() - .await - .cancel(); - } } pub(crate) fn emit_subagent_session_started( @@ -4617,1335 +3039,9 @@ pub(crate) fn emit_subagent_session_started( }); } -async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiver) { - // To break out of this loop, send Op::Shutdown. 
- while let Ok(sub) = rx_sub.recv().await { - debug!(?sub, "Submission"); - let dispatch_span = submission_dispatch_span(&sub); - let should_exit = async { - match sub.op.clone() { - Op::Interrupt => { - handlers::interrupt(&sess).await; - false - } - Op::CleanBackgroundTerminals => { - handlers::clean_background_terminals(&sess).await; - false - } - Op::RealtimeConversationStart(params) => { - if let Err(err) = - handle_realtime_conversation_start(&sess, sub.id.clone(), params).await - { - sess.send_event_raw(Event { - id: sub.id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: err.to_string(), - codex_error_info: Some(CodexErrorInfo::Other), - }), - }) - .await; - } - false - } - Op::RealtimeConversationAudio(params) => { - handle_realtime_conversation_audio(&sess, sub.id.clone(), params).await; - false - } - Op::RealtimeConversationText(params) => { - handle_realtime_conversation_text(&sess, sub.id.clone(), params).await; - false - } - Op::RealtimeConversationClose => { - handle_realtime_conversation_close(&sess, sub.id.clone()).await; - false - } - Op::RealtimeConversationListVoices => { - handlers::realtime_conversation_list_voices(&sess, sub.id.clone()).await; - false - } - Op::OverrideTurnContext { - cwd, - approval_policy, - approvals_reviewer, - sandbox_policy, - windows_sandbox_level, - model, - effort, - summary, - service_tier, - collaboration_mode, - personality, - } => { - let collaboration_mode = if let Some(collab_mode) = collaboration_mode { - collab_mode - } else { - let state = sess.state.lock().await; - state.session_configuration.collaboration_mode.with_updates( - model.clone(), - effort, - /*developer_instructions*/ None, - ) - }; - handlers::override_turn_context( - &sess, - sub.id.clone(), - SessionSettingsUpdate { - cwd, - approval_policy, - approvals_reviewer, - sandbox_policy, - windows_sandbox_level, - collaboration_mode: Some(collaboration_mode), - reasoning_summary: summary, - service_tier, - personality, - ..Default::default() 
- }, - ) - .await; - false - } - Op::UserInput { .. } | Op::UserTurn { .. } => { - handlers::user_input_or_turn(&sess, sub.id.clone(), sub.op).await; - false - } - Op::InterAgentCommunication { communication } => { - handlers::inter_agent_communication(&sess, sub.id.clone(), communication).await; - false - } - Op::ExecApproval { - id: approval_id, - turn_id, - decision, - } => { - handlers::exec_approval(&sess, approval_id, turn_id, decision).await; - false - } - Op::PatchApproval { id, decision } => { - handlers::patch_approval(&sess, id, decision).await; - false - } - Op::UserInputAnswer { id, response } => { - handlers::request_user_input_response(&sess, id, response).await; - false - } - Op::RequestPermissionsResponse { id, response } => { - handlers::request_permissions_response(&sess, id, response).await; - false - } - Op::DynamicToolResponse { id, response } => { - handlers::dynamic_tool_response(&sess, id, response).await; - false - } - Op::AddToHistory { text } => { - handlers::add_to_history(&sess, &config, text).await; - false - } - Op::GetHistoryEntryRequest { offset, log_id } => { - handlers::get_history_entry_request( - &sess, - &config, - sub.id.clone(), - offset, - log_id, - ) - .await; - false - } - Op::ListMcpTools => { - handlers::list_mcp_tools(&sess, &config, sub.id.clone()).await; - false - } - Op::RefreshMcpServers { config } => { - handlers::refresh_mcp_servers(&sess, config).await; - false - } - Op::ReloadUserConfig => { - handlers::reload_user_config(&sess).await; - false - } - Op::ListSkills { cwds, force_reload } => { - handlers::list_skills(&sess, sub.id.clone(), cwds, force_reload).await; - false - } - Op::Undo => { - handlers::undo(&sess, sub.id.clone()).await; - false - } - Op::Compact => { - handlers::compact(&sess, sub.id.clone()).await; - false - } - Op::DropMemories => { - handlers::drop_memories(&sess, &config, sub.id.clone()).await; - false - } - Op::UpdateMemories => { - handlers::update_memories(&sess, &config, 
sub.id.clone()).await; - false - } - Op::ThreadRollback { num_turns } => { - handlers::thread_rollback(&sess, sub.id.clone(), num_turns).await; - false - } - Op::SetThreadName { name } => { - handlers::set_thread_name(&sess, sub.id.clone(), name).await; - false - } - Op::RunUserShellCommand { command } => { - handlers::run_user_shell_command(&sess, sub.id.clone(), command).await; - false - } - Op::ResolveElicitation { - server_name, - request_id, - decision, - content, - meta, - } => { - handlers::resolve_elicitation( - &sess, - server_name, - request_id, - decision, - content, - meta, - ) - .await; - false - } - Op::Shutdown => handlers::shutdown(&sess, sub.id.clone()).await, - Op::Review { review_request } => { - handlers::review(&sess, &config, sub.id.clone(), review_request).await; - false - } - _ => false, // Ignore unknown ops; enum is non_exhaustive to allow extensions. - } - } - .instrument(dispatch_span) - .await; - if should_exit { - break; - } - } - // Also drain cached guardian state if the submission loop exits because - // the channel closed without receiving an explicit shutdown op. 
- sess.guardian_review_session.shutdown().await; - debug!("Agent loop exited"); -} - -fn submission_dispatch_span(sub: &Submission) -> tracing::Span { - let op_name = sub.op.kind(); - let span_name = format!("op.dispatch.{op_name}"); - let dispatch_span = match &sub.op { - Op::RealtimeConversationAudio(_) => { - debug_span!( - "submission_dispatch", - otel.name = span_name.as_str(), - submission.id = sub.id.as_str(), - codex.op = op_name - ) - } - _ => info_span!( - "submission_dispatch", - otel.name = span_name.as_str(), - submission.id = sub.id.as_str(), - codex.op = op_name - ), - }; - if let Some(trace) = sub.trace.as_ref() - && !set_parent_from_w3c_trace_context(&dispatch_span, trace) - { - warn!( - submission.id = sub.id.as_str(), - "ignoring invalid submission trace carrier" - ); - } - dispatch_span -} - -/// Operation handlers -mod handlers { - use crate::codex::Session; - use crate::codex::SessionSettingsUpdate; - use crate::codex::SteerInputError; - - use crate::SkillError; - use crate::codex::spawn_review_thread; - use crate::config::Config; - use crate::config_loader::CloudRequirementsLoader; - use crate::config_loader::LoaderOverrides; - use crate::config_loader::load_config_layers_state; - use codex_features::Feature; - use codex_utils_absolute_path::AbsolutePathBuf; - - use crate::review_prompts::resolve_review_request; - use crate::rollout::RolloutRecorder; - use crate::tasks::CompactTask; - use crate::tasks::UndoTask; - use crate::tasks::UserShellCommandMode; - use crate::tasks::UserShellCommandTask; - use crate::tasks::execute_user_shell_command; - use codex_mcp::collect_mcp_snapshot_from_manager; - use codex_mcp::compute_auth_statuses; - use codex_protocol::protocol::CodexErrorInfo; - use codex_protocol::protocol::ErrorEvent; - use codex_protocol::protocol::Event; - use codex_protocol::protocol::EventMsg; - use codex_protocol::protocol::InterAgentCommunication; - use codex_protocol::protocol::ListSkillsResponseEvent; - use 
codex_protocol::protocol::McpServerRefreshConfig; - use codex_protocol::protocol::Op; - use codex_protocol::protocol::RealtimeConversationListVoicesResponseEvent; - use codex_protocol::protocol::RealtimeVoicesList; - use codex_protocol::protocol::ReviewDecision; - use codex_protocol::protocol::ReviewRequest; - use codex_protocol::protocol::RolloutItem; - use codex_protocol::protocol::SkillsListEntry; - use codex_protocol::protocol::ThreadNameUpdatedEvent; - use codex_protocol::protocol::ThreadRolledBackEvent; - use codex_protocol::protocol::TurnAbortReason; - use codex_protocol::protocol::WarningEvent; - use codex_protocol::request_permissions::RequestPermissionsResponse; - use codex_protocol::request_user_input::RequestUserInputResponse; - - use crate::context_manager::is_user_turn_boundary; - use codex_protocol::config_types::CollaborationMode; - use codex_protocol::config_types::ModeKind; - use codex_protocol::config_types::Settings; - use codex_protocol::dynamic_tools::DynamicToolResponse; - use codex_protocol::items::UserMessageItem; - use codex_protocol::mcp::RequestId as ProtocolRequestId; - use codex_protocol::user_input::UserInput; - use codex_rmcp_client::ElicitationAction; - use codex_rmcp_client::ElicitationResponse; - use serde_json::Value; - use std::path::PathBuf; - use std::sync::Arc; - use tracing::debug; - use tracing::info; - use tracing::warn; - - pub async fn interrupt(sess: &Arc) { - sess.interrupt_task().await; - } - - pub async fn clean_background_terminals(sess: &Arc) { - sess.close_unified_exec_processes().await; - } - - pub async fn realtime_conversation_list_voices(sess: &Session, sub_id: String) { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::RealtimeConversationListVoicesResponse( - RealtimeConversationListVoicesResponseEvent { - voices: RealtimeVoicesList::builtin(), - }, - ), - }) - .await; - } - - pub async fn override_turn_context( - sess: &Session, - sub_id: String, - updates: SessionSettingsUpdate, - ) { - if let 
Err(err) = sess.update_settings(updates).await { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: err.to_string(), - codex_error_info: Some(CodexErrorInfo::BadRequest), - }), - }) - .await; - } - } - - pub async fn user_input_or_turn(sess: &Arc, sub_id: String, op: Op) { - user_input_or_turn_inner( - sess, - sub_id, - op, - /*mirror_user_text_to_realtime*/ Some(()), - ) - .await; - } - - pub(super) async fn user_input_or_turn_inner( - sess: &Arc, - sub_id: String, - op: Op, - mirror_user_text_to_realtime: Option<()>, - ) { - let (items, updates, responsesapi_client_metadata) = match op { - Op::UserTurn { - cwd, - approval_policy, - approvals_reviewer, - sandbox_policy, - model, - effort, - summary, - service_tier, - final_output_json_schema, - items, - collaboration_mode, - personality, - } => { - let collaboration_mode = collaboration_mode.or_else(|| { - Some(CollaborationMode { - mode: ModeKind::Default, - settings: Settings { - model: model.clone(), - reasoning_effort: effort, - developer_instructions: None, - }, - }) - }); - ( - items, - SessionSettingsUpdate { - cwd: Some(cwd), - approval_policy: Some(approval_policy), - approvals_reviewer, - sandbox_policy: Some(sandbox_policy), - windows_sandbox_level: None, - collaboration_mode, - reasoning_summary: summary, - service_tier, - final_output_json_schema: Some(final_output_json_schema), - personality, - app_server_client_name: None, - app_server_client_version: None, - }, - None, - ) - } - Op::UserInput { - items, - final_output_json_schema, - responsesapi_client_metadata, - } => ( - items, - SessionSettingsUpdate { - final_output_json_schema: Some(final_output_json_schema), - ..Default::default() - }, - responsesapi_client_metadata, - ), - _ => unreachable!(), - }; - - let Ok(current_context) = sess.new_turn_with_sub_id(sub_id.clone(), updates).await else { - // new_turn_with_sub_id already emits the error event. 
- return; - }; - sess.maybe_emit_unknown_model_warning_for_turn(current_context.as_ref()) - .await; - let accepted_items = match sess - .steer_input( - items.clone(), - /*expected_turn_id*/ None, - responsesapi_client_metadata.clone(), - ) - .await - { - Ok(_) => { - current_context.session_telemetry.user_prompt(&items); - Some(items) - } - Err(SteerInputError::NoActiveTurn(items)) => { - if let Some(responsesapi_client_metadata) = responsesapi_client_metadata { - current_context - .turn_metadata_state - .set_responsesapi_client_metadata(responsesapi_client_metadata); - } - current_context.session_telemetry.user_prompt(&items); - sess.refresh_mcp_servers_if_requested(¤t_context) - .await; - let accepted_items = items.clone(); - sess.spawn_task( - Arc::clone(¤t_context), - items, - crate::tasks::RegularTask::new(), - ) - .await; - Some(accepted_items) - } - Err(err) => { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Error(err.to_error_event()), - }) - .await; - None - } - }; - if let (Some(items), Some(())) = (accepted_items, mirror_user_text_to_realtime) { - self::mirror_user_text_to_realtime(sess, &items).await; - } - } - - async fn mirror_user_text_to_realtime(sess: &Arc, items: &[UserInput]) { - let text = UserMessageItem::new(items).message(); - if text.is_empty() { - return; - } - if sess.conversation.running_state().await.is_none() { - return; - } - if let Err(err) = sess.conversation.text_in(text).await { - debug!("failed to mirror user text to realtime conversation: {err}"); - } - } - - /// Records an inter-agent assistant envelope, then lets the shared pending-work scheduler - /// decide whether an idle session should start a regular turn. 
- pub async fn inter_agent_communication( - sess: &Arc, - sub_id: String, - communication: InterAgentCommunication, - ) { - let trigger_turn = communication.trigger_turn; - sess.enqueue_mailbox_communication(communication); - if trigger_turn { - sess.maybe_start_turn_for_pending_work_with_sub_id(sub_id) - .await; - } - } - - pub async fn run_user_shell_command(sess: &Arc, sub_id: String, command: String) { - if let Some((turn_context, cancellation_token)) = - sess.active_turn_context_and_cancellation_token().await - { - let session = Arc::clone(sess); - tokio::spawn(async move { - execute_user_shell_command( - session, - turn_context, - command, - cancellation_token, - UserShellCommandMode::ActiveTurnAuxiliary, - ) - .await; - }); - return; - } - - let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; - sess.spawn_task( - Arc::clone(&turn_context), - Vec::new(), - UserShellCommandTask::new(command), - ) - .await; - } - - pub async fn resolve_elicitation( - sess: &Arc, - server_name: String, - request_id: ProtocolRequestId, - decision: codex_protocol::approvals::ElicitationAction, - content: Option, - meta: Option, - ) { - let action = match decision { - codex_protocol::approvals::ElicitationAction::Accept => ElicitationAction::Accept, - codex_protocol::approvals::ElicitationAction::Decline => ElicitationAction::Decline, - codex_protocol::approvals::ElicitationAction::Cancel => ElicitationAction::Cancel, - }; - let content = match action { - // Preserve the legacy fallback for clients that only send an action. 
- ElicitationAction::Accept => Some(content.unwrap_or_else(|| serde_json::json!({}))), - ElicitationAction::Decline | ElicitationAction::Cancel => None, - }; - let response = ElicitationResponse { - action, - content, - meta, - }; - let request_id = match request_id { - ProtocolRequestId::String(value) => { - rmcp::model::NumberOrString::String(std::sync::Arc::from(value)) - } - ProtocolRequestId::Integer(value) => rmcp::model::NumberOrString::Number(value), - }; - if let Err(err) = sess - .resolve_elicitation(server_name, request_id, response) - .await - { - warn!( - error = %err, - "failed to resolve elicitation request in session" - ); - } - } - - /// Propagate a user's exec approval decision to the session. - /// Also optionally applies an execpolicy amendment. - pub async fn exec_approval( - sess: &Arc, - approval_id: String, - turn_id: Option, - decision: ReviewDecision, - ) { - let event_turn_id = turn_id.unwrap_or_else(|| approval_id.clone()); - if let ReviewDecision::ApprovedExecpolicyAmendment { - proposed_execpolicy_amendment, - } = &decision - { - match sess - .persist_execpolicy_amendment(proposed_execpolicy_amendment) - .await - { - Ok(()) => { - sess.record_execpolicy_amendment_message( - &event_turn_id, - proposed_execpolicy_amendment, - ) - .await; - } - Err(err) => { - let message = format!("Failed to apply execpolicy amendment: {err}"); - tracing::warn!("{message}"); - let warning = EventMsg::Warning(WarningEvent { message }); - sess.send_event_raw(Event { - id: event_turn_id.clone(), - msg: warning, - }) - .await; - } - } - } - match decision { - ReviewDecision::Abort => { - sess.interrupt_task().await; - } - other => sess.notify_approval(&approval_id, other).await, - } - } - - pub async fn patch_approval(sess: &Arc, id: String, decision: ReviewDecision) { - match decision { - ReviewDecision::Abort => { - sess.interrupt_task().await; - } - other => sess.notify_approval(&id, other).await, - } - } - - pub async fn request_user_input_response( - 
sess: &Arc, - id: String, - response: RequestUserInputResponse, - ) { - sess.notify_user_input_response(&id, response).await; - } - - pub async fn request_permissions_response( - sess: &Arc, - id: String, - response: RequestPermissionsResponse, - ) { - sess.notify_request_permissions_response(&id, response) - .await; - } - - pub async fn dynamic_tool_response( - sess: &Arc, - id: String, - response: DynamicToolResponse, - ) { - sess.notify_dynamic_tool_response(&id, response).await; - } - - pub async fn add_to_history(sess: &Arc, config: &Arc, text: String) { - let id = sess.conversation_id; - let config = Arc::clone(config); - tokio::spawn(async move { - if let Err(e) = crate::message_history::append_entry(&text, &id, &config).await { - warn!("failed to append to message history: {e}"); - } - }); - } - - pub async fn get_history_entry_request( - sess: &Arc, - config: &Arc, - sub_id: String, - offset: usize, - log_id: u64, - ) { - let config = Arc::clone(config); - let sess_clone = Arc::clone(sess); - - tokio::spawn(async move { - // Run lookup in blocking thread because it does file IO + locking. 
- let entry_opt = tokio::task::spawn_blocking(move || { - crate::message_history::lookup(log_id, offset, &config) - }) - .await - .unwrap_or(None); - - let event = Event { - id: sub_id, - msg: EventMsg::GetHistoryEntryResponse( - codex_protocol::protocol::GetHistoryEntryResponseEvent { - offset, - log_id, - entry: entry_opt.map(|e| codex_protocol::message_history::HistoryEntry { - conversation_id: e.session_id, - ts: e.ts, - text: e.text, - }), - }, - ), - }; - - sess_clone.send_event_raw(event).await; - }); - } - - pub async fn refresh_mcp_servers(sess: &Arc, refresh_config: McpServerRefreshConfig) { - let mut guard = sess.pending_mcp_server_refresh_config.lock().await; - *guard = Some(refresh_config); - } - - pub async fn reload_user_config(sess: &Arc) { - sess.reload_user_config_layer().await; - } - - pub async fn list_mcp_tools(sess: &Session, config: &Arc, sub_id: String) { - let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; - let auth = sess.services.auth_manager.auth().await; - let mcp_servers = sess - .services - .mcp_manager - .effective_servers(config, auth.as_ref()); - let snapshot = collect_mcp_snapshot_from_manager( - &mcp_connection_manager, - compute_auth_statuses(mcp_servers.iter(), config.mcp_oauth_credentials_store_mode) - .await, - ) - .await; - let event = Event { - id: sub_id, - msg: EventMsg::McpListToolsResponse(snapshot), - }; - sess.send_event_raw(event).await; - } - - pub async fn list_skills( - sess: &Session, - sub_id: String, - cwds: Vec, - force_reload: bool, - ) { - let cwds = if cwds.is_empty() { - let state = sess.state.lock().await; - vec![state.session_configuration.cwd.to_path_buf()] - } else { - cwds - }; - - let skills_manager = &sess.services.skills_manager; - let plugins_manager = &sess.services.plugins_manager; - let config = sess.get_config().await; - let codex_home = sess.codex_home().await; - let mut skills = Vec::new(); - let empty_cli_overrides: &[(String, toml::Value)] = &[]; - for cwd in 
cwds { - let cwd_abs = match AbsolutePathBuf::try_from(cwd.as_path()) { - Ok(path) => path, - Err(err) => { - let message = err.to_string(); - let cwd_for_entry = cwd.clone(); - skills.push(SkillsListEntry { - cwd: cwd_for_entry.clone(), - skills: Vec::new(), - errors: super::errors_to_info(&[SkillError { - path: cwd_for_entry, - message, - }]), - }); - continue; - } - }; - let config_layer_stack = match load_config_layers_state( - &codex_home, - Some(cwd_abs), - empty_cli_overrides, - LoaderOverrides::default(), - CloudRequirementsLoader::default(), - ) - .await - { - Ok(config_layer_stack) => config_layer_stack, - Err(err) => { - let message = err.to_string(); - let cwd_for_entry = cwd.clone(); - skills.push(SkillsListEntry { - cwd: cwd_for_entry.clone(), - skills: Vec::new(), - errors: super::errors_to_info(&[SkillError { - path: cwd_for_entry, - message, - }]), - }); - continue; - } - }; - let effective_skill_roots = plugins_manager.effective_skill_roots_for_layer_stack( - &config_layer_stack, - config.features.enabled(Feature::Plugins), - ); - let skills_input = crate::SkillsLoadInput::new( - cwd.clone(), - effective_skill_roots, - config_layer_stack, - config.bundled_skills_enabled(), - ); - let outcome = skills_manager - .skills_for_cwd(&skills_input, force_reload) - .await; - let errors = super::errors_to_info(&outcome.errors); - let skills_metadata = super::skills_to_info(&outcome.skills, &outcome.disabled_paths); - skills.push(SkillsListEntry { - cwd, - skills: skills_metadata, - errors, - }); - } - - let event = Event { - id: sub_id, - msg: EventMsg::ListSkillsResponse(ListSkillsResponseEvent { skills }), - }; - sess.send_event_raw(event).await; - } - - pub async fn undo(sess: &Arc, sub_id: String) { - let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; - sess.spawn_task(turn_context, Vec::new(), UndoTask::new()) - .await; - } - - pub async fn compact(sess: &Arc, sub_id: String) { - let turn_context = 
sess.new_default_turn_with_sub_id(sub_id).await; - - sess.spawn_task( - Arc::clone(&turn_context), - vec![UserInput::Text { - text: turn_context.compact_prompt().to_string(), - // Compaction prompt is synthesized; no UI element ranges to preserve. - text_elements: Vec::new(), - }], - CompactTask, - ) - .await; - } - - pub async fn drop_memories(sess: &Arc, config: &Arc, sub_id: String) { - let mut errors = Vec::new(); - - if let Some(state_db) = sess.services.state_db.as_deref() { - if let Err(err) = state_db.clear_memory_data().await { - errors.push(format!("failed clearing memory rows from state db: {err}")); - } - } else { - errors.push("state db unavailable; memory rows were not cleared".to_string()); - } - - let memory_root = crate::memories::memory_root(&config.codex_home); - if let Err(err) = crate::memories::clear_memory_root_contents(&memory_root).await { - errors.push(format!( - "failed clearing memory directory {}: {err}", - memory_root.display() - )); - } - - if errors.is_empty() { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Warning(WarningEvent { - message: format!( - "Dropped memories at {} and cleared memory rows from state db.", - memory_root.display() - ), - }), - }) - .await; - return; - } - - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: format!("Memory drop completed with errors: {}", errors.join("; ")), - codex_error_info: Some(CodexErrorInfo::Other), - }), - }) - .await; - } - - pub async fn update_memories(sess: &Arc, config: &Arc, sub_id: String) { - let session_source = { - let state = sess.state.lock().await; - state.session_configuration.session_source.clone() - }; - - crate::memories::start_memories_startup_task(sess, Arc::clone(config), &session_source); - - sess.send_event_raw(Event { - id: sub_id.clone(), - msg: EventMsg::Warning(WarningEvent { - message: "Memory update triggered.".to_string(), - }), - }) - .await; - } - - pub async fn thread_rollback(sess: &Arc, sub_id: 
String, num_turns: u32) { - if num_turns == 0 { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: "num_turns must be >= 1".to_string(), - codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), - }), - }) - .await; - return; - } - - let has_active_turn = { sess.active_turn.lock().await.is_some() }; - if has_active_turn { - sess.send_event_raw(Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: "Cannot rollback while a turn is in progress.".to_string(), - codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), - }), - }) - .await; - return; - } - - let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; - let rollout_path = { - let recorder = { - let guard = sess.services.rollout.lock().await; - guard.clone() - }; - let Some(recorder) = recorder else { - sess.send_event_raw(Event { - id: turn_context.sub_id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: "thread rollback requires a persisted rollout path".to_string(), - codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), - }), - }) - .await; - return; - }; - recorder.rollout_path().to_path_buf() - }; - if let Some(recorder) = { - let guard = sess.services.rollout.lock().await; - guard.clone() - } && let Err(err) = recorder.flush().await - { - sess.send_event_raw(Event { - id: turn_context.sub_id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: format!( - "failed to flush rollout `{}` for rollback replay: {err}", - rollout_path.display() - ), - codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), - }), - }) - .await; - return; - } - - let initial_history = - match RolloutRecorder::get_rollout_history(rollout_path.as_path()).await { - Ok(history) => history, - Err(err) => { - sess.send_event_raw(Event { - id: turn_context.sub_id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: format!( - "failed to load rollout `{}` for rollback replay: {err}", - rollout_path.display() - ), - codex_error_info: 
Some(CodexErrorInfo::ThreadRollbackFailed), - }), - }) - .await; - return; - } - }; - - let rollback_event = ThreadRolledBackEvent { num_turns }; - let rollback_msg = EventMsg::ThreadRolledBack(rollback_event.clone()); - let replay_items = initial_history - .get_rollout_items() - .into_iter() - .chain(std::iter::once(RolloutItem::EventMsg(rollback_msg.clone()))) - .collect::>(); - sess.apply_rollout_reconstruction(turn_context.as_ref(), replay_items.as_slice()) - .await; - sess.recompute_token_usage(turn_context.as_ref()).await; - - sess.persist_rollout_items(&[RolloutItem::EventMsg(rollback_msg.clone())]) - .await; - if let Err(err) = sess.flush_rollout().await { - sess.send_event( - turn_context.as_ref(), - EventMsg::Warning(WarningEvent { - message: format!( - "Rolled the thread back, but failed to save the rollback marker. Codex will continue retrying. Error: {err}" - ), - }), - ) - .await; - } - - sess.deliver_event_raw(Event { - id: turn_context.sub_id.clone(), - msg: rollback_msg, - }) - .await; - } - - async fn persist_thread_name_update( - sess: &Arc, - event: ThreadNameUpdatedEvent, - ) -> anyhow::Result { - let msg = EventMsg::ThreadNameUpdated(event); - let item = RolloutItem::EventMsg(msg.clone()); - let recorder = { - let guard = sess.services.rollout.lock().await; - guard.clone() - } - .ok_or_else(|| anyhow::anyhow!("Session persistence is disabled; cannot rename thread."))?; - recorder.persist().await?; - recorder.record_items(std::slice::from_ref(&item)).await?; - recorder.flush().await?; - Ok(msg) - } - - /// Persists the thread name in the rollout and state database, updates in-memory state, and - /// emits a `ThreadNameUpdated` event on success. 
- pub async fn set_thread_name(sess: &Arc, sub_id: String, name: String) { - let Some(name) = crate::util::normalize_thread_name(&name) else { - let event = Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: "Thread name cannot be empty.".to_string(), - codex_error_info: Some(CodexErrorInfo::BadRequest), - }), - }; - sess.send_event_raw(event).await; - return; - }; - - let updated = ThreadNameUpdatedEvent { - thread_id: sess.conversation_id, - thread_name: Some(name.clone()), - }; - - let msg = match persist_thread_name_update(sess, updated).await { - Ok(msg) => msg, - Err(err) => { - warn!("Failed to persist thread name update to rollout: {err}"); - let event = Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: err.to_string(), - codex_error_info: Some(CodexErrorInfo::Other), - }), - }; - sess.send_event_raw(event).await; - return; - } - }; - - if let Some(state_db) = sess.services.state_db.as_deref() - && let Err(err) = state_db - .update_thread_title(sess.conversation_id, &name) - .await - { - warn!("Failed to update thread title in state db: {err}"); - } - - { - let mut state = sess.state.lock().await; - state.session_configuration.thread_name = Some(name.clone()); - } - - let codex_home = sess.codex_home().await; - if let Err(err) = - crate::rollout::append_thread_name(&codex_home, sess.conversation_id, &name).await - { - warn!("Failed to update legacy thread name index: {err}"); - } - - sess.deliver_event_raw(Event { id: sub_id, msg }).await; - } - - pub async fn shutdown(sess: &Arc, sub_id: String) -> bool { - sess.abort_all_tasks(TurnAbortReason::Interrupted).await; - let _ = sess.conversation.shutdown().await; - sess.services - .unified_exec_manager - .terminate_all_processes() - .await; - sess.guardian_review_session.shutdown().await; - info!("Shutting down Codex instance"); - let history = sess.clone_history().await; - let turn_count = history - .raw_items() - .iter() - .filter(|item| is_user_turn_boundary(item)) - 
.count(); - sess.services.session_telemetry.counter( - "codex.conversation.turn.count", - i64::try_from(turn_count).unwrap_or(0), - &[], - ); - - // Gracefully flush and shutdown rollout recorder on session end so tests - // that inspect the rollout file do not race with the background writer. - let recorder_opt = { - let mut guard = sess.services.rollout.lock().await; - guard.take() - }; - if let Some(rec) = recorder_opt - && let Err(e) = rec.shutdown().await - { - warn!("failed to shutdown rollout recorder: {e}"); - let event = Event { - id: sub_id.clone(), - msg: EventMsg::Error(ErrorEvent { - message: "Failed to shutdown rollout recorder".to_string(), - codex_error_info: Some(CodexErrorInfo::Other), - }), - }; - sess.send_event_raw(event).await; - } - - let event = Event { - id: sub_id, - msg: EventMsg::ShutdownComplete, - }; - sess.send_event_raw(event).await; - true - } - - pub async fn review( - sess: &Arc, - config: &Arc, - sub_id: String, - review_request: ReviewRequest, - ) { - let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await; - sess.maybe_emit_unknown_model_warning_for_turn(turn_context.as_ref()) - .await; - sess.refresh_mcp_servers_if_requested(&turn_context).await; - match resolve_review_request(review_request, turn_context.cwd.as_path()) { - Ok(resolved) => { - spawn_review_thread( - Arc::clone(sess), - Arc::clone(config), - turn_context.clone(), - sub_id, - resolved, - ) - .await; - } - Err(err) => { - let event = Event { - id: sub_id, - msg: EventMsg::Error(ErrorEvent { - message: err.to_string(), - codex_error_info: Some(CodexErrorInfo::Other), - }), - }; - sess.send_event(&turn_context, event.msg).await; - } - } - } -} - -/// Spawn a review thread using the given prompt. 
-async fn spawn_review_thread( - sess: Arc, - config: Arc, - parent_turn_context: Arc, - sub_id: String, - resolved: crate::review_prompts::ResolvedReviewRequest, -) { - let model = config - .review_model - .clone() - .unwrap_or_else(|| parent_turn_context.model_info.slug.clone()); - let review_model_info = sess - .services - .models_manager - .get_model_info(&model, &config.to_models_manager_config()) - .await; - // For reviews, disable web_search and view_image regardless of global settings. - let mut review_features = sess.features.clone(); - let _ = review_features.disable(Feature::WebSearchRequest); - let _ = review_features.disable(Feature::WebSearchCached); - let review_web_search_mode = WebSearchMode::Disabled; - let tools_config = ToolsConfig::new(&ToolsConfigParams { - model_info: &review_model_info, - available_models: &sess - .services - .models_manager - .list_models(RefreshStrategy::OnlineIfUncached) - .await, - features: &review_features, - image_generation_tool_auth_allowed: image_generation_tool_auth_allowed(Some( - sess.services.auth_manager.as_ref(), - )), - web_search_mode: Some(review_web_search_mode), - session_source: parent_turn_context.session_source.clone(), - sandbox_policy: parent_turn_context.sandbox_policy.get(), - windows_sandbox_level: parent_turn_context.windows_sandbox_level, - }) - .with_unified_exec_shell_mode_for_session( - crate::tools::spec::tool_user_shell_type(sess.services.user_shell.as_ref()), - sess.services.shell_zsh_path.as_ref(), - sess.services.main_execve_wrapper_exe.as_ref(), - ) - .with_web_search_config(/*web_search_config*/ None) - .with_allow_login_shell(config.permissions.allow_login_shell) - .with_has_environment(parent_turn_context.environment.is_some()) - .with_spawn_agent_usage_hint(config.multi_agent_v2.usage_hint_enabled) - .with_spawn_agent_usage_hint_text(config.multi_agent_v2.usage_hint_text.clone()) - .with_hide_spawn_agent_metadata(config.multi_agent_v2.hide_spawn_agent_metadata) - 
.with_agent_type_description(crate::agent::role::spawn_tool_spec::build( - &config.agent_roles, - )); - - let review_prompt = resolved.prompt.clone(); - let provider = parent_turn_context.provider.clone(); - let auth_manager = parent_turn_context.auth_manager.clone(); - let model_info = review_model_info.clone(); - - // Build per‑turn client with the requested model/family. - let mut per_turn_config = (*config).clone(); - per_turn_config.model = Some(model.clone()); - per_turn_config.features = review_features.clone(); - if let Err(err) = per_turn_config.web_search_mode.set(review_web_search_mode) { - let fallback_value = per_turn_config.web_search_mode.value(); - tracing::warn!( - error = %err, - ?review_web_search_mode, - ?fallback_value, - "review web_search_mode is disallowed by requirements; keeping constrained value" - ); - } - - let session_telemetry = parent_turn_context - .session_telemetry - .clone() - .with_model(model.as_str(), review_model_info.slug.as_str()); - let auth_manager_for_context = auth_manager.clone(); - let provider_for_context = provider.clone(); - let session_telemetry_for_context = session_telemetry.clone(); - let reasoning_effort = per_turn_config.model_reasoning_effort; - let reasoning_summary = per_turn_config - .model_reasoning_summary - .unwrap_or(model_info.default_reasoning_summary); - let session_source = parent_turn_context.session_source.clone(); - - let per_turn_config = Arc::new(per_turn_config); - let review_turn_id = sub_id.to_string(); - let turn_metadata_state = Arc::new(TurnMetadataState::new( - sess.conversation_id.to_string(), - review_turn_id.clone(), - parent_turn_context.cwd.to_path_buf(), - parent_turn_context.sandbox_policy.get(), - parent_turn_context.windows_sandbox_level, - )); - - let review_turn_context = TurnContext { - sub_id: review_turn_id, - trace_id: current_span_trace_id(), - realtime_active: parent_turn_context.realtime_active, - config: per_turn_config, - auth_manager: auth_manager_for_context, - 
model_info: model_info.clone(), - session_telemetry: session_telemetry_for_context, - provider: provider_for_context, - reasoning_effort, - reasoning_summary, - session_source, - environment: parent_turn_context.environment.clone(), - tools_config, - features: parent_turn_context.features.clone(), - ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), - current_date: parent_turn_context.current_date.clone(), - timezone: parent_turn_context.timezone.clone(), - app_server_client_name: parent_turn_context.app_server_client_name.clone(), - developer_instructions: None, - user_instructions: None, - compact_prompt: parent_turn_context.compact_prompt.clone(), - collaboration_mode: parent_turn_context.collaboration_mode.clone(), - personality: parent_turn_context.personality, - approval_policy: parent_turn_context.approval_policy.clone(), - sandbox_policy: parent_turn_context.sandbox_policy.clone(), - file_system_sandbox_policy: parent_turn_context.file_system_sandbox_policy.clone(), - network_sandbox_policy: parent_turn_context.network_sandbox_policy, - network: parent_turn_context.network.clone(), - windows_sandbox_level: parent_turn_context.windows_sandbox_level, - shell_environment_policy: parent_turn_context.shell_environment_policy.clone(), - cwd: parent_turn_context.cwd.clone(), - final_output_json_schema: None, - codex_self_exe: parent_turn_context.codex_self_exe.clone(), - codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(), - tool_call_gate: Arc::new(ReadinessFlag::new()), - js_repl: Arc::clone(&sess.js_repl), - dynamic_tools: parent_turn_context.dynamic_tools.clone(), - truncation_policy: model_info.truncation_policy.into(), - turn_metadata_state, - turn_skills: TurnSkillsContext::new(parent_turn_context.turn_skills.outcome.clone()), - turn_timing_state: Arc::new(TurnTimingState::default()), - }; - - // Seed the child task with the review prompt as the initial user message. 
- let input: Vec = vec![UserInput::Text { - text: review_prompt, - // Review prompt is synthesized; no UI element ranges to preserve. - text_elements: Vec::new(), - }]; - let tc = Arc::new(review_turn_context); - tc.turn_metadata_state.spawn_git_enrichment_task(); - // TODO(ccunningham): Review turns currently rely on `spawn_task` for TurnComplete but do not - // emit a parent TurnStarted. Consider giving review a full parent turn lifecycle - // (TurnStarted + TurnComplete) for consistency with other standalone tasks. - sess.spawn_task(tc.clone(), input, ReviewTask::new()).await; - - // Announce entering review mode so UIs can switch modes. - let review_request = ReviewRequest { - target: resolved.target, - user_facing_hint: Some(resolved.user_facing_hint), - }; - sess.send_event(&tc, EventMsg::EnteredReviewMode(review_request)) - .await; -} - fn skills_to_info( skills: &[SkillMetadata], - disabled_paths: &HashSet, + disabled_paths: &HashSet, ) -> Vec { skills .iter() @@ -5991,2029 +3087,12 @@ fn errors_to_info(errors: &[SkillError]) -> Vec { errors .iter() .map(|err| SkillErrorInfo { - path: err.path.clone(), + path: err.path.to_path_buf(), message: err.message.clone(), }) .collect() } -// Explicit plugin mentions imply app usage even when the user did not -// mention the app directly. 
If those connectors are still missing from the -// current `codex_apps` snapshot, give startup a bounded chance to finish -fn explicitly_enabled_connectors_missing_from_tools( - connector_ids: &HashSet, - mcp_tools: &HashMap, -) -> bool { - let accessible_connector_ids = connectors::accessible_connectors_from_mcp_tools(mcp_tools) - .into_iter() - .map(|connector| connector.id) - .collect::>(); - - connector_ids - .iter() - .any(|connector_id| !accessible_connector_ids.contains(connector_id)) -} - -/// Takes a user message as input and runs a loop where, at each sampling request, the model -/// replies with either: -/// -/// - requested function calls -/// - an assistant message -/// -/// While it is possible for the model to return multiple of these items in a -/// single sampling request, in practice, we generally one item per sampling request: -/// -/// - If the model requests a function call, we execute it and send the output -/// back to the model in the next sampling request. -/// - If the model sends only an assistant message, we record it in the -/// conversation history and consider the turn complete. -/// -pub(crate) async fn run_turn( - sess: Arc, - turn_context: Arc, - input: Vec, - prewarmed_client_session: Option, - cancellation_token: CancellationToken, -) -> Option { - if input.is_empty() && !sess.has_pending_input().await { - return None; - } - - let model_info = turn_context.model_info.clone(); - let auto_compact_limit = model_info.auto_compact_token_limit().unwrap_or(i64::MAX); - let mut prewarmed_client_session = prewarmed_client_session; - // TODO(ccunningham): Pre-turn compaction runs before context updates and the - // new user message are recorded. Estimate pending incoming items (context - // diffs/full reinjection + user input) and trigger compaction preemptively - // when they would push the thread over the compaction threshold. 
- let pre_sampling_compacted = match run_pre_sampling_compact(&sess, &turn_context).await { - Ok(pre_sampling_compacted) => pre_sampling_compacted, - Err(_) => { - error!("Failed to run pre-sampling compact"); - return None; - } - }; - if pre_sampling_compacted && let Some(mut client_session) = prewarmed_client_session.take() { - client_session.reset_websocket_session(); - } - - let skills_outcome = Some(turn_context.turn_skills.outcome.as_ref()); - - sess.record_context_updates_and_set_reference_context_item(turn_context.as_ref()) - .await; - - let loaded_plugins = sess - .services - .plugins_manager - .plugins_for_config(&turn_context.config); - // Structured plugin:// mentions are resolved from the current session's - // enabled plugins, then converted into turn-scoped guidance below. - let mentioned_plugins = - collect_explicit_plugin_mentions(&input, loaded_plugins.capability_summaries()); - let mut explicitly_enabled_connectors = collect_explicit_app_ids(&input); - if turn_context.apps_enabled() { - // Treat app connectors declared by explicit plugin mentions as - // explicit for this turn too. That lets them participate in both - // startup waiting and first-turn tool exposure. - explicitly_enabled_connectors.extend(mentioned_plugins.iter().flat_map(|plugin| { - plugin - .app_connector_ids - .iter() - .map(|connector_id| connector_id.0.clone()) - })); - } - let mcp_tools = if turn_context.apps_enabled() || !mentioned_plugins.is_empty() { - // Plugin mentions need raw MCP/app inventory even when app tools - // are normally hidden so we can describe the plugin's currently - // usable capabilities for this turn. 
- let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; - let mut mcp_tools = match mcp_connection_manager - .list_all_tools() - .or_cancel(&cancellation_token) - .await - { - Ok(mcp_tools) => mcp_tools, - Err(_) if turn_context.apps_enabled() => return None, - Err(_) => HashMap::new(), - }; - if turn_context.apps_enabled() - && !explicitly_enabled_connectors.is_empty() - && explicitly_enabled_connectors_missing_from_tools( - &explicitly_enabled_connectors, - &mcp_tools, - ) - { - // The caller explicitly asked for one of these app-backed surfaces, - // but the first snapshot still does not expose it, so wait - // briefly and then rebuild the tool view for this turn. - let codex_apps_ready = match mcp_connection_manager - .wait_for_server_ready(CODEX_APPS_MCP_SERVER_NAME, EXPLICIT_APPS_READY_TIMEOUT) - .or_cancel(&cancellation_token) - .await - { - Ok(codex_apps_ready) => codex_apps_ready, - Err(_) => return None, - }; - if codex_apps_ready { - mcp_tools = match mcp_connection_manager - .list_all_tools() - .or_cancel(&cancellation_token) - .await - { - Ok(mcp_tools) => mcp_tools, - Err(_) => return None, - }; - } - } - mcp_tools - } else { - HashMap::new() - }; - let available_connectors = if turn_context.apps_enabled() { - let connectors = connectors::merge_plugin_apps_with_accessible( - loaded_plugins.effective_apps(), - connectors::accessible_connectors_from_mcp_tools(&mcp_tools), - ); - connectors::with_app_enabled_state(connectors, &turn_context.config) - } else { - Vec::new() - }; - let connector_slug_counts = build_connector_slug_counts(&available_connectors); - let skill_name_counts_lower = skills_outcome - .as_ref() - .map_or_else(HashMap::new, |outcome| { - build_skill_name_counts(&outcome.skills, &outcome.disabled_paths).1 - }); - let mentioned_skills = skills_outcome.as_ref().map_or_else(Vec::new, |outcome| { - collect_explicit_skill_mentions( - &input, - &outcome.skills, - &outcome.disabled_paths, - &connector_slug_counts, - ) 
- }); - let config = turn_context.config.clone(); - if config - .features - .enabled(Feature::SkillEnvVarDependencyPrompt) - { - let env_var_dependencies = collect_env_var_dependencies(&mentioned_skills); - resolve_skill_dependencies_for_turn(&sess, &turn_context, &env_var_dependencies).await; - } - - maybe_prompt_and_install_mcp_dependencies( - sess.as_ref(), - turn_context.as_ref(), - &cancellation_token, - &mentioned_skills, - ) - .await; - - let session_telemetry = turn_context.session_telemetry.clone(); - let thread_id = sess.conversation_id.to_string(); - let tracking = build_track_events_context( - turn_context.model_info.slug.clone(), - thread_id, - turn_context.sub_id.clone(), - ); - let SkillInjections { - items: skill_items, - warnings: skill_warnings, - } = build_skill_injections( - &mentioned_skills, - Some(&session_telemetry), - &sess.services.analytics_events_client, - tracking.clone(), - ) - .await; - - for message in skill_warnings { - sess.send_event(&turn_context, EventMsg::Warning(WarningEvent { message })) - .await; - } - - let plugin_items = - build_plugin_injections(&mentioned_plugins, &mcp_tools, &available_connectors); - let mentioned_plugin_metadata = mentioned_plugins - .iter() - .filter_map(crate::plugins::PluginCapabilitySummary::telemetry_metadata) - .collect::>(); - - explicitly_enabled_connectors.extend(collect_explicit_app_ids_from_skill_items( - &skill_items, - &available_connectors, - &skill_name_counts_lower, - )); - let connector_names_by_id = available_connectors - .iter() - .map(|connector| (connector.id.as_str(), connector.name.as_str())) - .collect::>(); - let mentioned_app_invocations = explicitly_enabled_connectors - .iter() - .map(|connector_id| AppInvocation { - connector_id: Some(connector_id.clone()), - app_name: connector_names_by_id - .get(connector_id.as_str()) - .map(|name| (*name).to_string()), - invocation_type: Some(InvocationType::Explicit), - }) - .collect::>(); - - if run_pending_session_start_hooks(&sess, 
&turn_context).await { - return None; - } - let additional_contexts = if input.is_empty() { - Vec::new() - } else { - let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input.clone()); - let response_item: ResponseItem = initial_input_for_turn.clone().into(); - let user_prompt_submit_outcome = run_user_prompt_submit_hooks( - &sess, - &turn_context, - UserMessageItem::new(&input).message(), - ) - .await; - if user_prompt_submit_outcome.should_stop { - record_additional_contexts( - &sess, - &turn_context, - user_prompt_submit_outcome.additional_contexts, - ) - .await; - return None; - } - sess.record_user_prompt_and_emit_turn_item(turn_context.as_ref(), &input, response_item) - .await; - user_prompt_submit_outcome.additional_contexts - }; - sess.services - .analytics_events_client - .track_app_mentioned(tracking.clone(), mentioned_app_invocations); - for plugin in mentioned_plugin_metadata { - sess.services - .analytics_events_client - .track_plugin_used(tracking.clone(), plugin); - } - sess.merge_connector_selection(explicitly_enabled_connectors.clone()) - .await; - record_additional_contexts(&sess, &turn_context, additional_contexts).await; - if !input.is_empty() { - // Track the previous-turn baseline from the regular user-turn path only so - // standalone tasks (compact/shell/review/undo) cannot suppress future - // model/realtime injections. 
- sess.set_previous_turn_settings(Some(PreviousTurnSettings { - model: turn_context.model_info.slug.clone(), - realtime_active: Some(turn_context.realtime_active), - })) - .await; - } - - if !skill_items.is_empty() { - sess.record_conversation_items(&turn_context, &skill_items) - .await; - } - if !plugin_items.is_empty() { - sess.record_conversation_items(&turn_context, &plugin_items) - .await; - } - - let skills_outcome = Some(turn_context.turn_skills.outcome.as_ref()); - sess.maybe_start_ghost_snapshot(Arc::clone(&turn_context), cancellation_token.child_token()) - .await; - let mut last_agent_message: Option = None; - let mut stop_hook_active = false; - // Although from the perspective of codex.rs, TurnDiffTracker has the lifecycle of a Task which contains - // many turns, from the perspective of the user, it is a single turn. - let turn_diff_tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new())); - let mut server_model_warning_emitted_for_turn = false; - - // `ModelClientSession` is turn-scoped and caches WebSocket + sticky routing state, so we reuse - // one instance across retries within this turn. - let mut client_session = - prewarmed_client_session.unwrap_or_else(|| sess.services.model_client.new_session()); - // Pending input is drained into history before building the next model request. - // However, we defer that drain until after sampling in two cases: - // 1. At the start of a turn, so the fresh user prompt in `input` gets sampled first. - // 2. After auto-compact, when model/tool continuation needs to resume before any steer. - let mut can_drain_pending_input = input.is_empty(); - - loop { - if run_pending_session_start_hooks(&sess, &turn_context).await { - break; - } - - // Note that pending_input would be something like a message the user - // submitted through the UI while the model was running. Though the UI - // may support this, the model might not. 
- let pending_input = if can_drain_pending_input { - sess.get_pending_input().await - } else { - Vec::new() - }; - - let mut blocked_pending_input = false; - let mut blocked_pending_input_contexts = Vec::new(); - let mut requeued_pending_input = false; - let mut accepted_pending_input = Vec::new(); - if !pending_input.is_empty() { - let mut pending_input_iter = pending_input.into_iter(); - while let Some(pending_input_item) = pending_input_iter.next() { - match inspect_pending_input(&sess, &turn_context, pending_input_item).await { - PendingInputHookDisposition::Accepted(pending_input) => { - accepted_pending_input.push(*pending_input); - } - PendingInputHookDisposition::Blocked { - additional_contexts, - } => { - let remaining_pending_input = pending_input_iter.collect::>(); - if !remaining_pending_input.is_empty() { - let _ = sess.prepend_pending_input(remaining_pending_input).await; - requeued_pending_input = true; - } - blocked_pending_input_contexts = additional_contexts; - blocked_pending_input = true; - break; - } - } - } - } - - let has_accepted_pending_input = !accepted_pending_input.is_empty(); - for pending_input in accepted_pending_input { - record_pending_input(&sess, &turn_context, pending_input).await; - } - record_additional_contexts(&sess, &turn_context, blocked_pending_input_contexts).await; - - if blocked_pending_input && !has_accepted_pending_input { - if requeued_pending_input { - continue; - } - break; - } - - // Construct the input that we will send to the model. 
- let sampling_request_input: Vec = { - sess.clone_history() - .await - .for_prompt(&turn_context.model_info.input_modalities) - }; - - let sampling_request_input_messages = sampling_request_input - .iter() - .filter_map(|item| match parse_turn_item(item) { - Some(TurnItem::UserMessage(user_message)) => Some(user_message), - _ => None, - }) - .map(|user_message| user_message.message()) - .collect::>(); - let turn_metadata_header = turn_context.turn_metadata_state.current_header_value(); - match run_sampling_request( - Arc::clone(&sess), - Arc::clone(&turn_context), - Arc::clone(&turn_diff_tracker), - &mut client_session, - turn_metadata_header.as_deref(), - sampling_request_input, - &explicitly_enabled_connectors, - skills_outcome, - &mut server_model_warning_emitted_for_turn, - cancellation_token.child_token(), - ) - .await - { - Ok(sampling_request_output) => { - let SamplingRequestResult { - needs_follow_up: model_needs_follow_up, - last_agent_message: sampling_request_last_agent_message, - } = sampling_request_output; - can_drain_pending_input = true; - let has_pending_input = sess.has_pending_input().await; - let needs_follow_up = model_needs_follow_up || has_pending_input; - let total_usage_tokens = sess.get_total_token_usage().await; - let token_limit_reached = total_usage_tokens >= auto_compact_limit; - - let estimated_token_count = - sess.get_estimated_token_count(turn_context.as_ref()).await; - - trace!( - turn_id = %turn_context.sub_id, - total_usage_tokens, - estimated_token_count = ?estimated_token_count, - auto_compact_limit, - token_limit_reached, - model_needs_follow_up, - has_pending_input, - needs_follow_up, - "post sampling token usage" - ); - - // as long as compaction works well in getting us way below the token limit, we shouldn't worry about being in an infinite loop. 
- if token_limit_reached && needs_follow_up { - if run_auto_compact( - &sess, - &turn_context, - InitialContextInjection::BeforeLastUserMessage, - CompactionReason::ContextLimit, - CompactionPhase::MidTurn, - ) - .await - .is_err() - { - return None; - } - client_session.reset_websocket_session(); - can_drain_pending_input = !model_needs_follow_up; - continue; - } - - if !needs_follow_up { - last_agent_message = sampling_request_last_agent_message; - let stop_hook_permission_mode = match turn_context.approval_policy.value() { - AskForApproval::Never => "bypassPermissions", - AskForApproval::UnlessTrusted - | AskForApproval::OnFailure - | AskForApproval::OnRequest - | AskForApproval::Granular(_) => "default", - } - .to_string(); - let stop_request = codex_hooks::StopRequest { - session_id: sess.conversation_id, - turn_id: turn_context.sub_id.clone(), - cwd: turn_context.cwd.to_path_buf(), - transcript_path: sess.hook_transcript_path().await, - model: turn_context.model_info.slug.clone(), - permission_mode: stop_hook_permission_mode, - stop_hook_active, - last_assistant_message: last_agent_message.clone(), - }; - for run in sess.hooks().preview_stop(&stop_request) { - sess.send_event( - &turn_context, - EventMsg::HookStarted(codex_protocol::protocol::HookStartedEvent { - turn_id: Some(turn_context.sub_id.clone()), - run, - }), - ) - .await; - } - let stop_outcome = sess.hooks().run_stop(stop_request).await; - for completed in stop_outcome.hook_events { - sess.send_event(&turn_context, EventMsg::HookCompleted(completed)) - .await; - } - if stop_outcome.should_block { - if let Some(hook_prompt_message) = - build_hook_prompt_message(&stop_outcome.continuation_fragments) - { - sess.record_conversation_items( - &turn_context, - std::slice::from_ref(&hook_prompt_message), - ) - .await; - stop_hook_active = true; - continue; - } else { - sess.send_event( - &turn_context, - EventMsg::Warning(WarningEvent { - message: "Stop hook requested continuation without a prompt; 
ignoring the block.".to_string(), - }), - ) - .await; - } - } - if stop_outcome.should_stop { - break; - } - let hook_outcomes = sess - .hooks() - .dispatch(HookPayload { - session_id: sess.conversation_id, - cwd: turn_context.cwd.to_path_buf(), - client: turn_context.app_server_client_name.clone(), - triggered_at: chrono::Utc::now(), - hook_event: HookEvent::AfterAgent { - event: HookEventAfterAgent { - thread_id: sess.conversation_id, - turn_id: turn_context.sub_id.clone(), - input_messages: sampling_request_input_messages, - last_assistant_message: last_agent_message.clone(), - }, - }, - }) - .await; - - let mut abort_message = None; - for hook_outcome in hook_outcomes { - let hook_name = hook_outcome.hook_name; - match hook_outcome.result { - HookResult::Success => {} - HookResult::FailedContinue(error) => { - warn!( - turn_id = %turn_context.sub_id, - hook_name = %hook_name, - error = %error, - "after_agent hook failed; continuing" - ); - } - HookResult::FailedAbort(error) => { - let message = format!( - "after_agent hook '{hook_name}' failed and aborted turn completion: {error}" - ); - warn!( - turn_id = %turn_context.sub_id, - hook_name = %hook_name, - error = %error, - "after_agent hook failed; aborting operation" - ); - if abort_message.is_none() { - abort_message = Some(message); - } - } - } - } - if let Some(message) = abort_message { - sess.send_event( - &turn_context, - EventMsg::Error(ErrorEvent { - message, - codex_error_info: None, - }), - ) - .await; - return None; - } - break; - } - continue; - } - Err(CodexErr::TurnAborted) => { - // Aborted turn is reported via a different event. - break; - } - Err(CodexErr::InvalidImageRequest()) => { - let mut state = sess.state.lock().await; - error_or_panic( - "Invalid image detected; sanitizing tool output to prevent poisoning", - ); - if state.history.replace_last_turn_images("Invalid image") { - continue; - } - let event = EventMsg::Error(ErrorEvent { - message: "Invalid image in your last message. 
Please remove it and try again." - .to_string(), - codex_error_info: Some(CodexErrorInfo::BadRequest), - }); - sess.send_event(&turn_context, event).await; - break; - } - Err(e) => { - info!("Turn error: {e:#}"); - let event = EventMsg::Error(e.to_error_event(/*message_prefix*/ None)); - sess.send_event(&turn_context, event).await; - // let the user continue the conversation - break; - } - } - } - - last_agent_message -} - -async fn run_pre_sampling_compact( - sess: &Arc, - turn_context: &Arc, -) -> CodexResult { - let total_usage_tokens_before_compaction = sess.get_total_token_usage().await; - let mut pre_sampling_compacted = maybe_run_previous_model_inline_compact( - sess, - turn_context, - total_usage_tokens_before_compaction, - ) - .await?; - let total_usage_tokens = sess.get_total_token_usage().await; - let auto_compact_limit = turn_context - .model_info - .auto_compact_token_limit() - .unwrap_or(i64::MAX); - // Compact if the total usage tokens are greater than the auto compact limit - if total_usage_tokens >= auto_compact_limit { - run_auto_compact( - sess, - turn_context, - InitialContextInjection::DoNotInject, - CompactionReason::ContextLimit, - CompactionPhase::PreTurn, - ) - .await?; - pre_sampling_compacted = true; - } - Ok(pre_sampling_compacted) -} - -/// Runs pre-sampling compaction against the previous model when switching to a smaller -/// context-window model. -/// -/// Returns `Ok(true)` when compaction ran successfully, `Ok(false)` when compaction was skipped -/// because the model/context-window preconditions were not met, and `Err(_)` only when compaction -/// was attempted and failed. 
-async fn maybe_run_previous_model_inline_compact( - sess: &Arc, - turn_context: &Arc, - total_usage_tokens: i64, -) -> CodexResult { - let Some(previous_turn_settings) = sess.previous_turn_settings().await else { - return Ok(false); - }; - let previous_model_turn_context = Arc::new( - turn_context - .with_model(previous_turn_settings.model, &sess.services.models_manager) - .await, - ); - - let Some(old_context_window) = previous_model_turn_context.model_context_window() else { - return Ok(false); - }; - let Some(new_context_window) = turn_context.model_context_window() else { - return Ok(false); - }; - let new_auto_compact_limit = turn_context - .model_info - .auto_compact_token_limit() - .unwrap_or(i64::MAX); - let should_run = total_usage_tokens > new_auto_compact_limit - && previous_model_turn_context.model_info.slug != turn_context.model_info.slug - && old_context_window > new_context_window; - if should_run { - run_auto_compact( - sess, - &previous_model_turn_context, - InitialContextInjection::DoNotInject, - CompactionReason::ModelDownshift, - CompactionPhase::PreTurn, - ) - .await?; - return Ok(true); - } - Ok(false) -} - -async fn run_auto_compact( - sess: &Arc, - turn_context: &Arc, - initial_context_injection: InitialContextInjection, - reason: CompactionReason, - phase: CompactionPhase, -) -> CodexResult<()> { - if should_use_remote_compact_task(&turn_context.provider) { - run_inline_remote_auto_compact_task( - Arc::clone(sess), - Arc::clone(turn_context), - initial_context_injection, - reason, - phase, - ) - .await?; - } else { - run_inline_auto_compact_task( - Arc::clone(sess), - Arc::clone(turn_context), - initial_context_injection, - reason, - phase, - ) - .await?; - } - Ok(()) -} - -fn collect_explicit_app_ids_from_skill_items( - skill_items: &[ResponseItem], - connectors: &[connectors::AppInfo], - skill_name_counts_lower: &HashMap, -) -> HashSet { - if skill_items.is_empty() || connectors.is_empty() { - return HashSet::new(); - } - - let 
skill_messages = skill_items - .iter() - .filter_map(|item| match item { - ResponseItem::Message { content, .. } => { - content.iter().find_map(|content_item| match content_item { - ContentItem::InputText { text } => Some(text.clone()), - _ => None, - }) - } - _ => None, - }) - .collect::>(); - if skill_messages.is_empty() { - return HashSet::new(); - } - - let mentions = collect_tool_mentions_from_messages(&skill_messages); - let mention_names_lower = mentions - .plain_names - .iter() - .map(|name| name.to_ascii_lowercase()) - .collect::>(); - let mut connector_ids = mentions - .paths - .iter() - .filter(|path| tool_kind_for_path(path) == ToolMentionKind::App) - .filter_map(|path| app_id_from_path(path).map(str::to_string)) - .collect::>(); - - let connector_slug_counts = build_connector_slug_counts(connectors); - for connector in connectors { - let slug = connectors::connector_mention_slug(connector); - let connector_count = connector_slug_counts.get(&slug).copied().unwrap_or(0); - let skill_count = skill_name_counts_lower.get(&slug).copied().unwrap_or(0); - if connector_count == 1 && skill_count == 0 && mention_names_lower.contains(&slug) { - connector_ids.insert(connector.id.clone()); - } - } - - connector_ids -} - -fn filter_connectors_for_input( - connectors: &[connectors::AppInfo], - input: &[ResponseItem], - explicitly_enabled_connectors: &HashSet, - skill_name_counts_lower: &HashMap, -) -> Vec { - let connectors: Vec = connectors - .iter() - .filter(|connector| connector.is_enabled) - .cloned() - .collect::>(); - if connectors.is_empty() { - return Vec::new(); - } - - let user_messages = collect_user_messages(input); - if user_messages.is_empty() && explicitly_enabled_connectors.is_empty() { - return Vec::new(); - } - - let mentions = collect_tool_mentions_from_messages(&user_messages); - let mention_names_lower = mentions - .plain_names - .iter() - .map(|name| name.to_ascii_lowercase()) - .collect::>(); - - let connector_slug_counts = 
build_connector_slug_counts(&connectors); - let mut allowed_connector_ids = explicitly_enabled_connectors.clone(); - for path in mentions - .paths - .iter() - .filter(|path| tool_kind_for_path(path) == ToolMentionKind::App) - { - if let Some(connector_id) = app_id_from_path(path) { - allowed_connector_ids.insert(connector_id.to_string()); - } - } - - connectors - .into_iter() - .filter(|connector| { - connector_inserted_in_messages( - connector, - &mention_names_lower, - &allowed_connector_ids, - &connector_slug_counts, - skill_name_counts_lower, - ) - }) - .collect() -} - -fn connector_inserted_in_messages( - connector: &connectors::AppInfo, - mention_names_lower: &HashSet, - allowed_connector_ids: &HashSet, - connector_slug_counts: &HashMap, - skill_name_counts_lower: &HashMap, -) -> bool { - if allowed_connector_ids.contains(&connector.id) { - return true; - } - - let mention_slug = connectors::connector_mention_slug(connector); - let connector_count = connector_slug_counts - .get(&mention_slug) - .copied() - .unwrap_or(0); - let skill_count = skill_name_counts_lower - .get(&mention_slug) - .copied() - .unwrap_or(0); - connector_count == 1 && skill_count == 0 && mention_names_lower.contains(&mention_slug) -} - -pub(crate) fn build_prompt( - input: Vec, - router: &ToolRouter, - turn_context: &TurnContext, - base_instructions: BaseInstructions, -) -> Prompt { - let deferred_dynamic_tools = turn_context - .dynamic_tools - .iter() - .filter(|tool| tool.defer_loading) - .map(|tool| tool.name.as_str()) - .collect::>(); - let tools = if deferred_dynamic_tools.is_empty() { - router.model_visible_specs() - } else { - router - .model_visible_specs() - .into_iter() - .filter(|spec| !deferred_dynamic_tools.contains(spec.name())) - .collect() - }; - - Prompt { - input, - tools, - parallel_tool_calls: turn_context.model_info.supports_parallel_tool_calls, - base_instructions, - personality: turn_context.personality, - output_schema: 
turn_context.final_output_json_schema.clone(), - } -} - -#[allow(clippy::too_many_arguments)] -#[instrument(level = "trace", - skip_all, - fields( - turn_id = %turn_context.sub_id, - model = %turn_context.model_info.slug, - cwd = %turn_context.cwd.display() - ) -)] -async fn run_sampling_request( - sess: Arc, - turn_context: Arc, - turn_diff_tracker: SharedTurnDiffTracker, - client_session: &mut ModelClientSession, - turn_metadata_header: Option<&str>, - input: Vec, - explicitly_enabled_connectors: &HashSet, - skills_outcome: Option<&SkillLoadOutcome>, - server_model_warning_emitted_for_turn: &mut bool, - cancellation_token: CancellationToken, -) -> CodexResult { - let router = built_tools( - sess.as_ref(), - turn_context.as_ref(), - &input, - explicitly_enabled_connectors, - skills_outcome, - &cancellation_token, - ) - .await?; - - let base_instructions = sess.get_base_instructions().await; - - let prompt = build_prompt( - input, - router.as_ref(), - turn_context.as_ref(), - base_instructions, - ); - let tool_runtime = ToolCallRuntime::new( - Arc::clone(&router), - Arc::clone(&sess), - Arc::clone(&turn_context), - Arc::clone(&turn_diff_tracker), - ); - let _code_mode_worker = sess - .services - .code_mode_service - .start_turn_worker( - &sess, - &turn_context, - Arc::clone(&router), - Arc::clone(&turn_diff_tracker), - ) - .await; - let mut retries = 0; - loop { - let err = match try_run_sampling_request( - tool_runtime.clone(), - Arc::clone(&sess), - Arc::clone(&turn_context), - client_session, - turn_metadata_header, - Arc::clone(&turn_diff_tracker), - server_model_warning_emitted_for_turn, - &prompt, - cancellation_token.child_token(), - ) - .await - { - Ok(output) => { - return Ok(output); - } - Err(CodexErr::ContextWindowExceeded) => { - sess.set_total_tokens_full(&turn_context).await; - return Err(CodexErr::ContextWindowExceeded); - } - Err(CodexErr::UsageLimitReached(e)) => { - let rate_limits = e.rate_limits.clone(); - if let Some(rate_limits) = rate_limits 
{ - sess.update_rate_limits(&turn_context, *rate_limits).await; - } - return Err(CodexErr::UsageLimitReached(e)); - } - Err(err) => err, - }; - - if !err.is_retryable() { - return Err(err); - } - - // Use the configured provider-specific stream retry budget. - let max_retries = turn_context.provider.stream_max_retries(); - if retries >= max_retries - && client_session.try_switch_fallback_transport( - &turn_context.session_telemetry, - &turn_context.model_info, - ) - { - sess.send_event( - &turn_context, - EventMsg::Warning(WarningEvent { - message: format!("Falling back from WebSockets to HTTPS transport. {err:#}"), - }), - ) - .await; - retries = 0; - continue; - } - if retries < max_retries { - retries += 1; - let delay = match &err { - CodexErr::Stream(_, requested_delay) => { - requested_delay.unwrap_or_else(|| backoff(retries)) - } - _ => backoff(retries), - }; - warn!( - "stream disconnected - retrying sampling request ({retries}/{max_retries} in {delay:?})...", - ); - - // In release builds, hide the first websocket retry notification to reduce noisy - // transient reconnect messages. In debug builds, keep full visibility for diagnosis. - let report_error = retries > 1 - || cfg!(debug_assertions) - || !sess.services.model_client.responses_websocket_enabled(); - if report_error { - // Surface retry information to any UI/front‑end so the - // user understands what is happening instead of staring - // at a seemingly frozen screen. - sess.notify_stream_error( - &turn_context, - format!("Reconnecting... 
{retries}/{max_retries}"), - err, - ) - .await; - } - tokio::time::sleep(delay).await; - } else { - return Err(err); - } - } -} - -pub(crate) async fn built_tools( - sess: &Session, - turn_context: &TurnContext, - input: &[ResponseItem], - explicitly_enabled_connectors: &HashSet, - skills_outcome: Option<&SkillLoadOutcome>, - cancellation_token: &CancellationToken, -) -> CodexResult> { - let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; - let has_mcp_servers = mcp_connection_manager.has_servers(); - let all_mcp_tools = mcp_connection_manager - .list_all_tools() - .or_cancel(cancellation_token) - .await?; - drop(mcp_connection_manager); - let loaded_plugins = sess - .services - .plugins_manager - .plugins_for_config(&turn_context.config); - - let mut effective_explicitly_enabled_connectors = explicitly_enabled_connectors.clone(); - effective_explicitly_enabled_connectors.extend(sess.get_connector_selection().await); - - let apps_enabled = turn_context.apps_enabled(); - let accessible_connectors = - apps_enabled.then(|| connectors::accessible_connectors_from_mcp_tools(&all_mcp_tools)); - let accessible_connectors_with_enabled_state = - accessible_connectors.as_ref().map(|connectors| { - connectors::with_app_enabled_state(connectors.clone(), &turn_context.config) - }); - let connectors = if apps_enabled { - let connectors = connectors::merge_plugin_apps_with_accessible( - loaded_plugins.effective_apps(), - accessible_connectors.clone().unwrap_or_default(), - ); - Some(connectors::with_app_enabled_state( - connectors, - &turn_context.config, - )) - } else { - None - }; - let auth = sess.services.auth_manager.auth().await; - let discoverable_tools = if apps_enabled && turn_context.tools_config.tool_suggest { - if let Some(accessible_connectors) = accessible_connectors_with_enabled_state.as_ref() { - match connectors::list_tool_suggest_discoverable_tools_with_auth( - &turn_context.config, - auth.as_ref(), - accessible_connectors.as_slice(), 
- ) - .await - .map(|discoverable_tools| { - filter_tool_suggest_discoverable_tools_for_client( - discoverable_tools, - turn_context.app_server_client_name.as_deref(), - ) - }) { - Ok(discoverable_tools) if discoverable_tools.is_empty() => None, - Ok(discoverable_tools) => Some(discoverable_tools), - Err(err) => { - warn!("failed to load discoverable tool suggestions: {err:#}"); - None - } - } - } else { - None - } - } else { - None - }; - - let explicitly_enabled = if let Some(connectors) = connectors.as_ref() { - let skill_name_counts_lower = skills_outcome.map_or_else(HashMap::new, |outcome| { - build_skill_name_counts(&outcome.skills, &outcome.disabled_paths).1 - }); - - filter_connectors_for_input( - connectors, - input, - &effective_explicitly_enabled_connectors, - &skill_name_counts_lower, - ) - } else { - Vec::new() - }; - let mcp_tool_exposure = build_mcp_tool_exposure( - &all_mcp_tools, - connectors.as_deref(), - explicitly_enabled.as_slice(), - &turn_context.config, - &turn_context.tools_config, - ); - let direct_mcp_tools = has_mcp_servers.then_some(mcp_tool_exposure.direct_tools); - - Ok(Arc::new(ToolRouter::from_config( - &turn_context.tools_config, - ToolRouterParams { - deferred_mcp_tools: mcp_tool_exposure.deferred_tools, - mcp_tools: direct_mcp_tools, - discoverable_tools, - dynamic_tools: turn_context.dynamic_tools.as_slice(), - }, - ))) -} - -#[derive(Debug)] -struct SamplingRequestResult { - needs_follow_up: bool, - last_agent_message: Option, -} - -/// Ephemeral per-response state for streaming a single proposed plan. -/// This is intentionally not persisted or stored in session/state since it -/// only exists while a response is actively streaming. The final plan text -/// is extracted from the completed assistant message. -/// Tracks a single proposed plan item across a streaming response. 
-struct ProposedPlanItemState { - item_id: String, - started: bool, - completed: bool, -} - -/// Aggregated state used only while streaming a plan-mode response. -/// Includes per-item parsers, deferred agent message bookkeeping, and the plan item lifecycle. -struct PlanModeStreamState { - /// Agent message items started by the model but deferred until we see non-plan text. - pending_agent_message_items: HashMap, - /// Agent message items whose start notification has been emitted. - started_agent_message_items: HashSet, - /// Leading whitespace buffered until we see non-whitespace text for an item. - leading_whitespace_by_item: HashMap, - /// Tracks plan item lifecycle while streaming plan output. - plan_item_state: ProposedPlanItemState, -} - -impl PlanModeStreamState { - fn new(turn_id: &str) -> Self { - Self { - pending_agent_message_items: HashMap::new(), - started_agent_message_items: HashSet::new(), - leading_whitespace_by_item: HashMap::new(), - plan_item_state: ProposedPlanItemState::new(turn_id), - } - } -} - -#[derive(Debug, Default)] -struct AssistantMessageStreamParsers { - plan_mode: bool, - parsers_by_item: HashMap, -} - -type ParsedAssistantTextDelta = AssistantTextChunk; - -impl AssistantMessageStreamParsers { - fn new(plan_mode: bool) -> Self { - Self { - plan_mode, - parsers_by_item: HashMap::new(), - } - } - - fn parser_mut(&mut self, item_id: &str) -> &mut AssistantTextStreamParser { - let plan_mode = self.plan_mode; - self.parsers_by_item - .entry(item_id.to_string()) - .or_insert_with(|| AssistantTextStreamParser::new(plan_mode)) - } - - fn seed_item_text(&mut self, item_id: &str, text: &str) -> ParsedAssistantTextDelta { - if text.is_empty() { - return ParsedAssistantTextDelta::default(); - } - self.parser_mut(item_id).push_str(text) - } - - fn parse_delta(&mut self, item_id: &str, delta: &str) -> ParsedAssistantTextDelta { - self.parser_mut(item_id).push_str(delta) - } - - fn finish_item(&mut self, item_id: &str) -> ParsedAssistantTextDelta 
{ - let Some(mut parser) = self.parsers_by_item.remove(item_id) else { - return ParsedAssistantTextDelta::default(); - }; - parser.finish() - } - - fn drain_finished(&mut self) -> Vec<(String, ParsedAssistantTextDelta)> { - let parsers_by_item = std::mem::take(&mut self.parsers_by_item); - parsers_by_item - .into_iter() - .map(|(item_id, mut parser)| (item_id, parser.finish())) - .collect() - } -} - -impl ProposedPlanItemState { - fn new(turn_id: &str) -> Self { - Self { - item_id: format!("{turn_id}-plan"), - started: false, - completed: false, - } - } - - async fn start(&mut self, sess: &Session, turn_context: &TurnContext) { - if self.started || self.completed { - return; - } - self.started = true; - let item = TurnItem::Plan(PlanItem { - id: self.item_id.clone(), - text: String::new(), - }); - sess.emit_turn_item_started(turn_context, &item).await; - } - - async fn push_delta(&mut self, sess: &Session, turn_context: &TurnContext, delta: &str) { - if self.completed { - return; - } - if delta.is_empty() { - return; - } - let event = PlanDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id: self.item_id.clone(), - delta: delta.to_string(), - }; - sess.send_event(turn_context, EventMsg::PlanDelta(event)) - .await; - } - - async fn complete_with_text( - &mut self, - sess: &Session, - turn_context: &TurnContext, - text: String, - ) { - if self.completed || !self.started { - return; - } - self.completed = true; - let item = TurnItem::Plan(PlanItem { - id: self.item_id.clone(), - text, - }); - sess.emit_turn_item_completed(turn_context, item).await; - } -} - -/// In plan mode we defer agent message starts until the parser emits non-plan -/// text. The parser buffers each line until it can rule out a tag prefix, so -/// plan-only outputs never show up as empty assistant messages. 
-async fn maybe_emit_pending_agent_message_start( - sess: &Session, - turn_context: &TurnContext, - state: &mut PlanModeStreamState, - item_id: &str, -) { - if state.started_agent_message_items.contains(item_id) { - return; - } - if let Some(item) = state.pending_agent_message_items.remove(item_id) { - sess.emit_turn_item_started(turn_context, &item).await; - state - .started_agent_message_items - .insert(item_id.to_string()); - } -} - -/// Agent messages are text-only today; concatenate all text entries. -fn agent_message_text(item: &codex_protocol::items::AgentMessageItem) -> String { - item.content - .iter() - .map(|entry| match entry { - codex_protocol::items::AgentMessageContent::Text { text } => text.as_str(), - }) - .collect() -} - -fn realtime_text_for_event(msg: &EventMsg) -> Option { - match msg { - EventMsg::AgentMessage(event) => Some(event.message.clone()), - EventMsg::ItemCompleted(event) => match &event.item { - TurnItem::AgentMessage(item) => Some(agent_message_text(item)), - _ => None, - }, - EventMsg::Error(_) - | EventMsg::Warning(_) - | EventMsg::RealtimeConversationStarted(_) - | EventMsg::RealtimeConversationSdp(_) - | EventMsg::RealtimeConversationRealtime(_) - | EventMsg::RealtimeConversationClosed(_) - | EventMsg::ModelReroute(_) - | EventMsg::ContextCompacted(_) - | EventMsg::ThreadRolledBack(_) - | EventMsg::TurnStarted(_) - | EventMsg::TurnComplete(_) - | EventMsg::TokenCount(_) - | EventMsg::UserMessage(_) - | EventMsg::AgentMessageDelta(_) - | EventMsg::AgentReasoning(_) - | EventMsg::AgentReasoningDelta(_) - | EventMsg::AgentReasoningRawContent(_) - | EventMsg::AgentReasoningRawContentDelta(_) - | EventMsg::AgentReasoningSectionBreak(_) - | EventMsg::SessionConfigured(_) - | EventMsg::ThreadNameUpdated(_) - | EventMsg::McpStartupUpdate(_) - | EventMsg::McpStartupComplete(_) - | EventMsg::McpToolCallBegin(_) - | EventMsg::McpToolCallEnd(_) - | EventMsg::WebSearchBegin(_) - | EventMsg::WebSearchEnd(_) - | EventMsg::ExecCommandBegin(_) - 
| EventMsg::ExecCommandOutputDelta(_) - | EventMsg::TerminalInteraction(_) - | EventMsg::ExecCommandEnd(_) - | EventMsg::PatchApplyBegin(_) - | EventMsg::PatchApplyEnd(_) - | EventMsg::ViewImageToolCall(_) - | EventMsg::ImageGenerationBegin(_) - | EventMsg::ImageGenerationEnd(_) - | EventMsg::ExecApprovalRequest(_) - | EventMsg::RequestPermissions(_) - | EventMsg::RequestUserInput(_) - | EventMsg::DynamicToolCallRequest(_) - | EventMsg::DynamicToolCallResponse(_) - | EventMsg::GuardianAssessment(_) - | EventMsg::ElicitationRequest(_) - | EventMsg::ApplyPatchApprovalRequest(_) - | EventMsg::DeprecationNotice(_) - | EventMsg::BackgroundEvent(_) - | EventMsg::UndoStarted(_) - | EventMsg::UndoCompleted(_) - | EventMsg::StreamError(_) - | EventMsg::TurnDiff(_) - | EventMsg::GetHistoryEntryResponse(_) - | EventMsg::McpListToolsResponse(_) - | EventMsg::ListSkillsResponse(_) - | EventMsg::RealtimeConversationListVoicesResponse(_) - | EventMsg::SkillsUpdateAvailable - | EventMsg::PlanUpdate(_) - | EventMsg::TurnAborted(_) - | EventMsg::ShutdownComplete - | EventMsg::EnteredReviewMode(_) - | EventMsg::ExitedReviewMode(_) - | EventMsg::RawResponseItem(_) - | EventMsg::ItemStarted(_) - | EventMsg::HookStarted(_) - | EventMsg::HookCompleted(_) - | EventMsg::AgentMessageContentDelta(_) - | EventMsg::PlanDelta(_) - | EventMsg::ReasoningContentDelta(_) - | EventMsg::ReasoningRawContentDelta(_) - | EventMsg::CollabAgentSpawnBegin(_) - | EventMsg::CollabAgentSpawnEnd(_) - | EventMsg::CollabAgentInteractionBegin(_) - | EventMsg::CollabAgentInteractionEnd(_) - | EventMsg::CollabWaitingBegin(_) - | EventMsg::CollabWaitingEnd(_) - | EventMsg::CollabCloseBegin(_) - | EventMsg::CollabCloseEnd(_) - | EventMsg::CollabResumeBegin(_) - | EventMsg::CollabResumeEnd(_) => None, - } -} - -/// Split the stream into normal assistant text vs. proposed plan content. -/// Normal text becomes AgentMessage deltas; plan content becomes PlanDelta + -/// TurnItem::Plan. 
-async fn handle_plan_segments( - sess: &Session, - turn_context: &TurnContext, - state: &mut PlanModeStreamState, - item_id: &str, - segments: Vec, -) { - for segment in segments { - match segment { - ProposedPlanSegment::Normal(delta) => { - if delta.is_empty() { - continue; - } - let has_non_whitespace = delta.chars().any(|ch| !ch.is_whitespace()); - if !has_non_whitespace && !state.started_agent_message_items.contains(item_id) { - let entry = state - .leading_whitespace_by_item - .entry(item_id.to_string()) - .or_default(); - entry.push_str(&delta); - continue; - } - let delta = if !state.started_agent_message_items.contains(item_id) { - if let Some(prefix) = state.leading_whitespace_by_item.remove(item_id) { - format!("{prefix}{delta}") - } else { - delta - } - } else { - delta - }; - maybe_emit_pending_agent_message_start(sess, turn_context, state, item_id).await; - - let event = AgentMessageContentDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id: item_id.to_string(), - delta, - }; - sess.send_event(turn_context, EventMsg::AgentMessageContentDelta(event)) - .await; - } - ProposedPlanSegment::ProposedPlanStart => { - if !state.plan_item_state.completed { - state.plan_item_state.start(sess, turn_context).await; - } - } - ProposedPlanSegment::ProposedPlanDelta(delta) => { - if !state.plan_item_state.completed { - if !state.plan_item_state.started { - state.plan_item_state.start(sess, turn_context).await; - } - state - .plan_item_state - .push_delta(sess, turn_context, &delta) - .await; - } - } - ProposedPlanSegment::ProposedPlanEnd => {} - } - } -} - -async fn emit_streamed_assistant_text_delta( - sess: &Session, - turn_context: &TurnContext, - plan_mode_state: Option<&mut PlanModeStreamState>, - item_id: &str, - parsed: ParsedAssistantTextDelta, -) { - if parsed.is_empty() { - return; - } - if !parsed.citations.is_empty() { - // Citation extraction is intentionally local for now; we strip citations 
from display text - // but do not yet surface them in protocol events. - let _citations = parsed.citations; - } - if let Some(state) = plan_mode_state { - if !parsed.plan_segments.is_empty() { - handle_plan_segments(sess, turn_context, state, item_id, parsed.plan_segments).await; - } - return; - } - if parsed.visible_text.is_empty() { - return; - } - let event = AgentMessageContentDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id: item_id.to_string(), - delta: parsed.visible_text, - }; - sess.send_event(turn_context, EventMsg::AgentMessageContentDelta(event)) - .await; -} - -/// Flush buffered assistant text parser state when an assistant message item ends. -async fn flush_assistant_text_segments_for_item( - sess: &Session, - turn_context: &TurnContext, - plan_mode_state: Option<&mut PlanModeStreamState>, - parsers: &mut AssistantMessageStreamParsers, - item_id: &str, -) { - let parsed = parsers.finish_item(item_id); - emit_streamed_assistant_text_delta(sess, turn_context, plan_mode_state, item_id, parsed).await; -} - -/// Flush any remaining buffered assistant text parser state at response completion. -async fn flush_assistant_text_segments_all( - sess: &Session, - turn_context: &TurnContext, - mut plan_mode_state: Option<&mut PlanModeStreamState>, - parsers: &mut AssistantMessageStreamParsers, -) { - for (item_id, parsed) in parsers.drain_finished() { - emit_streamed_assistant_text_delta( - sess, - turn_context, - plan_mode_state.as_deref_mut(), - &item_id, - parsed, - ) - .await; - } -} - -/// Emit completion for plan items by parsing the finalized assistant message. -async fn maybe_complete_plan_item_from_message( - sess: &Session, - turn_context: &TurnContext, - state: &mut PlanModeStreamState, - item: &ResponseItem, -) { - if let ResponseItem::Message { role, content, .. 
} = item - && role == "assistant" - { - let mut text = String::new(); - for entry in content { - if let ContentItem::OutputText { text: chunk } = entry { - text.push_str(chunk); - } - } - if let Some(plan_text) = extract_proposed_plan_text(&text) { - let (plan_text, _citations) = strip_citations(&plan_text); - if !state.plan_item_state.started { - state.plan_item_state.start(sess, turn_context).await; - } - state - .plan_item_state - .complete_with_text(sess, turn_context, plan_text) - .await; - } - } -} - -/// Emit a completed agent message in plan mode, respecting deferred starts. -async fn emit_agent_message_in_plan_mode( - sess: &Session, - turn_context: &TurnContext, - agent_message: codex_protocol::items::AgentMessageItem, - state: &mut PlanModeStreamState, -) { - let agent_message_id = agent_message.id.clone(); - let text = agent_message_text(&agent_message); - if text.trim().is_empty() { - state.pending_agent_message_items.remove(&agent_message_id); - state.started_agent_message_items.remove(&agent_message_id); - return; - } - - maybe_emit_pending_agent_message_start(sess, turn_context, state, &agent_message_id).await; - - if !state - .started_agent_message_items - .contains(&agent_message_id) - { - let start_item = state - .pending_agent_message_items - .remove(&agent_message_id) - .unwrap_or_else(|| { - TurnItem::AgentMessage(codex_protocol::items::AgentMessageItem { - id: agent_message_id.clone(), - content: Vec::new(), - phase: None, - memory_citation: None, - }) - }); - sess.emit_turn_item_started(turn_context, &start_item).await; - state - .started_agent_message_items - .insert(agent_message_id.clone()); - } - - sess.emit_turn_item_completed(turn_context, TurnItem::AgentMessage(agent_message)) - .await; - state.started_agent_message_items.remove(&agent_message_id); -} - -/// Emit completion for a plan-mode turn item, handling agent messages specially. 
-async fn emit_turn_item_in_plan_mode( - sess: &Session, - turn_context: &TurnContext, - turn_item: TurnItem, - previously_active_item: Option<&TurnItem>, - state: &mut PlanModeStreamState, -) { - match turn_item { - TurnItem::AgentMessage(agent_message) => { - emit_agent_message_in_plan_mode(sess, turn_context, agent_message, state).await; - } - _ => { - if previously_active_item.is_none() { - sess.emit_turn_item_started(turn_context, &turn_item).await; - } - sess.emit_turn_item_completed(turn_context, turn_item).await; - } - } -} - -/// Handle a completed assistant response item in plan mode, returning true if handled. -async fn handle_assistant_item_done_in_plan_mode( - sess: &Session, - turn_context: &TurnContext, - item: &ResponseItem, - state: &mut PlanModeStreamState, - previously_active_item: Option<&TurnItem>, - last_agent_message: &mut Option, -) -> bool { - if let ResponseItem::Message { role, .. } = item - && role == "assistant" - { - maybe_complete_plan_item_from_message(sess, turn_context, state, item).await; - - if let Some(turn_item) = - handle_non_tool_response_item(sess, turn_context, item, /*plan_mode*/ true).await - { - emit_turn_item_in_plan_mode( - sess, - turn_context, - turn_item, - previously_active_item, - state, - ) - .await; - } - - record_completed_response_item(sess, turn_context, item).await; - if let Some(agent_message) = last_assistant_message_from_item(item, /*plan_mode*/ true) { - *last_agent_message = Some(agent_message); - } - return true; - } - false -} - -async fn drain_in_flight( - in_flight: &mut FuturesOrdered>>, - sess: Arc, - turn_context: Arc, -) -> CodexResult<()> { - while let Some(res) = in_flight.next().await { - match res { - Ok(response_input) => { - sess.record_conversation_items(&turn_context, &[response_input.into()]) - .await; - } - Err(err) => { - error_or_panic(format!("in-flight tool future failed during drain: {err}")); - } - } - } - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -#[instrument(level = 
"trace", - skip_all, - fields( - turn_id = %turn_context.sub_id, - model = %turn_context.model_info.slug - ) -)] -async fn try_run_sampling_request( - tool_runtime: ToolCallRuntime, - sess: Arc, - turn_context: Arc, - client_session: &mut ModelClientSession, - turn_metadata_header: Option<&str>, - turn_diff_tracker: SharedTurnDiffTracker, - server_model_warning_emitted_for_turn: &mut bool, - prompt: &Prompt, - cancellation_token: CancellationToken, -) -> CodexResult { - feedback_tags!( - model = turn_context.model_info.slug.clone(), - approval_policy = turn_context.approval_policy.value(), - sandbox_policy = turn_context.sandbox_policy.get(), - effort = turn_context.reasoning_effort, - auth_mode = sess.services.auth_manager.auth_mode(), - features = sess.features.enabled_features(), - ); - let mut stream = client_session - .stream( - prompt, - &turn_context.model_info, - &turn_context.session_telemetry, - turn_context.reasoning_effort, - turn_context.reasoning_summary, - turn_context.config.service_tier, - turn_metadata_header, - ) - .instrument(trace_span!("stream_request")) - .or_cancel(&cancellation_token) - .await??; - let mut in_flight: FuturesOrdered>> = - FuturesOrdered::new(); - let mut needs_follow_up = false; - let mut last_agent_message: Option = None; - let mut active_item: Option = None; - let mut should_emit_turn_diff = false; - let plan_mode = turn_context.collaboration_mode.mode == ModeKind::Plan; - let mut assistant_message_stream_parsers = AssistantMessageStreamParsers::new(plan_mode); - let mut plan_mode_state = plan_mode.then(|| PlanModeStreamState::new(&turn_context.sub_id)); - let receiving_span = trace_span!("receiving_stream"); - let outcome: CodexResult = loop { - let handle_responses = trace_span!( - parent: &receiving_span, - "handle_responses", - otel.name = field::Empty, - tool_name = field::Empty, - from = field::Empty, - ); - - let event = match stream - .next() - .instrument(trace_span!(parent: &handle_responses, "receiving")) - 
.or_cancel(&cancellation_token) - .await - { - Ok(event) => event, - Err(codex_async_utils::CancelErr::Cancelled) => break Err(CodexErr::TurnAborted), - }; - - let event = match event { - Some(res) => res?, - None => { - break Err(CodexErr::Stream( - "stream closed before response.completed".into(), - None, - )); - } - }; - - sess.services - .session_telemetry - .record_responses(&handle_responses, &event); - record_turn_ttft_metric(&turn_context, &event).await; - - match event { - ResponseEvent::Created => {} - ResponseEvent::OutputItemDone(item) => { - let previously_active_item = active_item.take(); - if let Some(previous) = previously_active_item.as_ref() - && matches!(previous, TurnItem::AgentMessage(_)) - { - let item_id = previous.id(); - flush_assistant_text_segments_for_item( - &sess, - &turn_context, - plan_mode_state.as_mut(), - &mut assistant_message_stream_parsers, - &item_id, - ) - .await; - } - if let Some(state) = plan_mode_state.as_mut() - && handle_assistant_item_done_in_plan_mode( - &sess, - &turn_context, - &item, - state, - previously_active_item.as_ref(), - &mut last_agent_message, - ) - .await - { - continue; - } - - let mut ctx = HandleOutputCtx { - sess: sess.clone(), - turn_context: turn_context.clone(), - tool_runtime: tool_runtime.clone(), - cancellation_token: cancellation_token.child_token(), - }; - - let preempt_for_mailbox_mail = match &item { - ResponseItem::Message { role, phase, .. } => { - role == "assistant" && matches!(phase, Some(MessagePhase::Commentary)) - } - ResponseItem::Reasoning { .. } => true, - ResponseItem::LocalShellCall { .. } - | ResponseItem::FunctionCall { .. } - | ResponseItem::ToolSearchCall { .. } - | ResponseItem::FunctionCallOutput { .. } - | ResponseItem::CustomToolCall { .. } - | ResponseItem::CustomToolCallOutput { .. } - | ResponseItem::ToolSearchOutput { .. } - | ResponseItem::WebSearchCall { .. } - | ResponseItem::ImageGenerationCall { .. } - | ResponseItem::GhostSnapshot { .. 
} - | ResponseItem::Compaction { .. } - | ResponseItem::Other => false, - }; - - let output_result = handle_output_item_done(&mut ctx, item, previously_active_item) - .instrument(handle_responses) - .await?; - if let Some(tool_future) = output_result.tool_future { - in_flight.push_back(tool_future); - } - if let Some(agent_message) = output_result.last_agent_message { - last_agent_message = Some(agent_message); - } - needs_follow_up |= output_result.needs_follow_up; - // todo: remove before stabilizing multi-agent v2 - if preempt_for_mailbox_mail && sess.mailbox_rx.lock().await.has_pending() { - break Ok(SamplingRequestResult { - needs_follow_up: true, - last_agent_message, - }); - } - } - ResponseEvent::OutputItemAdded(item) => { - if let Some(turn_item) = handle_non_tool_response_item( - sess.as_ref(), - turn_context.as_ref(), - &item, - plan_mode, - ) - .await - { - let mut turn_item = turn_item; - let mut seeded_parsed: Option = None; - let mut seeded_item_id: Option = None; - if matches!(turn_item, TurnItem::AgentMessage(_)) - && let Some(raw_text) = raw_assistant_output_text_from_item(&item) - { - let item_id = turn_item.id(); - let mut seeded = - assistant_message_stream_parsers.seed_item_text(&item_id, &raw_text); - if let TurnItem::AgentMessage(agent_message) = &mut turn_item { - agent_message.content = - vec![codex_protocol::items::AgentMessageContent::Text { - text: if plan_mode { - String::new() - } else { - std::mem::take(&mut seeded.visible_text) - }, - }]; - } - seeded_parsed = plan_mode.then_some(seeded); - seeded_item_id = Some(item_id); - } - if let Some(state) = plan_mode_state.as_mut() - && matches!(turn_item, TurnItem::AgentMessage(_)) - { - let item_id = turn_item.id(); - state - .pending_agent_message_items - .insert(item_id, turn_item.clone()); - } else { - sess.emit_turn_item_started(&turn_context, &turn_item).await; - } - if let (Some(state), Some(item_id), Some(parsed)) = ( - plan_mode_state.as_mut(), - seeded_item_id.as_deref(), - 
seeded_parsed, - ) { - emit_streamed_assistant_text_delta( - &sess, - &turn_context, - Some(state), - item_id, - parsed, - ) - .await; - } - active_item = Some(turn_item); - } - } - ResponseEvent::ServerModel(server_model) => { - if !*server_model_warning_emitted_for_turn - && sess - .maybe_warn_on_server_model_mismatch(&turn_context, server_model) - .await - { - *server_model_warning_emitted_for_turn = true; - } - } - ResponseEvent::ServerReasoningIncluded(included) => { - sess.set_server_reasoning_included(included).await; - } - ResponseEvent::RateLimits(snapshot) => { - // Update internal state with latest rate limits, but defer sending until - // token usage is available to avoid duplicate TokenCount events. - sess.update_rate_limits(&turn_context, snapshot).await; - } - ResponseEvent::ModelsEtag(etag) => { - // Update internal state with latest models etag - sess.services.models_manager.refresh_if_new_etag(etag).await; - } - ResponseEvent::Completed { - response_id: _, - token_usage, - } => { - flush_assistant_text_segments_all( - &sess, - &turn_context, - plan_mode_state.as_mut(), - &mut assistant_message_stream_parsers, - ) - .await; - sess.update_token_usage_info(&turn_context, token_usage.as_ref()) - .await; - should_emit_turn_diff = true; - - break Ok(SamplingRequestResult { - needs_follow_up, - last_agent_message, - }); - } - ResponseEvent::OutputTextDelta(delta) => { - // In review child threads, suppress assistant text deltas; the - // UI will show a selection popup from the final ReviewOutput. 
- if let Some(active) = active_item.as_ref() { - let item_id = active.id(); - if matches!(active, TurnItem::AgentMessage(_)) { - let parsed = assistant_message_stream_parsers.parse_delta(&item_id, &delta); - emit_streamed_assistant_text_delta( - &sess, - &turn_context, - plan_mode_state.as_mut(), - &item_id, - parsed, - ) - .await; - } else { - let event = AgentMessageContentDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id, - delta, - }; - sess.send_event(&turn_context, EventMsg::AgentMessageContentDelta(event)) - .await; - } - } else { - error_or_panic("OutputTextDelta without active item".to_string()); - } - } - ResponseEvent::ReasoningSummaryDelta { - delta, - summary_index, - } => { - if let Some(active) = active_item.as_ref() { - let event = ReasoningContentDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id: active.id(), - delta, - summary_index, - }; - sess.send_event(&turn_context, EventMsg::ReasoningContentDelta(event)) - .await; - } else { - error_or_panic("ReasoningSummaryDelta without active item".to_string()); - } - } - ResponseEvent::ReasoningSummaryPartAdded { summary_index } => { - if let Some(active) = active_item.as_ref() { - let event = - EventMsg::AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent { - item_id: active.id(), - summary_index, - }); - sess.send_event(&turn_context, event).await; - } else { - error_or_panic("ReasoningSummaryPartAdded without active item".to_string()); - } - } - ResponseEvent::ReasoningContentDelta { - delta, - content_index, - } => { - if let Some(active) = active_item.as_ref() { - let event = ReasoningRawContentDeltaEvent { - thread_id: sess.conversation_id.to_string(), - turn_id: turn_context.sub_id.clone(), - item_id: active.id(), - delta, - content_index, - }; - sess.send_event(&turn_context, EventMsg::ReasoningRawContentDelta(event)) - .await; - } else { - 
error_or_panic("ReasoningRawContentDelta without active item".to_string()); - } - } - } - }; - - flush_assistant_text_segments_all( - &sess, - &turn_context, - plan_mode_state.as_mut(), - &mut assistant_message_stream_parsers, - ) - .await; - - drain_in_flight(&mut in_flight, sess.clone(), turn_context.clone()).await?; - - if cancellation_token.is_cancelled() { - return Err(CodexErr::TurnAborted); - } - - if should_emit_turn_diff { - let unified_diff = { - let mut tracker = turn_diff_tracker.lock().await; - tracker.get_unified_diff() - }; - if let Ok(Some(unified_diff)) = unified_diff { - let msg = EventMsg::TurnDiff(TurnDiffEvent { unified_diff }); - sess.clone().send_event(&turn_context, msg).await; - } - } - - outcome -} - -pub(super) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option { - for item in responses.iter().rev() { - if let Some(message) = last_assistant_message_from_item(item, /*plan_mode*/ false) { - return Some(message); - } - } - None -} - use crate::memories::prompts::build_memory_tool_developer_instructions; #[cfg(test)] pub(crate) use tests::make_session_and_context; diff --git a/codex-rs/core/src/codex/handlers.rs b/codex-rs/core/src/codex/handlers.rs new file mode 100644 index 0000000000..7e0ba45dad --- /dev/null +++ b/codex-rs/core/src/codex/handlers.rs @@ -0,0 +1,1232 @@ +use crate::realtime_conversation::handle_audio as handle_realtime_conversation_audio; +use crate::realtime_conversation::handle_close as handle_realtime_conversation_close; +use crate::realtime_conversation::handle_start as handle_realtime_conversation_start; +use crate::realtime_conversation::handle_text as handle_realtime_conversation_text; +use async_channel::Receiver; +use codex_otel::set_parent_from_w3c_trace_context; +use codex_protocol::protocol::Submission; +use tracing::Instrument; +use tracing::debug_span; +use tracing::info_span; + +use crate::codex::Session; +use crate::codex::SessionSettingsUpdate; +use crate::codex::SteerInputError; 
+ +use crate::codex::spawn_review_thread; +use crate::config::Config; +use crate::config_loader::CloudRequirementsLoader; +use crate::config_loader::LoaderOverrides; +use crate::config_loader::load_config_layers_state; +use crate::realtime_context::REALTIME_TURN_TOKEN_BUDGET; +use crate::realtime_context::truncate_realtime_text_to_token_budget; +use crate::realtime_conversation::REALTIME_USER_TEXT_PREFIX; +use crate::realtime_conversation::prefix_realtime_v2_text; +use codex_exec_server::LOCAL_FS; +use codex_features::Feature; +use codex_utils_absolute_path::AbsolutePathBuf; + +use crate::review_prompts::resolve_review_request; +use crate::rollout::RolloutRecorder; +use crate::rollout::read_session_meta_line; +use crate::tasks::CompactTask; +use crate::tasks::UndoTask; +use crate::tasks::UserShellCommandMode; +use crate::tasks::UserShellCommandTask; +use crate::tasks::execute_user_shell_command; +use codex_mcp::collect_mcp_snapshot_from_manager; +use codex_mcp::compute_auth_statuses; +use codex_protocol::protocol::CodexErrorInfo; +use codex_protocol::protocol::ErrorEvent; +use codex_protocol::protocol::Event; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::InterAgentCommunication; +use codex_protocol::protocol::ListSkillsResponseEvent; +use codex_protocol::protocol::McpServerRefreshConfig; +use codex_protocol::protocol::Op; +use codex_protocol::protocol::RealtimeConversationListVoicesResponseEvent; +use codex_protocol::protocol::RealtimeVoicesList; +use codex_protocol::protocol::ReviewDecision; +use codex_protocol::protocol::ReviewRequest; +use codex_protocol::protocol::RolloutItem; +use codex_protocol::protocol::SkillErrorInfo; +use codex_protocol::protocol::SkillsListEntry; +use codex_protocol::protocol::ThreadMemoryMode; +use codex_protocol::protocol::ThreadNameUpdatedEvent; +use codex_protocol::protocol::ThreadRolledBackEvent; +use codex_protocol::protocol::TurnAbortReason; +use codex_protocol::protocol::WarningEvent; +use 
codex_protocol::request_permissions::RequestPermissionsResponse; +use codex_protocol::request_user_input::RequestUserInputResponse; + +use crate::context_manager::is_user_turn_boundary; +use codex_protocol::config_types::CollaborationMode; +use codex_protocol::config_types::ModeKind; +use codex_protocol::config_types::Settings; +use codex_protocol::dynamic_tools::DynamicToolResponse; +use codex_protocol::items::UserMessageItem; +use codex_protocol::mcp::RequestId as ProtocolRequestId; +use codex_protocol::user_input::UserInput; +use codex_rmcp_client::ElicitationAction; +use codex_rmcp_client::ElicitationResponse; +use serde_json::Value; +use std::path::PathBuf; +use std::sync::Arc; +use tracing::debug; +use tracing::info; +use tracing::warn; + +pub async fn interrupt(sess: &Arc) { + sess.interrupt_task().await; +} + +pub async fn clean_background_terminals(sess: &Arc) { + sess.close_unified_exec_processes().await; +} + +pub async fn realtime_conversation_list_voices(sess: &Session, sub_id: String) { + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::RealtimeConversationListVoicesResponse( + RealtimeConversationListVoicesResponseEvent { + voices: RealtimeVoicesList::builtin(), + }, + ), + }) + .await; +} + +pub async fn override_turn_context(sess: &Session, sub_id: String, updates: SessionSettingsUpdate) { + if let Err(err) = sess.update_settings(updates).await { + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::BadRequest), + }), + }) + .await; + } +} + +pub async fn user_input_or_turn(sess: &Arc, sub_id: String, op: Op) { + user_input_or_turn_inner( + sess, + sub_id, + op, + /*mirror_user_text_to_realtime*/ Some(()), + ) + .await; +} + +pub(super) async fn user_input_or_turn_inner( + sess: &Arc, + sub_id: String, + op: Op, + mirror_user_text_to_realtime: Option<()>, +) { + let (items, updates, responsesapi_client_metadata) = match op { + Op::UserTurn { + 
cwd, + approval_policy, + approvals_reviewer, + sandbox_policy, + model, + effort, + summary, + service_tier, + final_output_json_schema, + items, + collaboration_mode, + personality, + } => { + let collaboration_mode = collaboration_mode.or_else(|| { + Some(CollaborationMode { + mode: ModeKind::Default, + settings: Settings { + model: model.clone(), + reasoning_effort: effort, + developer_instructions: None, + }, + }) + }); + ( + items, + SessionSettingsUpdate { + cwd: Some(cwd), + approval_policy: Some(approval_policy), + approvals_reviewer, + sandbox_policy: Some(sandbox_policy), + windows_sandbox_level: None, + collaboration_mode, + reasoning_summary: summary, + service_tier, + final_output_json_schema: Some(final_output_json_schema), + personality, + app_server_client_name: None, + app_server_client_version: None, + }, + None, + ) + } + Op::UserInput { + items, + final_output_json_schema, + responsesapi_client_metadata, + } => ( + items, + SessionSettingsUpdate { + final_output_json_schema: Some(final_output_json_schema), + ..Default::default() + }, + responsesapi_client_metadata, + ), + _ => unreachable!(), + }; + + let Ok(current_context) = sess.new_turn_with_sub_id(sub_id.clone(), updates).await else { + // new_turn_with_sub_id already emits the error event. 
+ return; + }; + sess.maybe_emit_unknown_model_warning_for_turn(current_context.as_ref()) + .await; + let accepted_items = match sess + .steer_input( + items.clone(), + /*expected_turn_id*/ None, + responsesapi_client_metadata.clone(), + ) + .await + { + Ok(_) => { + current_context.session_telemetry.user_prompt(&items); + Some(items) + } + Err(SteerInputError::NoActiveTurn(items)) => { + if let Some(responsesapi_client_metadata) = responsesapi_client_metadata { + current_context + .turn_metadata_state + .set_responsesapi_client_metadata(responsesapi_client_metadata); + } + current_context.session_telemetry.user_prompt(&items); + sess.refresh_mcp_servers_if_requested(¤t_context) + .await; + let accepted_items = items.clone(); + sess.spawn_task( + Arc::clone(¤t_context), + items, + crate::tasks::RegularTask::new(), + ) + .await; + Some(accepted_items) + } + Err(err) => { + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Error(err.to_error_event()), + }) + .await; + None + } + }; + if let (Some(items), Some(())) = (accepted_items, mirror_user_text_to_realtime) { + self::mirror_user_text_to_realtime(sess, &items).await; + } +} + +async fn mirror_user_text_to_realtime(sess: &Arc, items: &[UserInput]) { + let text = UserMessageItem::new(items).message(); + if text.is_empty() { + return; + } + let text = if sess.conversation.is_running_v2().await { + prefix_realtime_v2_text(text, REALTIME_USER_TEXT_PREFIX) + } else { + text + }; + let text = truncate_realtime_text_to_token_budget(&text, REALTIME_TURN_TOKEN_BUDGET); + if text.is_empty() { + return; + } + if sess.conversation.running_state().await.is_none() { + return; + } + if let Err(err) = sess.conversation.text_in(text).await { + debug!("failed to mirror user text to realtime conversation: {err}"); + } +} + +/// Records an inter-agent assistant envelope, then lets the shared pending-work scheduler +/// decide whether an idle session should start a regular turn. 
+pub async fn inter_agent_communication( + sess: &Arc, + sub_id: String, + communication: InterAgentCommunication, +) { + let trigger_turn = communication.trigger_turn; + sess.enqueue_mailbox_communication(communication); + if trigger_turn { + sess.maybe_start_turn_for_pending_work_with_sub_id(sub_id) + .await; + } +} + +pub async fn run_user_shell_command(sess: &Arc, sub_id: String, command: String) { + if let Some((turn_context, cancellation_token)) = + sess.active_turn_context_and_cancellation_token().await + { + let session = Arc::clone(sess); + tokio::spawn(async move { + execute_user_shell_command( + session, + turn_context, + command, + cancellation_token, + UserShellCommandMode::ActiveTurnAuxiliary, + ) + .await; + }); + return; + } + + let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; + sess.spawn_task( + Arc::clone(&turn_context), + Vec::new(), + UserShellCommandTask::new(command), + ) + .await; +} + +pub async fn resolve_elicitation( + sess: &Arc, + server_name: String, + request_id: ProtocolRequestId, + decision: codex_protocol::approvals::ElicitationAction, + content: Option, + meta: Option, +) { + let action = match decision { + codex_protocol::approvals::ElicitationAction::Accept => ElicitationAction::Accept, + codex_protocol::approvals::ElicitationAction::Decline => ElicitationAction::Decline, + codex_protocol::approvals::ElicitationAction::Cancel => ElicitationAction::Cancel, + }; + let content = match action { + // Preserve the legacy fallback for clients that only send an action. 
+ ElicitationAction::Accept => Some(content.unwrap_or_else(|| serde_json::json!({}))), + ElicitationAction::Decline | ElicitationAction::Cancel => None, + }; + let response = ElicitationResponse { + action, + content, + meta, + }; + let request_id = match request_id { + ProtocolRequestId::String(value) => { + rmcp::model::NumberOrString::String(std::sync::Arc::from(value)) + } + ProtocolRequestId::Integer(value) => rmcp::model::NumberOrString::Number(value), + }; + if let Err(err) = sess + .resolve_elicitation(server_name, request_id, response) + .await + { + warn!( + error = %err, + "failed to resolve elicitation request in session" + ); + } +} + +/// Propagate a user's exec approval decision to the session. +/// Also optionally applies an execpolicy amendment. +pub async fn exec_approval( + sess: &Arc, + approval_id: String, + turn_id: Option, + decision: ReviewDecision, +) { + let event_turn_id = turn_id.unwrap_or_else(|| approval_id.clone()); + if let ReviewDecision::ApprovedExecpolicyAmendment { + proposed_execpolicy_amendment, + } = &decision + { + match sess + .persist_execpolicy_amendment(proposed_execpolicy_amendment) + .await + { + Ok(()) => { + sess.record_execpolicy_amendment_message( + &event_turn_id, + proposed_execpolicy_amendment, + ) + .await; + } + Err(err) => { + let message = format!("Failed to apply execpolicy amendment: {err}"); + tracing::warn!("{message}"); + let warning = EventMsg::Warning(WarningEvent { message }); + sess.send_event_raw(Event { + id: event_turn_id.clone(), + msg: warning, + }) + .await; + } + } + } + match decision { + ReviewDecision::Abort => { + sess.interrupt_task().await; + } + other => sess.notify_approval(&approval_id, other).await, + } +} + +pub async fn patch_approval(sess: &Arc, id: String, decision: ReviewDecision) { + match decision { + ReviewDecision::Abort => { + sess.interrupt_task().await; + } + other => sess.notify_approval(&id, other).await, + } +} + +pub async fn request_user_input_response( + sess: &Arc, 
+ id: String, + response: RequestUserInputResponse, +) { + sess.notify_user_input_response(&id, response).await; +} + +pub async fn request_permissions_response( + sess: &Arc, + id: String, + response: RequestPermissionsResponse, +) { + sess.notify_request_permissions_response(&id, response) + .await; +} + +pub async fn dynamic_tool_response(sess: &Arc, id: String, response: DynamicToolResponse) { + sess.notify_dynamic_tool_response(&id, response).await; +} + +pub async fn add_to_history(sess: &Arc, config: &Arc, text: String) { + let id = sess.conversation_id; + let config = Arc::clone(config); + tokio::spawn(async move { + if let Err(e) = crate::message_history::append_entry(&text, &id, &config).await { + warn!("failed to append to message history: {e}"); + } + }); +} + +pub async fn get_history_entry_request( + sess: &Arc, + config: &Arc, + sub_id: String, + offset: usize, + log_id: u64, +) { + let config = Arc::clone(config); + let sess_clone = Arc::clone(sess); + + tokio::spawn(async move { + // Run lookup in blocking thread because it does file IO + locking. 
+ let entry_opt = tokio::task::spawn_blocking(move || { + crate::message_history::lookup(log_id, offset, &config) + }) + .await + .unwrap_or(None); + + let event = Event { + id: sub_id, + msg: EventMsg::GetHistoryEntryResponse( + codex_protocol::protocol::GetHistoryEntryResponseEvent { + offset, + log_id, + entry: entry_opt.map(|e| codex_protocol::message_history::HistoryEntry { + conversation_id: e.session_id, + ts: e.ts, + text: e.text, + }), + }, + ), + }; + + sess_clone.send_event_raw(event).await; + }); +} + +pub async fn refresh_mcp_servers(sess: &Arc, refresh_config: McpServerRefreshConfig) { + let mut guard = sess.pending_mcp_server_refresh_config.lock().await; + *guard = Some(refresh_config); +} + +pub async fn reload_user_config(sess: &Arc) { + sess.reload_user_config_layer().await; +} + +pub async fn list_mcp_tools(sess: &Session, config: &Arc, sub_id: String) { + let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; + let auth = sess.services.auth_manager.auth().await; + let mcp_servers = sess + .services + .mcp_manager + .effective_servers(config, auth.as_ref()) + .await; + let snapshot = collect_mcp_snapshot_from_manager( + &mcp_connection_manager, + compute_auth_statuses(mcp_servers.iter(), config.mcp_oauth_credentials_store_mode).await, + ) + .await; + let event = Event { + id: sub_id, + msg: EventMsg::McpListToolsResponse(snapshot), + }; + sess.send_event_raw(event).await; +} + +pub async fn list_skills(sess: &Session, sub_id: String, cwds: Vec, force_reload: bool) { + let default_cwd = { + let state = sess.state.lock().await; + state.session_configuration.cwd.to_path_buf() + }; + let cwds = if cwds.is_empty() { + vec![default_cwd] + } else { + cwds + }; + + let skills_manager = &sess.services.skills_manager; + let plugins_manager = &sess.services.plugins_manager; + let fs = sess + .services + .environment + .as_ref() + .map(|environment| environment.get_filesystem()); + let config = sess.get_config().await; + let 
codex_home = sess.codex_home().await; + let mut skills = Vec::new(); + let empty_cli_overrides: &[(String, toml::Value)] = &[]; + for cwd in cwds { + let cwd_abs = match AbsolutePathBuf::relative_to_current_dir(cwd.as_path()) { + Ok(path) => path, + Err(err) => { + let error_path = cwd.clone(); + skills.push(SkillsListEntry { + cwd, + skills: Vec::new(), + errors: vec![SkillErrorInfo { + path: error_path, + message: err.to_string(), + }], + }); + continue; + } + }; + let config_layer_stack = match load_config_layers_state( + LOCAL_FS.as_ref(), + &codex_home, + Some(cwd_abs.clone()), + empty_cli_overrides, + LoaderOverrides::default(), + CloudRequirementsLoader::default(), + ) + .await + { + Ok(config_layer_stack) => config_layer_stack, + Err(err) => { + let error_path = cwd.clone(); + skills.push(SkillsListEntry { + cwd, + skills: Vec::new(), + errors: vec![SkillErrorInfo { + path: error_path, + message: err.to_string(), + }], + }); + continue; + } + }; + let effective_skill_roots = plugins_manager + .effective_skill_roots_for_layer_stack( + &config_layer_stack, + config.features.enabled(Feature::Plugins), + ) + .await; + let skills_input = crate::SkillsLoadInput::new( + cwd_abs.clone(), + effective_skill_roots, + config_layer_stack, + config.bundled_skills_enabled(), + ); + let outcome = skills_manager + .skills_for_cwd(&skills_input, force_reload, fs.clone()) + .await; + let errors = super::errors_to_info(&outcome.errors); + let skills_metadata = super::skills_to_info(&outcome.skills, &outcome.disabled_paths); + skills.push(SkillsListEntry { + cwd, + skills: skills_metadata, + errors, + }); + } + + let event = Event { + id: sub_id, + msg: EventMsg::ListSkillsResponse(ListSkillsResponseEvent { skills }), + }; + sess.send_event_raw(event).await; +} + +pub async fn undo(sess: &Arc, sub_id: String) { + let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; + sess.spawn_task(turn_context, Vec::new(), UndoTask::new()) + .await; +} + +pub async fn 
compact(sess: &Arc, sub_id: String) { + let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; + + sess.spawn_task( + Arc::clone(&turn_context), + vec![UserInput::Text { + text: turn_context.compact_prompt().to_string(), + // Compaction prompt is synthesized; no UI element ranges to preserve. + text_elements: Vec::new(), + }], + CompactTask, + ) + .await; +} + +pub async fn drop_memories(sess: &Arc, config: &Arc, sub_id: String) { + let mut errors = Vec::new(); + + if let Some(state_db) = sess.services.state_db.as_deref() { + if let Err(err) = state_db.clear_memory_data().await { + errors.push(format!("failed clearing memory rows from state db: {err}")); + } + } else { + errors.push("state db unavailable; memory rows were not cleared".to_string()); + } + + if let Err(err) = crate::memories::clear_memory_roots_contents(&config.codex_home).await { + errors.push(format!( + "failed clearing memory directories under {}: {err}", + config.codex_home.display() + )); + } + + if errors.is_empty() { + let memory_root = crate::memories::memory_root(&config.codex_home); + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Warning(WarningEvent { + message: format!( + "Dropped memories at {} and cleared memory rows from state db.", + memory_root.display() + ), + }), + }) + .await; + return; + } + + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: format!("Memory drop completed with errors: {}", errors.join("; ")), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }) + .await; +} + +pub async fn update_memories(sess: &Arc, config: &Arc, sub_id: String) { + let session_source = { + let state = sess.state.lock().await; + state.session_configuration.session_source.clone() + }; + + crate::memories::start_memories_startup_task(sess, Arc::clone(config), &session_source); + + sess.send_event_raw(Event { + id: sub_id.clone(), + msg: EventMsg::Warning(WarningEvent { + message: "Memory update triggered.".to_string(), + }), 
+ }) + .await; +} + +pub async fn thread_rollback(sess: &Arc, sub_id: String, num_turns: u32) { + if num_turns == 0 { + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: "num_turns must be >= 1".to_string(), + codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), + }), + }) + .await; + return; + } + + let has_active_turn = { sess.active_turn.lock().await.is_some() }; + if has_active_turn { + sess.send_event_raw(Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: "Cannot rollback while a turn is in progress.".to_string(), + codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), + }), + }) + .await; + return; + } + + let turn_context = sess.new_default_turn_with_sub_id(sub_id).await; + let rollout_path = { + let recorder = { + let guard = sess.services.rollout.lock().await; + guard.clone() + }; + let Some(recorder) = recorder else { + sess.send_event_raw(Event { + id: turn_context.sub_id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: "thread rollback requires a persisted rollout path".to_string(), + codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), + }), + }) + .await; + return; + }; + recorder.rollout_path().to_path_buf() + }; + if let Some(recorder) = { + let guard = sess.services.rollout.lock().await; + guard.clone() + } && let Err(err) = recorder.flush().await + { + sess.send_event_raw(Event { + id: turn_context.sub_id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: format!( + "failed to flush rollout `{}` for rollback replay: {err}", + rollout_path.display() + ), + codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), + }), + }) + .await; + return; + } + + let initial_history = match RolloutRecorder::get_rollout_history(rollout_path.as_path()).await { + Ok(history) => history, + Err(err) => { + sess.send_event_raw(Event { + id: turn_context.sub_id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: format!( + "failed to load rollout `{}` for rollback 
replay: {err}", + rollout_path.display() + ), + codex_error_info: Some(CodexErrorInfo::ThreadRollbackFailed), + }), + }) + .await; + return; + } + }; + + let rollback_event = ThreadRolledBackEvent { num_turns }; + let rollback_msg = EventMsg::ThreadRolledBack(rollback_event.clone()); + let replay_items = initial_history + .get_rollout_items() + .into_iter() + .chain(std::iter::once(RolloutItem::EventMsg(rollback_msg.clone()))) + .collect::>(); + sess.apply_rollout_reconstruction(turn_context.as_ref(), replay_items.as_slice()) + .await; + sess.recompute_token_usage(turn_context.as_ref()).await; + + sess.persist_rollout_items(&[RolloutItem::EventMsg(rollback_msg.clone())]) + .await; + if let Err(err) = sess.flush_rollout().await { + sess.send_event( + turn_context.as_ref(), + EventMsg::Warning(WarningEvent { + message: format!( + "Rolled the thread back, but failed to save the rollback marker. Codex will continue retrying. Error: {err}" + ), + }), + ) + .await; + } + + sess.deliver_event_raw(Event { + id: turn_context.sub_id.clone(), + msg: rollback_msg, + }) + .await; +} + +async fn persist_thread_name_update( + sess: &Arc, + event: ThreadNameUpdatedEvent, +) -> anyhow::Result { + let msg = EventMsg::ThreadNameUpdated(event); + let item = RolloutItem::EventMsg(msg.clone()); + let recorder = { + let guard = sess.services.rollout.lock().await; + guard.clone() + } + .ok_or_else(|| anyhow::anyhow!("Session persistence is disabled; cannot rename thread."))?; + recorder.persist().await?; + recorder.record_items(std::slice::from_ref(&item)).await?; + recorder.flush().await?; + Ok(msg) +} + +pub(super) async fn persist_thread_memory_mode_update( + sess: &Arc, + mode: ThreadMemoryMode, +) -> anyhow::Result<()> { + let recorder = { + let guard = sess.services.rollout.lock().await; + guard.clone() + } + .ok_or_else(|| { + anyhow::anyhow!("Session persistence is disabled; cannot update thread memory mode.") + })?; + recorder.persist().await?; + recorder.flush().await?; + + let 
rollout_path = recorder.rollout_path().to_path_buf(); + let mut session_meta = read_session_meta_line(rollout_path.as_path()).await?; + if session_meta.meta.id != sess.conversation_id { + anyhow::bail!( + "rollout session metadata id mismatch: expected {}, found {}", + sess.conversation_id, + session_meta.meta.id + ); + } + session_meta.meta.memory_mode = Some( + match mode { + ThreadMemoryMode::Enabled => "enabled", + ThreadMemoryMode::Disabled => "disabled", + } + .to_string(), + ); + + let item = RolloutItem::SessionMeta(session_meta); + recorder.record_items(std::slice::from_ref(&item)).await?; + recorder.flush().await?; + Ok(()) +} + +/// Persists the thread name in the rollout and state database, updates in-memory state, and +/// emits a `ThreadNameUpdated` event on success. +pub async fn set_thread_name(sess: &Arc, sub_id: String, name: String) { + let Some(name) = crate::util::normalize_thread_name(&name) else { + let event = Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: "Thread name cannot be empty.".to_string(), + codex_error_info: Some(CodexErrorInfo::BadRequest), + }), + }; + sess.send_event_raw(event).await; + return; + }; + + let updated = ThreadNameUpdatedEvent { + thread_id: sess.conversation_id, + thread_name: Some(name.clone()), + }; + + let msg = match persist_thread_name_update(sess, updated).await { + Ok(msg) => msg, + Err(err) => { + warn!("Failed to persist thread name update to rollout: {err}"); + let event = Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }; + sess.send_event_raw(event).await; + return; + } + }; + + if let Some(state_db) = sess.services.state_db.as_deref() + && let Err(err) = state_db + .update_thread_title(sess.conversation_id, &name) + .await + { + warn!("Failed to update thread title in state db: {err}"); + } + + { + let mut state = sess.state.lock().await; + state.session_configuration.thread_name = 
Some(name.clone()); + } + + let codex_home = sess.codex_home().await; + if let Err(err) = + crate::rollout::append_thread_name(&codex_home, sess.conversation_id, &name).await + { + warn!("Failed to update legacy thread name index: {err}"); + } + + sess.deliver_event_raw(Event { id: sub_id, msg }).await; +} + +/// Persists thread-level memory mode metadata for the active session. +/// +/// This does not involve the model and only affects whether the thread is +/// eligible for future memory generation. +pub async fn set_thread_memory_mode(sess: &Arc, sub_id: String, mode: ThreadMemoryMode) { + if let Err(err) = persist_thread_memory_mode_update(sess, mode).await { + warn!("Failed to persist thread memory mode update to rollout: {err}"); + let event = Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }; + sess.send_event_raw(event).await; + } +} + +pub async fn shutdown(sess: &Arc, sub_id: String) -> bool { + sess.abort_all_tasks(TurnAbortReason::Interrupted).await; + let _ = sess.conversation.shutdown().await; + sess.services + .unified_exec_manager + .terminate_all_processes() + .await; + sess.guardian_review_session.shutdown().await; + info!("Shutting down Codex instance"); + let history = sess.clone_history().await; + let turn_count = history + .raw_items() + .iter() + .filter(|item| is_user_turn_boundary(item)) + .count(); + sess.services.session_telemetry.counter( + "codex.conversation.turn.count", + i64::try_from(turn_count).unwrap_or(0), + &[], + ); + + // Gracefully flush and shutdown rollout recorder on session end so tests + // that inspect the rollout file do not race with the background writer. 
+ let recorder_opt = { + let mut guard = sess.services.rollout.lock().await; + guard.take() + }; + if let Some(rec) = recorder_opt + && let Err(e) = rec.shutdown().await + { + warn!("failed to shutdown rollout recorder: {e}"); + let event = Event { + id: sub_id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: "Failed to shutdown rollout recorder".to_string(), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }; + sess.send_event_raw(event).await; + } + + let event = Event { + id: sub_id, + msg: EventMsg::ShutdownComplete, + }; + sess.send_event_raw(event).await; + true +} + +pub async fn review( + sess: &Arc, + config: &Arc, + sub_id: String, + review_request: ReviewRequest, +) { + let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await; + sess.maybe_emit_unknown_model_warning_for_turn(turn_context.as_ref()) + .await; + sess.refresh_mcp_servers_if_requested(&turn_context).await; + match resolve_review_request(review_request, &turn_context.cwd) { + Ok(resolved) => { + spawn_review_thread( + Arc::clone(sess), + Arc::clone(config), + turn_context.clone(), + sub_id, + resolved, + ) + .await; + } + Err(err) => { + let event = Event { + id: sub_id, + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }; + sess.send_event(&turn_context, event.msg).await; + } + } +} + +pub(super) async fn submission_loop( + sess: Arc, + config: Arc, + rx_sub: Receiver, +) { + // To break out of this loop, send Op::Shutdown. 
+ while let Ok(sub) = rx_sub.recv().await { + debug!(?sub, "Submission"); + let dispatch_span = submission_dispatch_span(&sub); + let should_exit = async { + match sub.op.clone() { + Op::Interrupt => { + interrupt(&sess).await; + false + } + Op::CleanBackgroundTerminals => { + clean_background_terminals(&sess).await; + false + } + Op::RealtimeConversationStart(params) => { + if let Err(err) = + handle_realtime_conversation_start(&sess, sub.id.clone(), params).await + { + sess.send_event_raw(Event { + id: sub.id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::Other), + }), + }) + .await; + } + false + } + Op::RealtimeConversationAudio(params) => { + handle_realtime_conversation_audio(&sess, sub.id.clone(), params).await; + false + } + Op::RealtimeConversationText(params) => { + handle_realtime_conversation_text(&sess, sub.id.clone(), params).await; + false + } + Op::RealtimeConversationClose => { + handle_realtime_conversation_close(&sess, sub.id.clone()).await; + false + } + Op::RealtimeConversationListVoices => { + realtime_conversation_list_voices(&sess, sub.id.clone()).await; + false + } + Op::OverrideTurnContext { + cwd, + approval_policy, + approvals_reviewer, + sandbox_policy, + windows_sandbox_level, + model, + effort, + summary, + service_tier, + collaboration_mode, + personality, + } => { + let collaboration_mode = if let Some(collab_mode) = collaboration_mode { + collab_mode + } else { + let state = sess.state.lock().await; + state.session_configuration.collaboration_mode.with_updates( + model.clone(), + effort, + /*developer_instructions*/ None, + ) + }; + override_turn_context( + &sess, + sub.id.clone(), + SessionSettingsUpdate { + cwd, + approval_policy, + approvals_reviewer, + sandbox_policy, + windows_sandbox_level, + collaboration_mode: Some(collaboration_mode), + reasoning_summary: summary, + service_tier, + personality, + ..Default::default() + }, + ) + .await; + false + } + 
Op::UserInput { .. } | Op::UserTurn { .. } => { + user_input_or_turn(&sess, sub.id.clone(), sub.op).await; + false + } + Op::InterAgentCommunication { communication } => { + inter_agent_communication(&sess, sub.id.clone(), communication).await; + false + } + Op::ExecApproval { + id: approval_id, + turn_id, + decision, + } => { + exec_approval(&sess, approval_id, turn_id, decision).await; + false + } + Op::PatchApproval { id, decision } => { + patch_approval(&sess, id, decision).await; + false + } + Op::UserInputAnswer { id, response } => { + request_user_input_response(&sess, id, response).await; + false + } + Op::RequestPermissionsResponse { id, response } => { + request_permissions_response(&sess, id, response).await; + false + } + Op::DynamicToolResponse { id, response } => { + dynamic_tool_response(&sess, id, response).await; + false + } + Op::AddToHistory { text } => { + add_to_history(&sess, &config, text).await; + false + } + Op::GetHistoryEntryRequest { offset, log_id } => { + get_history_entry_request(&sess, &config, sub.id.clone(), offset, log_id).await; + false + } + Op::ListMcpTools => { + list_mcp_tools(&sess, &config, sub.id.clone()).await; + false + } + Op::RefreshMcpServers { config } => { + refresh_mcp_servers(&sess, config).await; + false + } + Op::ReloadUserConfig => { + reload_user_config(&sess).await; + false + } + Op::ListSkills { cwds, force_reload } => { + list_skills(&sess, sub.id.clone(), cwds, force_reload).await; + false + } + Op::Undo => { + undo(&sess, sub.id.clone()).await; + false + } + Op::Compact => { + compact(&sess, sub.id.clone()).await; + false + } + Op::DropMemories => { + drop_memories(&sess, &config, sub.id.clone()).await; + false + } + Op::UpdateMemories => { + update_memories(&sess, &config, sub.id.clone()).await; + false + } + Op::ThreadRollback { num_turns } => { + thread_rollback(&sess, sub.id.clone(), num_turns).await; + false + } + Op::SetThreadName { name } => { + set_thread_name(&sess, sub.id.clone(), name).await; + 
false + } + Op::SetThreadMemoryMode { mode } => { + set_thread_memory_mode(&sess, sub.id.clone(), mode).await; + false + } + Op::RunUserShellCommand { command } => { + run_user_shell_command(&sess, sub.id.clone(), command).await; + false + } + Op::ResolveElicitation { + server_name, + request_id, + decision, + content, + meta, + } => { + resolve_elicitation(&sess, server_name, request_id, decision, content, meta) + .await; + false + } + Op::Shutdown => shutdown(&sess, sub.id.clone()).await, + Op::Review { review_request } => { + review(&sess, &config, sub.id.clone(), review_request).await; + false + } + _ => false, // Ignore unknown ops; enum is non_exhaustive to allow extensions. + } + } + .instrument(dispatch_span) + .await; + if should_exit { + break; + } + } + // Also drain cached guardian state if the submission loop exits because + // the channel closed without receiving an explicit shutdown op. + sess.guardian_review_session.shutdown().await; + debug!("Agent loop exited"); +} + +pub(super) fn submission_dispatch_span(sub: &Submission) -> tracing::Span { + let op_name = sub.op.kind(); + let span_name = format!("op.dispatch.{op_name}"); + let dispatch_span = match &sub.op { + Op::RealtimeConversationAudio(_) => { + debug_span!( + "submission_dispatch", + otel.name = span_name.as_str(), + submission.id = sub.id.as_str(), + codex.op = op_name + ) + } + _ => info_span!( + "submission_dispatch", + otel.name = span_name.as_str(), + submission.id = sub.id.as_str(), + codex.op = op_name + ), + }; + if let Some(trace) = sub.trace.as_ref() + && !set_parent_from_w3c_trace_context(&dispatch_span, trace) + { + warn!( + submission.id = sub.id.as_str(), + "ignoring invalid submission trace carrier" + ); + } + dispatch_span +} diff --git a/codex-rs/core/src/codex/mcp.rs b/codex-rs/core/src/codex/mcp.rs new file mode 100644 index 0000000000..56628f0543 --- /dev/null +++ b/codex-rs/core/src/codex/mcp.rs @@ -0,0 +1,284 @@ +use super::*; + +impl Session { + pub async fn 
request_mcp_server_elicitation( + &self, + turn_context: &TurnContext, + request_id: RequestId, + params: McpServerElicitationRequestParams, + ) -> Option { + let server_name = params.server_name.clone(); + let request = match params.request { + McpServerElicitationRequest::Form { + meta, + message, + requested_schema, + } => { + let requested_schema = match serde_json::to_value(requested_schema) { + Ok(requested_schema) => requested_schema, + Err(err) => { + warn!( + "failed to serialize MCP elicitation schema for server_name: {server_name}, request_id: {request_id}: {err:#}" + ); + return None; + } + }; + codex_protocol::approvals::ElicitationRequest::Form { + meta, + message, + requested_schema, + } + } + McpServerElicitationRequest::Url { + meta, + message, + url, + elicitation_id, + } => codex_protocol::approvals::ElicitationRequest::Url { + meta, + message, + url, + elicitation_id, + }, + }; + + let (tx_response, rx_response) = oneshot::channel(); + let prev_entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.insert_pending_elicitation( + server_name.clone(), + request_id.clone(), + tx_response, + ) + } + None => None, + } + }; + if prev_entry.is_some() { + warn!( + "Overwriting existing pending elicitation for server_name: {server_name}, request_id: {request_id}" + ); + } + let id = match request_id { + rmcp::model::NumberOrString::String(value) => { + codex_protocol::mcp::RequestId::String(value.to_string()) + } + rmcp::model::NumberOrString::Number(value) => { + codex_protocol::mcp::RequestId::Integer(value) + } + }; + let event = EventMsg::ElicitationRequest(ElicitationRequestEvent { + turn_id: params.turn_id, + server_name, + id, + request, + }); + self.send_event(turn_context, event).await; + rx_response.await.ok() + } + + pub async fn resolve_elicitation( + &self, + server_name: String, + id: RequestId, + response: ElicitationResponse, + ) -> 
anyhow::Result<()> { + let entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.remove_pending_elicitation(&server_name, &id) + } + None => None, + } + }; + if let Some(tx_response) = entry { + tx_response + .send(response) + .map_err(|e| anyhow::anyhow!("failed to send elicitation response: {e:?}"))?; + return Ok(()); + } + + self.services + .mcp_connection_manager + .read() + .await + .resolve_elicitation(server_name, id, response) + .await + } + + pub async fn list_resources( + &self, + server: &str, + params: Option, + ) -> anyhow::Result { + self.services + .mcp_connection_manager + .read() + .await + .list_resources(server, params) + .await + } + + pub async fn list_resource_templates( + &self, + server: &str, + params: Option, + ) -> anyhow::Result { + self.services + .mcp_connection_manager + .read() + .await + .list_resource_templates(server, params) + .await + } + + pub async fn read_resource( + &self, + server: &str, + params: ReadResourceRequestParams, + ) -> anyhow::Result { + self.services + .mcp_connection_manager + .read() + .await + .read_resource(server, params) + .await + } + + pub async fn call_tool( + &self, + server: &str, + tool: &str, + arguments: Option, + meta: Option, + ) -> anyhow::Result { + self.services + .mcp_connection_manager + .read() + .await + .call_tool(server, tool, arguments, meta) + .await + } + + pub(crate) async fn resolve_mcp_tool_info(&self, tool_name: &ToolName) -> Option { + self.services + .mcp_connection_manager + .read() + .await + .resolve_tool_info(tool_name) + .await + } + + async fn refresh_mcp_servers_inner( + &self, + turn_context: &TurnContext, + mcp_servers: HashMap, + store_mode: OAuthCredentialsStoreMode, + ) { + let auth = self.services.auth_manager.auth().await; + let config = self.get_config().await; + let mcp_config = config + .to_mcp_config(self.services.plugins_manager.as_ref()) + .await; + let 
tool_plugin_provenance = self + .services + .mcp_manager + .tool_plugin_provenance(config.as_ref()) + .await; + let mcp_servers = with_codex_apps_mcp(mcp_servers, auth.as_ref(), &mcp_config); + let auth_statuses = compute_auth_statuses(mcp_servers.iter(), store_mode).await; + { + let mut guard = self.services.mcp_startup_cancellation_token.lock().await; + guard.cancel(); + *guard = CancellationToken::new(); + } + let (refreshed_manager, cancel_token) = McpConnectionManager::new( + &mcp_servers, + store_mode, + auth_statuses, + &turn_context.config.permissions.approval_policy, + turn_context.sub_id.clone(), + self.get_tx_event(), + turn_context.sandbox_policy.get().clone(), + config.codex_home.to_path_buf(), + codex_apps_tools_cache_key(auth.as_ref()), + tool_plugin_provenance, + ) + .await; + { + let mut guard = self.services.mcp_startup_cancellation_token.lock().await; + if guard.is_cancelled() { + cancel_token.cancel(); + } + *guard = cancel_token; + } + + let mut manager = self.services.mcp_connection_manager.write().await; + *manager = refreshed_manager; + } + + pub(crate) async fn refresh_mcp_servers_if_requested(&self, turn_context: &TurnContext) { + let refresh_config = { self.pending_mcp_server_refresh_config.lock().await.take() }; + let Some(refresh_config) = refresh_config else { + return; + }; + + let McpServerRefreshConfig { + mcp_servers, + mcp_oauth_credentials_store_mode, + } = refresh_config; + + let mcp_servers = + match serde_json::from_value::>(mcp_servers) { + Ok(servers) => servers, + Err(err) => { + warn!("failed to parse MCP server refresh config: {err}"); + return; + } + }; + let store_mode = match serde_json::from_value::( + mcp_oauth_credentials_store_mode, + ) { + Ok(mode) => mode, + Err(err) => { + warn!("failed to parse MCP OAuth refresh config: {err}"); + return; + } + }; + + self.refresh_mcp_servers_inner(turn_context, mcp_servers, store_mode) + .await; + } + + pub(crate) async fn refresh_mcp_servers_now( + &self, + turn_context: 
&TurnContext, + mcp_servers: HashMap, + store_mode: OAuthCredentialsStoreMode, + ) { + self.refresh_mcp_servers_inner(turn_context, mcp_servers, store_mode) + .await; + } + + #[cfg(test)] + pub(crate) async fn mcp_startup_cancellation_token(&self) -> CancellationToken { + self.services + .mcp_startup_cancellation_token + .lock() + .await + .clone() + } + + pub(crate) async fn cancel_mcp_startup(&self) { + self.services + .mcp_startup_cancellation_token + .lock() + .await + .cancel(); + } +} diff --git a/codex-rs/core/src/codex/review.rs b/codex-rs/core/src/codex/review.rs new file mode 100644 index 0000000000..94de4617d5 --- /dev/null +++ b/codex-rs/core/src/codex/review.rs @@ -0,0 +1,164 @@ +use super::turn_context::image_generation_tool_auth_allowed; +use super::*; + +/// Spawn a review thread using the given prompt. +pub(super) async fn spawn_review_thread( + sess: Arc, + config: Arc, + parent_turn_context: Arc, + sub_id: String, + resolved: crate::review_prompts::ResolvedReviewRequest, +) { + let model = config + .review_model + .clone() + .unwrap_or_else(|| parent_turn_context.model_info.slug.clone()); + let review_model_info = sess + .services + .models_manager + .get_model_info(&model, &config.to_models_manager_config()) + .await; + // For reviews, disable web_search and view_image regardless of global settings. 
+ let mut review_features = sess.features.clone(); + let _ = review_features.disable(Feature::WebSearchRequest); + let _ = review_features.disable(Feature::WebSearchCached); + let review_web_search_mode = WebSearchMode::Disabled; + let tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &review_model_info, + available_models: &sess + .services + .models_manager + .list_models(RefreshStrategy::OnlineIfUncached) + .await, + features: &review_features, + image_generation_tool_auth_allowed: image_generation_tool_auth_allowed(Some( + sess.services.auth_manager.as_ref(), + )), + web_search_mode: Some(review_web_search_mode), + session_source: parent_turn_context.session_source.clone(), + sandbox_policy: parent_turn_context.sandbox_policy.get(), + windows_sandbox_level: parent_turn_context.windows_sandbox_level, + }) + .with_unified_exec_shell_mode_for_session( + crate::tools::spec::tool_user_shell_type(sess.services.user_shell.as_ref()), + sess.services.shell_zsh_path.as_ref(), + sess.services.main_execve_wrapper_exe.as_ref(), + ) + .with_web_search_config(/*web_search_config*/ None) + .with_allow_login_shell(config.permissions.allow_login_shell) + .with_has_environment(parent_turn_context.environment.is_some()) + .with_spawn_agent_usage_hint(config.multi_agent_v2.usage_hint_enabled) + .with_spawn_agent_usage_hint_text(config.multi_agent_v2.usage_hint_text.clone()) + .with_hide_spawn_agent_metadata(config.multi_agent_v2.hide_spawn_agent_metadata) + .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( + &config.agent_roles, + )); + + let review_prompt = resolved.prompt.clone(); + let provider = parent_turn_context.provider.clone(); + let auth_manager = parent_turn_context.auth_manager.clone(); + let model_info = review_model_info.clone(); + + // Build per‑turn client with the requested model/family. 
+ let mut per_turn_config = (*config).clone(); + per_turn_config.model = Some(model.clone()); + per_turn_config.features = review_features.clone(); + if let Err(err) = per_turn_config.web_search_mode.set(review_web_search_mode) { + let fallback_value = per_turn_config.web_search_mode.value(); + tracing::warn!( + error = %err, + ?review_web_search_mode, + ?fallback_value, + "review web_search_mode is disallowed by requirements; keeping constrained value" + ); + } + + let session_telemetry = parent_turn_context + .session_telemetry + .clone() + .with_model(model.as_str(), review_model_info.slug.as_str()); + let auth_manager_for_context = auth_manager.clone(); + let provider_for_context = provider.clone(); + let session_telemetry_for_context = session_telemetry.clone(); + let reasoning_effort = per_turn_config.model_reasoning_effort; + let reasoning_summary = per_turn_config + .model_reasoning_summary + .unwrap_or(model_info.default_reasoning_summary); + let session_source = parent_turn_context.session_source.clone(); + + let per_turn_config = Arc::new(per_turn_config); + let review_turn_id = sub_id.to_string(); + let turn_metadata_state = Arc::new(TurnMetadataState::new( + sess.conversation_id.to_string(), + &session_source, + review_turn_id.clone(), + parent_turn_context.cwd.clone(), + parent_turn_context.sandbox_policy.get(), + parent_turn_context.windows_sandbox_level, + )); + + let review_turn_context = TurnContext { + sub_id: review_turn_id, + trace_id: current_span_trace_id(), + realtime_active: parent_turn_context.realtime_active, + config: per_turn_config, + auth_manager: auth_manager_for_context, + model_info: model_info.clone(), + session_telemetry: session_telemetry_for_context, + provider: provider_for_context, + reasoning_effort, + reasoning_summary, + session_source, + environment: parent_turn_context.environment.clone(), + tools_config, + features: parent_turn_context.features.clone(), + ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), + 
current_date: parent_turn_context.current_date.clone(), + timezone: parent_turn_context.timezone.clone(), + app_server_client_name: parent_turn_context.app_server_client_name.clone(), + developer_instructions: None, + user_instructions: None, + compact_prompt: parent_turn_context.compact_prompt.clone(), + collaboration_mode: parent_turn_context.collaboration_mode.clone(), + personality: parent_turn_context.personality, + approval_policy: parent_turn_context.approval_policy.clone(), + sandbox_policy: parent_turn_context.sandbox_policy.clone(), + file_system_sandbox_policy: parent_turn_context.file_system_sandbox_policy.clone(), + network_sandbox_policy: parent_turn_context.network_sandbox_policy, + network: parent_turn_context.network.clone(), + windows_sandbox_level: parent_turn_context.windows_sandbox_level, + shell_environment_policy: parent_turn_context.shell_environment_policy.clone(), + cwd: parent_turn_context.cwd.clone(), + final_output_json_schema: None, + codex_self_exe: parent_turn_context.codex_self_exe.clone(), + codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(), + tool_call_gate: Arc::new(ReadinessFlag::new()), + js_repl: Arc::clone(&sess.js_repl), + dynamic_tools: parent_turn_context.dynamic_tools.clone(), + truncation_policy: model_info.truncation_policy.into(), + turn_metadata_state, + turn_skills: TurnSkillsContext::new(parent_turn_context.turn_skills.outcome.clone()), + turn_timing_state: Arc::new(TurnTimingState::default()), + }; + + // Seed the child task with the review prompt as the initial user message. + let input: Vec = vec![UserInput::Text { + text: review_prompt, + // Review prompt is synthesized; no UI element ranges to preserve. + text_elements: Vec::new(), + }]; + let tc = Arc::new(review_turn_context); + tc.turn_metadata_state.spawn_git_enrichment_task(); + // TODO(ccunningham): Review turns currently rely on `spawn_task` for TurnComplete but do not + // emit a parent TurnStarted. 
Consider giving review a full parent turn lifecycle + // (TurnStarted + TurnComplete) for consistency with other standalone tasks. + sess.spawn_task(tc.clone(), input, ReviewTask::new()).await; + + // Announce entering review mode so UIs can switch modes. + let review_request = ReviewRequest { + target: resolved.target, + user_facing_hint: Some(resolved.user_facing_hint), + }; + sess.send_event(&tc, EventMsg::EnteredReviewMode(review_request)) + .await; +} diff --git a/codex-rs/core/src/codex/rollout_reconstruction_tests.rs b/codex-rs/core/src/codex/rollout_reconstruction_tests.rs index 753244ac2b..432cfa7a40 100644 --- a/codex-rs/core/src/codex/rollout_reconstruction_tests.rs +++ b/codex-rs/core/src/codex/rollout_reconstruction_tests.rs @@ -68,6 +68,7 @@ async fn record_initial_history_resumed_bare_turn_context_does_not_hydrate_previ approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -107,6 +108,7 @@ async fn record_initial_history_resumed_hydrates_previous_turn_settings_from_lif approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -901,6 +903,7 @@ async fn record_initial_history_resumed_turn_context_after_compaction_reestablis approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -976,6 +979,7 @@ async fn 
record_initial_history_resumed_turn_context_after_compaction_reestablis approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -1005,6 +1009,7 @@ async fn record_initial_history_resumed_aborted_turn_without_id_clears_active_tu approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -1117,6 +1122,7 @@ async fn record_initial_history_resumed_unmatched_abort_preserves_active_turn_fo approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: current_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -1227,6 +1233,7 @@ async fn record_initial_history_resumed_trailing_incomplete_turn_compaction_clea approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -1376,6 +1383,7 @@ async fn record_initial_history_resumed_replaced_incomplete_compacted_turn_clear approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: 
Some(turn_context.collaboration_mode.clone()), diff --git a/codex-rs/core/src/codex/session.rs b/codex-rs/core/src/codex/session.rs new file mode 100644 index 0000000000..766ac79ec1 --- /dev/null +++ b/codex-rs/core/src/codex/session.rs @@ -0,0 +1,844 @@ +use super::*; + +/// Context for an initialized model agent +/// +/// A session has at most 1 running task at a time, and can be interrupted by user input. +pub(crate) struct Session { + pub(crate) conversation_id: ThreadId, + pub(super) tx_event: Sender, + pub(super) agent_status: watch::Sender, + pub(super) out_of_band_elicitation_paused: watch::Sender, + pub(super) state: Mutex, + /// Serializes rebuild/apply cycles for the running proxy; each cycle + /// rebuilds from the current SessionState while holding this lock. + pub(super) managed_network_proxy_refresh_lock: Mutex<()>, + /// The set of enabled features should be invariant for the lifetime of the + /// session. + pub(super) features: ManagedFeatures, + pub(super) pending_mcp_server_refresh_config: Mutex>, + pub(crate) conversation: Arc, + pub(crate) active_turn: Mutex>, + pub(super) mailbox: Mailbox, + pub(super) mailbox_rx: Mutex, + pub(super) idle_pending_input: Mutex>, // TODO (jif) merge with mailbox! + pub(crate) guardian_review_session: GuardianReviewSessionManager, + pub(crate) services: SessionServices, + pub(super) js_repl: Arc, + pub(super) next_internal_sub_id: AtomicU64, +} + +#[derive(Clone)] +pub(crate) struct SessionConfiguration { + /// Provider identifier ("openai", "openrouter", ...). + pub(super) provider: ModelProviderInfo, + + pub(super) collaboration_mode: CollaborationMode, + pub(super) model_reasoning_summary: Option, + pub(super) service_tier: Option, + + /// Developer instructions that supplement the base instructions. + pub(super) developer_instructions: Option, + + /// Model instructions that are appended to the base instructions. + pub(super) user_instructions: Option, + + /// Personality preference for the model. 
+ pub(super) personality: Option, + + /// Base instructions for the session. + pub(super) base_instructions: String, + + /// Compact prompt override. + pub(super) compact_prompt: Option, + + /// When to escalate for approval for execution + pub(super) approval_policy: Constrained, + pub(super) approvals_reviewer: ApprovalsReviewer, + /// How to sandbox commands executed in the system + pub(super) sandbox_policy: Constrained, + pub(super) file_system_sandbox_policy: FileSystemSandboxPolicy, + pub(super) network_sandbox_policy: NetworkSandboxPolicy, + pub(super) windows_sandbox_level: WindowsSandboxLevel, + + /// Absolute working directory that should be treated as the *root* of the + /// session. All relative paths supplied by the model as well as the + /// execution sandbox are resolved against this directory **instead** of + /// the process-wide current working directory. + pub(super) cwd: AbsolutePathBuf, + /// Directory containing all Codex state for this session. + pub(super) codex_home: AbsolutePathBuf, + /// Optional user-facing name for the thread, updated during the session. + pub(super) thread_name: Option, + + // TODO(pakrym): Remove config from here + pub(super) original_config_do_not_use: Arc, + /// Optional service name tag for session metrics. + pub(super) metrics_service_name: Option, + pub(super) app_server_client_name: Option, + pub(super) app_server_client_version: Option, + /// Source of the session (cli, vscode, exec, mcp, ...) 
+ pub(super) session_source: SessionSource, + pub(super) dynamic_tools: Vec, + pub(super) persist_extended_history: bool, + pub(super) inherited_shell_snapshot: Option>, + pub(super) user_shell_override: Option, +} + +impl SessionConfiguration { + pub(crate) fn codex_home(&self) -> &AbsolutePathBuf { + &self.codex_home + } + + pub(super) fn thread_config_snapshot(&self) -> ThreadConfigSnapshot { + ThreadConfigSnapshot { + model: self.collaboration_mode.model().to_string(), + model_provider_id: self.original_config_do_not_use.model_provider_id.clone(), + service_tier: self.service_tier, + approval_policy: self.approval_policy.value(), + approvals_reviewer: self.approvals_reviewer, + sandbox_policy: self.sandbox_policy.get().clone(), + cwd: self.cwd.clone(), + ephemeral: self.original_config_do_not_use.ephemeral, + reasoning_effort: self.collaboration_mode.reasoning_effort(), + personality: self.personality, + session_source: self.session_source.clone(), + } + } + + pub(crate) fn apply(&self, updates: &SessionSettingsUpdate) -> ConstraintResult { + let mut next_configuration = self.clone(); + let file_system_policy_matches_legacy = self.file_system_sandbox_policy + == FileSystemSandboxPolicy::from_legacy_sandbox_policy( + self.sandbox_policy.get(), + &self.cwd, + ); + if let Some(collaboration_mode) = updates.collaboration_mode.clone() { + next_configuration.collaboration_mode = collaboration_mode; + } + if let Some(summary) = updates.reasoning_summary { + next_configuration.model_reasoning_summary = Some(summary); + } + if let Some(service_tier) = updates.service_tier { + next_configuration.service_tier = service_tier; + } + if let Some(personality) = updates.personality { + next_configuration.personality = Some(personality); + } + if let Some(approval_policy) = updates.approval_policy { + next_configuration.approval_policy.set(approval_policy)?; + } + if let Some(approvals_reviewer) = updates.approvals_reviewer { + next_configuration.approvals_reviewer = 
approvals_reviewer; + } + let mut sandbox_policy_changed = false; + if let Some(sandbox_policy) = updates.sandbox_policy.clone() { + next_configuration.sandbox_policy.set(sandbox_policy)?; + next_configuration.network_sandbox_policy = + NetworkSandboxPolicy::from(next_configuration.sandbox_policy.get()); + sandbox_policy_changed = true; + } + if let Some(windows_sandbox_level) = updates.windows_sandbox_level { + next_configuration.windows_sandbox_level = windows_sandbox_level; + } + + let absolute_cwd = updates + .cwd + .as_ref() + .map(|cwd| { + AbsolutePathBuf::relative_to_current_dir(normalize_for_native_workdir( + cwd.as_path(), + )) + .unwrap_or_else(|e| { + warn!("failed to normalize update cwd: {cwd:?}: {e}"); + self.cwd.clone() + }) + }) + .unwrap_or_else(|| self.cwd.clone()); + + let cwd_changed = absolute_cwd.as_path() != self.cwd.as_path(); + next_configuration.cwd = absolute_cwd; + if sandbox_policy_changed { + next_configuration.file_system_sandbox_policy = + FileSystemSandboxPolicy::from_legacy_sandbox_policy_preserving_deny_entries( + next_configuration.sandbox_policy.get(), + &next_configuration.cwd, + &self.file_system_sandbox_policy, + ); + } else if cwd_changed && file_system_policy_matches_legacy { + // Preserve richer split policies across cwd-only updates; only + // rederive when the session is already using the legacy bridge. 
+ next_configuration.file_system_sandbox_policy = + FileSystemSandboxPolicy::from_legacy_sandbox_policy( + next_configuration.sandbox_policy.get(), + &next_configuration.cwd, + ); + } + if let Some(app_server_client_name) = updates.app_server_client_name.clone() { + next_configuration.app_server_client_name = Some(app_server_client_name); + } + if let Some(app_server_client_version) = updates.app_server_client_version.clone() { + next_configuration.app_server_client_version = Some(app_server_client_version); + } + Ok(next_configuration) + } +} + +#[derive(Default, Clone)] +pub(crate) struct SessionSettingsUpdate { + pub(crate) cwd: Option, + pub(crate) approval_policy: Option, + pub(crate) approvals_reviewer: Option, + pub(crate) sandbox_policy: Option, + pub(crate) windows_sandbox_level: Option, + pub(crate) collaboration_mode: Option, + pub(crate) reasoning_summary: Option, + pub(crate) service_tier: Option>, + pub(crate) final_output_json_schema: Option>, + pub(crate) personality: Option, + pub(crate) app_server_client_name: Option, + pub(crate) app_server_client_version: Option, +} + +pub(crate) struct AppServerClientMetadata { + pub(crate) client_name: Option, + pub(crate) client_version: Option, +} + +impl Session { + #[instrument(name = "session_init", level = "info", skip_all)] + #[allow(clippy::too_many_arguments)] + pub(crate) async fn new( + mut session_configuration: SessionConfiguration, + config: Arc, + auth_manager: Arc, + models_manager: Arc, + exec_policy: Arc, + tx_event: Sender, + agent_status: watch::Sender, + initial_history: InitialHistory, + session_source: SessionSource, + skills_manager: Arc, + plugins_manager: Arc, + mcp_manager: Arc, + skills_watcher: Arc, + agent_control: AgentControl, + environment: Option>, + analytics_events_client: Option, + ) -> anyhow::Result> { + debug!( + "Configuring session: model={}; provider={:?}", + session_configuration.collaboration_mode.model(), + session_configuration.provider + ); + let forked_from_id = 
initial_history.forked_from_id(); + + let (conversation_id, rollout_params) = match &initial_history { + InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => { + let conversation_id = ThreadId::default(); + ( + conversation_id, + RolloutRecorderParams::new( + conversation_id, + forked_from_id, + session_source, + BaseInstructions { + text: session_configuration.base_instructions.clone(), + }, + session_configuration.dynamic_tools.clone(), + if session_configuration.persist_extended_history { + EventPersistenceMode::Extended + } else { + EventPersistenceMode::Limited + }, + ), + ) + } + InitialHistory::Resumed(resumed_history) => ( + resumed_history.conversation_id, + RolloutRecorderParams::resume( + resumed_history.rollout_path.clone(), + if session_configuration.persist_extended_history { + EventPersistenceMode::Extended + } else { + EventPersistenceMode::Limited + }, + ), + ), + }; + let window_generation = match &initial_history { + InitialHistory::Resumed(resumed_history) => u64::try_from( + resumed_history + .history + .iter() + .filter(|item| matches!(item, RolloutItem::Compacted(_))) + .count(), + ) + .unwrap_or(u64::MAX), + InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => 0, + }; + let state_builder = match &initial_history { + InitialHistory::Resumed(resumed) => metadata::builder_from_items( + resumed.history.as_slice(), + resumed.rollout_path.as_path(), + ), + InitialHistory::New | InitialHistory::Cleared | InitialHistory::Forked(_) => None, + }; + + // Kick off independent async setup tasks in parallel to reduce startup latency. 
+ // + // - initialize RolloutRecorder with new or resumed session info + // - perform default shell discovery + // - load history metadata (skipped for subagents) + let rollout_fut = async { + if config.ephemeral { + Ok::<_, anyhow::Error>((None, None)) + } else { + let state_db_ctx = state_db::init(&config).await; + let rollout_recorder = RolloutRecorder::new( + &config, + rollout_params, + state_db_ctx.clone(), + state_builder.clone(), + ) + .await?; + Ok((Some(rollout_recorder), state_db_ctx)) + } + } + .instrument(info_span!( + "session_init.rollout", + otel.name = "session_init.rollout", + session_init.ephemeral = config.ephemeral, + )); + + let is_subagent = matches!( + session_configuration.session_source, + SessionSource::SubAgent(_) + ); + let history_meta_fut = async { + if is_subagent { + (0, 0) + } else { + crate::message_history::history_metadata(&config).await + } + } + .instrument(info_span!( + "session_init.history_metadata", + otel.name = "session_init.history_metadata", + session_init.is_subagent = is_subagent, + )); + let auth_manager_clone = Arc::clone(&auth_manager); + let config_for_mcp = Arc::clone(&config); + let mcp_manager_for_mcp = Arc::clone(&mcp_manager); + let auth_and_mcp_fut = async move { + let auth = auth_manager_clone.auth().await; + let mcp_servers = mcp_manager_for_mcp + .effective_servers(&config_for_mcp, auth.as_ref()) + .await; + let auth_statuses = compute_auth_statuses( + mcp_servers.iter(), + config_for_mcp.mcp_oauth_credentials_store_mode, + ) + .await; + (auth, mcp_servers, auth_statuses) + } + .instrument(info_span!( + "session_init.auth_mcp", + otel.name = "session_init.auth_mcp", + )); + + // Join all independent futures. 
+ let ( + rollout_recorder_and_state_db, + (history_log_id, history_entry_count), + (auth, mcp_servers, auth_statuses), + ) = tokio::join!(rollout_fut, history_meta_fut, auth_and_mcp_fut); + + let (rollout_recorder, state_db_ctx) = rollout_recorder_and_state_db.map_err(|e| { + error!("failed to initialize rollout recorder: {e:#}"); + e + })?; + let rollout_path = rollout_recorder + .as_ref() + .map(|rec| rec.rollout_path().to_path_buf()); + + let mut post_session_configured_events = Vec::::new(); + + for usage in config.features.legacy_feature_usages() { + post_session_configured_events.push(Event { + id: INITIAL_SUBMIT_ID.to_owned(), + msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { + summary: usage.summary.clone(), + details: usage.details.clone(), + }), + }); + } + if crate::config::uses_deprecated_instructions_file(&config.config_layer_stack) { + post_session_configured_events.push(Event { + id: INITIAL_SUBMIT_ID.to_owned(), + msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { + summary: "`experimental_instructions_file` is deprecated and ignored. Use `model_instructions_file` instead." + .to_string(), + details: Some( + "Move the setting to `model_instructions_file` in config.toml (or under a profile) to load instructions from a file." 
+ .to_string(), + ), + }), + }); + } + for message in &config.startup_warnings { + post_session_configured_events.push(Event { + id: "".to_owned(), + msg: EventMsg::Warning(WarningEvent { + message: message.clone(), + }), + }); + } + let config_path = config.codex_home.join(CONFIG_TOML_FILE); + if let Some(event) = unstable_features_warning_event( + config + .config_layer_stack + .effective_config() + .get("features") + .and_then(TomlValue::as_table), + config.suppress_unstable_features_warning, + &config.features, + &config_path.display().to_string(), + ) { + post_session_configured_events.push(event); + } + if config.permissions.approval_policy.value() == AskForApproval::OnFailure { + post_session_configured_events.push(Event { + id: "".to_owned(), + msg: EventMsg::Warning(WarningEvent { + message: "`on-failure` approval policy is deprecated and will be removed in a future release. Use `on-request` for interactive approvals or `never` for non-interactive runs.".to_string(), + }), + }); + } + + let auth = auth.as_ref(); + let auth_mode = auth.map(CodexAuth::auth_mode).map(TelemetryAuthMode::from); + let account_id = auth.and_then(CodexAuth::get_account_id); + let account_email = auth.and_then(CodexAuth::get_account_email); + let originator = originator().value; + let terminal_type = user_agent(); + let session_model = session_configuration.collaboration_mode.model().to_string(); + let auth_env_telemetry = collect_auth_env_telemetry( + &session_configuration.provider, + auth_manager.codex_api_key_env_enabled(), + ); + let mut session_telemetry = SessionTelemetry::new( + conversation_id, + session_model.as_str(), + session_model.as_str(), + account_id.clone(), + account_email.clone(), + auth_mode, + originator.clone(), + config.otel.log_user_prompt, + terminal_type.clone(), + session_configuration.session_source.clone(), + ) + .with_auth_env(auth_env_telemetry.to_otel_metadata()); + if let Some(service_name) = session_configuration.metrics_service_name.as_deref() { 
+ session_telemetry = session_telemetry.with_metrics_service_name(service_name); + } + let network_proxy_audit_metadata = NetworkProxyAuditMetadata { + conversation_id: Some(conversation_id.to_string()), + app_version: Some(env!("CARGO_PKG_VERSION").to_string()), + user_account_id: account_id, + auth_mode: auth_mode.map(|mode| mode.to_string()), + originator: Some(originator), + user_email: account_email, + terminal_type: Some(terminal_type), + model: Some(session_model.clone()), + slug: Some(session_model), + }; + config.features.emit_metrics(&session_telemetry); + session_telemetry.counter( + THREAD_STARTED_METRIC, + /*inc*/ 1, + &[( + "is_git", + if get_git_repo_root(&session_configuration.cwd).is_some() { + "true" + } else { + "false" + }, + )], + ); + + session_telemetry.conversation_starts( + config.model_provider.name.as_str(), + session_configuration.collaboration_mode.reasoning_effort(), + config + .model_reasoning_summary + .unwrap_or(ReasoningSummaryConfig::Auto), + config.model_context_window, + config.model_auto_compact_token_limit, + config.permissions.approval_policy.value(), + config.permissions.sandbox_policy.get().clone(), + mcp_servers.keys().map(String::as_str).collect(), + config.active_profile.clone(), + ); + + let use_zsh_fork_shell = config.features.enabled(Feature::ShellZshFork); + let mut default_shell = if let Some(user_shell_override) = + session_configuration.user_shell_override.clone() + { + user_shell_override + } else if use_zsh_fork_shell { + let zsh_path = config.zsh_path.as_ref().ok_or_else(|| { + anyhow::anyhow!( + "zsh fork feature enabled, but `zsh_path` is not configured; set `zsh_path` in config.toml" + ) + })?; + let zsh_path = zsh_path.to_path_buf(); + shell::get_shell(shell::ShellType::Zsh, Some(&zsh_path)).ok_or_else(|| { + anyhow::anyhow!( + "zsh fork feature enabled, but zsh_path `{}` is not usable; set `zsh_path` to a valid zsh executable", + zsh_path.display() + ) + })? 
+ } else { + shell::default_user_shell() + }; + // Create the mutable state for the Session. + let shell_snapshot_tx = if config.features.enabled(Feature::ShellSnapshot) { + if let Some(snapshot) = session_configuration.inherited_shell_snapshot.clone() { + let (tx, rx) = watch::channel(Some(snapshot)); + default_shell.shell_snapshot = rx; + tx + } else { + ShellSnapshot::start_snapshotting( + config.codex_home.clone(), + conversation_id, + session_configuration.cwd.clone(), + &mut default_shell, + session_telemetry.clone(), + ) + } + } else { + let (tx, rx) = watch::channel(None); + default_shell.shell_snapshot = rx; + tx + }; + let thread_name = + thread_title_from_state_db(state_db_ctx.as_ref(), &config.codex_home, conversation_id) + .instrument(info_span!( + "session_init.thread_name_lookup", + otel.name = "session_init.thread_name_lookup", + )) + .await; + session_configuration.thread_name = thread_name.clone(); + let state = SessionState::new(session_configuration.clone()); + let managed_network_requirements_configured = config + .config_layer_stack + .requirements_toml() + .network + .is_some(); + let managed_network_requirements_enabled = config.managed_network_requirements_enabled(); + let network_approval = Arc::new(NetworkApprovalService::default()); + // The managed proxy can call back into core for allowlist-miss decisions. 
+ let network_policy_decider_session = if managed_network_requirements_configured { + config + .permissions + .network + .as_ref() + .map(|_| Arc::new(RwLock::new(std::sync::Weak::::new()))) + } else { + None + }; + let blocked_request_observer = if managed_network_requirements_configured { + config + .permissions + .network + .as_ref() + .map(|_| build_blocked_request_observer(Arc::clone(&network_approval))) + } else { + None + }; + let network_policy_decider = + network_policy_decider_session + .as_ref() + .map(|network_policy_decider_session| { + build_network_policy_decider( + Arc::clone(&network_approval), + Arc::clone(network_policy_decider_session), + ) + }); + let (network_proxy, session_network_proxy) = + if let Some(spec) = config.permissions.network.as_ref() { + let current_exec_policy = exec_policy.current(); + let (network_proxy, session_network_proxy) = Self::start_managed_network_proxy( + spec, + current_exec_policy.as_ref(), + config.permissions.sandbox_policy.get(), + network_policy_decider.as_ref().map(Arc::clone), + blocked_request_observer.as_ref().map(Arc::clone), + managed_network_requirements_configured, + network_proxy_audit_metadata, + ) + .instrument(info_span!( + "session_init.network_proxy", + otel.name = "session_init.network_proxy", + session_init.managed_network_requirements_enabled = + managed_network_requirements_enabled, + )) + .await?; + (Some(network_proxy), Some(session_network_proxy)) + } else { + (None, None) + }; + + let mut hook_shell_argv = + default_shell.derive_exec_args("", /*use_login_shell*/ false); + let hook_shell_program = hook_shell_argv.remove(0); + let _ = hook_shell_argv.pop(); + let hooks = Hooks::new(HooksConfig { + legacy_notify_argv: config.notify.clone(), + feature_enabled: config.features.enabled(Feature::CodexHooks), + config_layer_stack: Some(config.config_layer_stack.clone()), + shell_program: Some(hook_shell_program), + shell_args: hook_shell_argv, + }); + for warning in hooks.startup_warnings() { + 
post_session_configured_events.push(Event { + id: INITIAL_SUBMIT_ID.to_owned(), + msg: EventMsg::Warning(WarningEvent { + message: warning.clone(), + }), + }); + } + + let installation_id = resolve_installation_id(&config.codex_home).await?; + let analytics_events_client = analytics_events_client.unwrap_or_else(|| { + AnalyticsEventsClient::new( + Arc::clone(&auth_manager), + config.chatgpt_base_url.trim_end_matches('/').to_string(), + config.analytics_enabled, + ) + }); + let services = SessionServices { + // Initialize the MCP connection manager with an uninitialized + // instance. It will be replaced with one created via + // McpConnectionManager::new() once all its constructor args are + // available. This also ensures `SessionConfigured` is emitted + // before any MCP-related events. It is reasonable to consider + // changing this to use Option or OnceCell, though the current + // setup is straightforward enough and performs well. + mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::new_uninitialized( + &config.permissions.approval_policy, + &config.permissions.sandbox_policy, + ))), + mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()), + unified_exec_manager: UnifiedExecProcessManager::new( + config.background_terminal_max_timeout, + ), + shell_zsh_path: config.zsh_path.clone(), + main_execve_wrapper_exe: config.main_execve_wrapper_exe.clone(), + analytics_events_client, + hooks, + rollout: Mutex::new(rollout_recorder), + user_shell: Arc::new(default_shell), + agent_identity_manager: Arc::new(AgentIdentityManager::new( + config.as_ref(), + Arc::clone(&auth_manager), + session_configuration.session_source.clone(), + )), + shell_snapshot_tx, + show_raw_agent_reasoning: config.show_raw_agent_reasoning, + exec_policy, + auth_manager: Arc::clone(&auth_manager), + session_telemetry, + models_manager: Arc::clone(&models_manager), + tool_approvals: Mutex::new(ApprovalStore::default()), + guardian_rejections: 
Mutex::new(HashMap::new()), + skills_manager, + plugins_manager: Arc::clone(&plugins_manager), + mcp_manager: Arc::clone(&mcp_manager), + skills_watcher, + agent_control, + network_proxy, + network_approval: Arc::clone(&network_approval), + state_db: state_db_ctx.clone(), + thread_store: LocalThreadStore::new(RolloutConfig::from_view(config.as_ref())), + model_client: ModelClient::new( + Some(Arc::clone(&auth_manager)), + conversation_id, + installation_id, + session_configuration.provider.clone(), + session_configuration.session_source.clone(), + config.model_verbosity, + config.features.enabled(Feature::EnableRequestCompression), + config.features.enabled(Feature::RuntimeMetrics), + Self::build_model_client_beta_features_header(config.as_ref()), + ), + code_mode_service: crate::tools::code_mode::CodeModeService::new( + config.js_repl_node_path.clone(), + ), + environment, + }; + services + .model_client + .set_window_generation(window_generation); + let js_repl = Arc::new(JsReplHandle::with_node_path( + config.js_repl_node_path.clone(), + config.js_repl_node_module_dirs.clone(), + )); + let (out_of_band_elicitation_paused, _out_of_band_elicitation_paused_rx) = + watch::channel(false); + + let (mailbox, mailbox_rx) = Mailbox::new(); + let sess = Arc::new(Session { + conversation_id, + tx_event: tx_event.clone(), + agent_status, + out_of_band_elicitation_paused, + state: Mutex::new(state), + managed_network_proxy_refresh_lock: Mutex::new(()), + features: config.features.clone(), + pending_mcp_server_refresh_config: Mutex::new(None), + conversation: Arc::new(RealtimeConversationManager::new()), + active_turn: Mutex::new(None), + mailbox, + mailbox_rx: Mutex::new(mailbox_rx), + idle_pending_input: Mutex::new(Vec::new()), + guardian_review_session: GuardianReviewSessionManager::default(), + services, + js_repl, + next_internal_sub_id: AtomicU64::new(0), + }); + if let Some(network_policy_decider_session) = network_policy_decider_session { + let mut guard = 
network_policy_decider_session.write().await; + *guard = Arc::downgrade(&sess); + } + // Dispatch the SessionConfiguredEvent first and then report any errors. + // If resuming, include converted initial messages in the payload so UIs can render them immediately. + let initial_messages = initial_history.get_event_msgs(); + let events = std::iter::once(Event { + id: INITIAL_SUBMIT_ID.to_owned(), + msg: EventMsg::SessionConfigured(SessionConfiguredEvent { + session_id: conversation_id, + forked_from_id, + thread_name: session_configuration.thread_name.clone(), + model: session_configuration.collaboration_mode.model().to_string(), + model_provider_id: config.model_provider_id.clone(), + service_tier: session_configuration.service_tier, + approval_policy: session_configuration.approval_policy.value(), + approvals_reviewer: session_configuration.approvals_reviewer, + sandbox_policy: session_configuration.sandbox_policy.get().clone(), + cwd: session_configuration.cwd.clone(), + reasoning_effort: session_configuration.collaboration_mode.reasoning_effort(), + history_log_id, + history_entry_count, + initial_messages, + network_proxy: session_network_proxy.filter(|_| { + Self::managed_network_proxy_active_for_sandbox_policy( + session_configuration.sandbox_policy.get(), + ) + }), + rollout_path, + }), + }) + .chain(post_session_configured_events.into_iter()); + for event in events { + sess.send_event_raw(event).await; + } + + // Start the watcher after SessionConfigured so it cannot emit earlier events. 
+ sess.start_skills_watcher_listener(); + sess.start_agent_identity_registration(); + let mut required_mcp_servers: Vec = mcp_servers + .iter() + .filter(|(_, server)| server.enabled && server.required) + .map(|(name, _)| name.clone()) + .collect(); + required_mcp_servers.sort(); + let enabled_mcp_server_count = mcp_servers.values().filter(|server| server.enabled).count(); + let required_mcp_server_count = required_mcp_servers.len(); + let tool_plugin_provenance = mcp_manager.tool_plugin_provenance(config.as_ref()).await; + { + let mut cancel_guard = sess.services.mcp_startup_cancellation_token.lock().await; + cancel_guard.cancel(); + *cancel_guard = CancellationToken::new(); + } + let (mcp_connection_manager, cancel_token) = McpConnectionManager::new( + &mcp_servers, + config.mcp_oauth_credentials_store_mode, + auth_statuses.clone(), + &session_configuration.approval_policy, + INITIAL_SUBMIT_ID.to_owned(), + tx_event.clone(), + session_configuration.sandbox_policy.get().clone(), + config.codex_home.to_path_buf(), + codex_apps_tools_cache_key(auth), + tool_plugin_provenance, + ) + .instrument(info_span!( + "session_init.mcp_manager_init", + otel.name = "session_init.mcp_manager_init", + session_init.enabled_mcp_server_count = enabled_mcp_server_count, + session_init.required_mcp_server_count = required_mcp_server_count, + )) + .await; + { + let mut manager_guard = sess.services.mcp_connection_manager.write().await; + *manager_guard = mcp_connection_manager; + } + { + let mut cancel_guard = sess.services.mcp_startup_cancellation_token.lock().await; + if cancel_guard.is_cancelled() { + cancel_token.cancel(); + } + *cancel_guard = cancel_token; + } + if !required_mcp_servers.is_empty() { + let failures = sess + .services + .mcp_connection_manager + .read() + .await + .required_startup_failures(&required_mcp_servers) + .instrument(info_span!( + "session_init.required_mcp_wait", + otel.name = "session_init.required_mcp_wait", + session_init.required_mcp_server_count = 
required_mcp_server_count, + )) + .await; + if !failures.is_empty() { + let details = failures + .iter() + .map(|failure| format!("{}: {}", failure.server, failure.error)) + .collect::>() + .join("; "); + return Err(anyhow::anyhow!( + "required MCP servers failed to initialize: {details}" + )); + } + } + sess.schedule_startup_prewarm(session_configuration.base_instructions.clone()) + .await; + let session_start_source = match &initial_history { + InitialHistory::Resumed(_) => codex_hooks::SessionStartSource::Resume, + InitialHistory::New | InitialHistory::Forked(_) => { + codex_hooks::SessionStartSource::Startup + } + InitialHistory::Cleared => codex_hooks::SessionStartSource::Clear, + }; + + // record_initial_history can emit events. We record only after the SessionConfiguredEvent is emitted. + sess.record_initial_history(initial_history).await; + { + let mut state = sess.state.lock().await; + state.set_pending_session_start_source(Some(session_start_source)); + } + + memories::start_memories_startup_task( + &sess, + Arc::clone(&config), + &session_configuration.session_source, + ); + + Ok(sess) + } +} diff --git a/codex-rs/core/src/codex/turn.rs b/codex-rs/core/src/codex/turn.rs new file mode 100644 index 0000000000..69f6641c3e --- /dev/null +++ b/codex-rs/core/src/codex/turn.rs @@ -0,0 +1,2263 @@ +use crate::SkillInjections; +use crate::SkillLoadOutcome; +use crate::build_skill_injections; +use crate::client::ModelClientSession; +use crate::client_common::Prompt; +use crate::client_common::ResponseEvent; +use crate::codex::PreviousTurnSettings; +use crate::codex::Session; +use crate::codex::TurnContext; +use crate::collect_env_var_dependencies; +use crate::collect_explicit_skill_mentions; +use crate::compact::InitialContextInjection; +use crate::compact::collect_user_messages; +use crate::compact::run_inline_auto_compact_task; +use crate::compact::should_use_remote_compact_task; +use crate::compact_remote::run_inline_remote_auto_compact_task; +use 
crate::connectors; +use crate::feedback_tags; +use crate::hook_runtime::PendingInputHookDisposition; +use crate::hook_runtime::emit_hook_completed_events; +use crate::hook_runtime::inspect_pending_input; +use crate::hook_runtime::record_additional_contexts; +use crate::hook_runtime::record_pending_input; +use crate::hook_runtime::run_pending_session_start_hooks; +use crate::hook_runtime::run_user_prompt_submit_hooks; +use crate::injection::ToolMentionKind; +use crate::injection::app_id_from_path; +use crate::injection::tool_kind_for_path; +use crate::mcp_skill_dependencies::maybe_prompt_and_install_mcp_dependencies; +use crate::mcp_tool_exposure::build_mcp_tool_exposure; +use crate::mentions::build_connector_slug_counts; +use crate::mentions::build_skill_name_counts; +use crate::mentions::collect_explicit_app_ids; +use crate::mentions::collect_explicit_plugin_mentions; +use crate::mentions::collect_tool_mentions_from_messages; +use crate::parse_turn_item; +use crate::plugins::build_plugin_injections; +use crate::resolve_skill_dependencies_for_turn; +use crate::stream_events_utils::HandleOutputCtx; +use crate::stream_events_utils::handle_non_tool_response_item; +use crate::stream_events_utils::handle_output_item_done; +use crate::stream_events_utils::last_assistant_message_from_item; +use crate::stream_events_utils::mark_thread_memory_mode_polluted_if_external_context; +use crate::stream_events_utils::raw_assistant_output_text_from_item; +use crate::stream_events_utils::record_completed_response_item; +use crate::tools::ToolRouter; +use crate::tools::context::SharedTurnDiffTracker; +use crate::tools::parallel::ToolCallRuntime; +use crate::tools::registry::ToolArgumentDiffConsumer; +use crate::tools::router::ToolRouterParams; +use crate::turn_diff_tracker::TurnDiffTracker; +use crate::turn_timing::record_turn_ttft_metric; +use crate::unavailable_tool::collect_unavailable_called_tools; +use crate::util::backoff; +use crate::util::error_or_panic; +use 
codex_analytics::AppInvocation; +use codex_analytics::CompactionPhase; +use codex_analytics::CompactionReason; +use codex_analytics::InvocationType; +use codex_analytics::TurnResolvedConfigFact; +use codex_analytics::build_track_events_context; +use codex_async_utils::OrCancelExt; +use codex_features::Feature; +use codex_hooks::HookEvent; +use codex_hooks::HookEventAfterAgent; +use codex_hooks::HookPayload; +use codex_hooks::HookResult; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::ToolInfo; +use codex_protocol::config_types::ModeKind; +use codex_protocol::error::CodexErr; +use codex_protocol::error::Result as CodexResult; +use codex_protocol::items::PlanItem; +use codex_protocol::items::TurnItem; +use codex_protocol::items::UserMessageItem; +use codex_protocol::items::build_hook_prompt_message; +use codex_protocol::models::BaseInstructions; +use codex_protocol::models::ContentItem; +use codex_protocol::models::ResponseInputItem; +use codex_protocol::models::ResponseItem; +use codex_protocol::protocol::AgentMessageContentDeltaEvent; +use codex_protocol::protocol::AgentReasoningSectionBreakEvent; +use codex_protocol::protocol::AskForApproval; +use codex_protocol::protocol::CodexErrorInfo; +use codex_protocol::protocol::ErrorEvent; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::PlanDeltaEvent; +use codex_protocol::protocol::ReasoningContentDeltaEvent; +use codex_protocol::protocol::ReasoningRawContentDeltaEvent; +use codex_protocol::protocol::TurnDiffEvent; +use codex_protocol::protocol::WarningEvent; +use codex_protocol::user_input::UserInput; +use codex_tools::ToolName; +use codex_tools::filter_tool_suggest_discoverable_tools_for_client; +use codex_utils_stream_parser::AssistantTextChunk; +use codex_utils_stream_parser::AssistantTextStreamParser; +use codex_utils_stream_parser::ProposedPlanSegment; +use codex_utils_stream_parser::extract_proposed_plan_text; +use codex_utils_stream_parser::strip_citations; +use 
futures::future::BoxFuture;
use futures::prelude::*;
use futures::stream::FuturesOrdered;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use tracing::error;
use tracing::field;
use tracing::info;
use tracing::instrument;
use tracing::trace;
use tracing::trace_span;
use tracing::warn;

/// Bounded wait for the `codex_apps` MCP server to finish startup when the
/// user explicitly mentioned an app that is not yet visible in the tool
/// snapshot.
const EXPLICIT_APPS_READY_TIMEOUT: Duration = Duration::from_secs(3);

// Explicit plugin mentions imply app usage even when the user did not
// mention the app directly. If those connectors are still missing from the
// current `codex_apps` snapshot, give startup a bounded chance to finish.
//
// Returns `true` when at least one explicitly requested connector id is not
// present in the accessible-connector set derived from `mcp_tools`.
#[cfg_attr(test, allow(dead_code))]
pub(super) fn explicitly_enabled_connectors_missing_from_tools(
    connector_ids: &HashSet<String>,
    mcp_tools: &HashMap<String, ToolInfo>,
) -> bool {
    let accessible_connector_ids = connectors::accessible_connectors_from_mcp_tools(mcp_tools)
        .into_iter()
        .map(|connector| connector.id)
        .collect::<HashSet<_>>();

    connector_ids
        .iter()
        .any(|connector_id| !accessible_connector_ids.contains(connector_id))
}

/// Takes a user message as input and runs a loop where, at each sampling request, the model
/// replies with either:
///
/// - requested function calls
/// - an assistant message
///
/// While it is possible for the model to return multiple of these items in a
/// single sampling request, in practice, we generally see one item per sampling request:
///
/// - If the model requests a function call, we execute it and send the output
///   back to the model in the next sampling request.
/// - If the model sends only an assistant message, we record it in the
///   conversation history and consider the turn complete.
+/// +pub(crate) async fn run_turn( + sess: Arc, + turn_context: Arc, + input: Vec, + prewarmed_client_session: Option, + cancellation_token: CancellationToken, +) -> Option { + if input.is_empty() && !sess.has_pending_input().await { + return None; + } + + let model_info = turn_context.model_info.clone(); + let auto_compact_limit = model_info.auto_compact_token_limit().unwrap_or(i64::MAX); + let mut prewarmed_client_session = prewarmed_client_session; + // TODO(ccunningham): Pre-turn compaction runs before context updates and the + // new user message are recorded. Estimate pending incoming items (context + // diffs/full reinjection + user input) and trigger compaction preemptively + // when they would push the thread over the compaction threshold. + let pre_sampling_compacted = match run_pre_sampling_compact(&sess, &turn_context).await { + Ok(pre_sampling_compacted) => pre_sampling_compacted, + Err(_) => { + error!("Failed to run pre-sampling compact"); + return None; + } + }; + if pre_sampling_compacted && let Some(mut client_session) = prewarmed_client_session.take() { + client_session.reset_websocket_session(); + } + + let skills_outcome = Some(turn_context.turn_skills.outcome.as_ref()); + + sess.record_context_updates_and_set_reference_context_item(turn_context.as_ref()) + .await; + + let loaded_plugins = sess + .services + .plugins_manager + .plugins_for_config(&turn_context.config) + .await; + // Structured plugin:// mentions are resolved from the current session's + // enabled plugins, then converted into turn-scoped guidance below. + let mentioned_plugins = + collect_explicit_plugin_mentions(&input, loaded_plugins.capability_summaries()); + let mut explicitly_enabled_connectors = collect_explicit_app_ids(&input); + if turn_context.apps_enabled() { + // Treat app connectors declared by explicit plugin mentions as + // explicit for this turn too. That lets them participate in both + // startup waiting and first-turn tool exposure. 
+ explicitly_enabled_connectors.extend(mentioned_plugins.iter().flat_map(|plugin| { + plugin + .app_connector_ids + .iter() + .map(|connector_id| connector_id.0.clone()) + })); + } + let mcp_tools = if turn_context.apps_enabled() || !mentioned_plugins.is_empty() { + // Plugin mentions need raw MCP/app inventory even when app tools + // are normally hidden so we can describe the plugin's currently + // usable capabilities for this turn. + let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; + let mut mcp_tools = match mcp_connection_manager + .list_all_tools() + .or_cancel(&cancellation_token) + .await + { + Ok(mcp_tools) => mcp_tools, + Err(_) if turn_context.apps_enabled() => return None, + Err(_) => HashMap::new(), + }; + if turn_context.apps_enabled() + && !explicitly_enabled_connectors.is_empty() + && explicitly_enabled_connectors_missing_from_tools( + &explicitly_enabled_connectors, + &mcp_tools, + ) + { + // The caller explicitly asked for one of these app-backed surfaces, + // but the first snapshot still does not expose it, so wait + // briefly and then rebuild the tool view for this turn. 
+ let codex_apps_ready = match mcp_connection_manager + .wait_for_server_ready(CODEX_APPS_MCP_SERVER_NAME, EXPLICIT_APPS_READY_TIMEOUT) + .or_cancel(&cancellation_token) + .await + { + Ok(codex_apps_ready) => codex_apps_ready, + Err(_) => return None, + }; + if codex_apps_ready { + mcp_tools = match mcp_connection_manager + .list_all_tools() + .or_cancel(&cancellation_token) + .await + { + Ok(mcp_tools) => mcp_tools, + Err(_) => return None, + }; + } + } + mcp_tools + } else { + HashMap::new() + }; + let available_connectors = if turn_context.apps_enabled() { + let connectors = codex_connectors::merge::merge_plugin_connectors_with_accessible( + loaded_plugins + .effective_apps() + .into_iter() + .map(|connector_id| connector_id.0), + connectors::accessible_connectors_from_mcp_tools(&mcp_tools), + ); + connectors::with_app_enabled_state(connectors, &turn_context.config) + } else { + Vec::new() + }; + let connector_slug_counts = build_connector_slug_counts(&available_connectors); + let skill_name_counts_lower = skills_outcome + .as_ref() + .map_or_else(HashMap::new, |outcome| { + build_skill_name_counts(&outcome.skills, &outcome.disabled_paths).1 + }); + let mentioned_skills = skills_outcome.as_ref().map_or_else(Vec::new, |outcome| { + collect_explicit_skill_mentions( + &input, + &outcome.skills, + &outcome.disabled_paths, + &connector_slug_counts, + ) + }); + let config = turn_context.config.clone(); + if config + .features + .enabled(Feature::SkillEnvVarDependencyPrompt) + { + let env_var_dependencies = collect_env_var_dependencies(&mentioned_skills); + resolve_skill_dependencies_for_turn(&sess, &turn_context, &env_var_dependencies).await; + } + + maybe_prompt_and_install_mcp_dependencies( + sess.as_ref(), + turn_context.as_ref(), + &cancellation_token, + &mentioned_skills, + ) + .await; + + let session_telemetry = turn_context.session_telemetry.clone(); + let thread_id = sess.conversation_id.to_string(); + let tracking = build_track_events_context( + 
turn_context.model_info.slug.clone(), + thread_id, + turn_context.sub_id.clone(), + ); + let SkillInjections { + items: skill_items, + warnings: skill_warnings, + } = build_skill_injections( + &mentioned_skills, + skills_outcome, + Some(&session_telemetry), + &sess.services.analytics_events_client, + tracking.clone(), + ) + .await; + + for message in skill_warnings { + sess.send_event(&turn_context, EventMsg::Warning(WarningEvent { message })) + .await; + } + + let plugin_items = + build_plugin_injections(&mentioned_plugins, &mcp_tools, &available_connectors); + let mentioned_plugin_metadata = mentioned_plugins + .iter() + .filter_map(crate::plugins::PluginCapabilitySummary::telemetry_metadata) + .collect::>(); + + explicitly_enabled_connectors.extend(collect_explicit_app_ids_from_skill_items( + &skill_items, + &available_connectors, + &skill_name_counts_lower, + )); + let connector_names_by_id = available_connectors + .iter() + .map(|connector| (connector.id.as_str(), connector.name.as_str())) + .collect::>(); + let mentioned_app_invocations = explicitly_enabled_connectors + .iter() + .map(|connector_id| AppInvocation { + connector_id: Some(connector_id.clone()), + app_name: connector_names_by_id + .get(connector_id.as_str()) + .map(|name| (*name).to_string()), + invocation_type: Some(InvocationType::Explicit), + }) + .collect::>(); + + if run_pending_session_start_hooks(&sess, &turn_context).await { + return None; + } + let additional_contexts = if input.is_empty() { + Vec::new() + } else { + let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input.clone()); + let response_item: ResponseItem = initial_input_for_turn.clone().into(); + let user_prompt_submit_outcome = run_user_prompt_submit_hooks( + &sess, + &turn_context, + UserMessageItem::new(&input).message(), + ) + .await; + if user_prompt_submit_outcome.should_stop { + record_additional_contexts( + &sess, + &turn_context, + user_prompt_submit_outcome.additional_contexts, + ) + .await; + 
return None; + } + sess.record_user_prompt_and_emit_turn_item(turn_context.as_ref(), &input, response_item) + .await; + user_prompt_submit_outcome.additional_contexts + }; + sess.services + .analytics_events_client + .track_app_mentioned(tracking.clone(), mentioned_app_invocations); + for plugin in mentioned_plugin_metadata { + sess.services + .analytics_events_client + .track_plugin_used(tracking.clone(), plugin); + } + sess.merge_connector_selection(explicitly_enabled_connectors.clone()) + .await; + record_additional_contexts(&sess, &turn_context, additional_contexts).await; + if !input.is_empty() { + // Track the previous-turn baseline from the regular user-turn path only so + // standalone tasks (compact/shell/review/undo) cannot suppress future + // model/realtime injections. + sess.set_previous_turn_settings(Some(PreviousTurnSettings { + model: turn_context.model_info.slug.clone(), + realtime_active: Some(turn_context.realtime_active), + })) + .await; + } + if let Err(error) = sess.ensure_agent_task_registered().await { + warn!(error = %error, "agent task registration failed"); + sess.send_event( + turn_context.as_ref(), + EventMsg::Error(ErrorEvent { + message: format!( + "Agent task registration failed. 
Please try again; Codex will attempt to register the task again on the next turn: {error}" + ), + codex_error_info: Some(CodexErrorInfo::Other), + }), + ) + .await; + return None; + } + + if !skill_items.is_empty() { + sess.record_conversation_items(&turn_context, &skill_items) + .await; + } + if !plugin_items.is_empty() { + sess.record_conversation_items(&turn_context, &plugin_items) + .await; + } + + track_turn_resolved_config_analytics(&sess, &turn_context, &input).await; + + let skills_outcome = Some(turn_context.turn_skills.outcome.as_ref()); + sess.maybe_start_ghost_snapshot(Arc::clone(&turn_context), cancellation_token.child_token()) + .await; + let mut last_agent_message: Option = None; + let mut stop_hook_active = false; + // Although from the perspective of codex.rs, TurnDiffTracker has the lifecycle of a Task which contains + // many turns, from the perspective of the user, it is a single turn. + let turn_diff_tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new())); + let mut server_model_warning_emitted_for_turn = false; + + // `ModelClientSession` is turn-scoped and caches WebSocket + sticky routing state, so we reuse + // one instance across retries within this turn. + let mut client_session = + prewarmed_client_session.unwrap_or_else(|| sess.services.model_client.new_session()); + // Pending input is drained into history before building the next model request. + // However, we defer that drain until after sampling in two cases: + // 1. At the start of a turn, so the fresh user prompt in `input` gets sampled first. + // 2. After auto-compact, when model/tool continuation needs to resume before any steer. + let mut can_drain_pending_input = input.is_empty(); + + loop { + if run_pending_session_start_hooks(&sess, &turn_context).await { + break; + } + + // Note that pending_input would be something like a message the user + // submitted through the UI while the model was running. Though the UI + // may support this, the model might not. 
+ let pending_input = if can_drain_pending_input { + sess.get_pending_input().await + } else { + Vec::new() + }; + + let mut blocked_pending_input = false; + let mut blocked_pending_input_contexts = Vec::new(); + let mut requeued_pending_input = false; + let mut accepted_pending_input = Vec::new(); + if !pending_input.is_empty() { + let mut pending_input_iter = pending_input.into_iter(); + while let Some(pending_input_item) = pending_input_iter.next() { + match inspect_pending_input(&sess, &turn_context, pending_input_item).await { + PendingInputHookDisposition::Accepted(pending_input) => { + accepted_pending_input.push(*pending_input); + } + PendingInputHookDisposition::Blocked { + additional_contexts, + } => { + let remaining_pending_input = pending_input_iter.collect::>(); + if !remaining_pending_input.is_empty() { + let _ = sess.prepend_pending_input(remaining_pending_input).await; + requeued_pending_input = true; + } + blocked_pending_input_contexts = additional_contexts; + blocked_pending_input = true; + break; + } + } + } + } + + let has_accepted_pending_input = !accepted_pending_input.is_empty(); + for pending_input in accepted_pending_input { + record_pending_input(&sess, &turn_context, pending_input).await; + } + record_additional_contexts(&sess, &turn_context, blocked_pending_input_contexts).await; + + if blocked_pending_input && !has_accepted_pending_input { + if requeued_pending_input { + continue; + } + break; + } + + // Construct the input that we will send to the model. 
+ let sampling_request_input: Vec = { + sess.clone_history() + .await + .for_prompt(&turn_context.model_info.input_modalities) + }; + + let sampling_request_input_messages = sampling_request_input + .iter() + .filter_map(|item| match parse_turn_item(item) { + Some(TurnItem::UserMessage(user_message)) => Some(user_message), + _ => None, + }) + .map(|user_message| user_message.message()) + .collect::>(); + let turn_metadata_header = turn_context.turn_metadata_state.current_header_value(); + match run_sampling_request( + Arc::clone(&sess), + Arc::clone(&turn_context), + Arc::clone(&turn_diff_tracker), + &mut client_session, + turn_metadata_header.as_deref(), + sampling_request_input, + &explicitly_enabled_connectors, + skills_outcome, + &mut server_model_warning_emitted_for_turn, + cancellation_token.child_token(), + ) + .await + { + Ok(sampling_request_output) => { + let SamplingRequestResult { + needs_follow_up: model_needs_follow_up, + last_agent_message: sampling_request_last_agent_message, + } = sampling_request_output; + can_drain_pending_input = true; + let has_pending_input = sess.has_pending_input().await; + let needs_follow_up = model_needs_follow_up || has_pending_input; + let total_usage_tokens = sess.get_total_token_usage().await; + let token_limit_reached = total_usage_tokens >= auto_compact_limit; + + let estimated_token_count = + sess.get_estimated_token_count(turn_context.as_ref()).await; + + trace!( + turn_id = %turn_context.sub_id, + total_usage_tokens, + estimated_token_count = ?estimated_token_count, + auto_compact_limit, + token_limit_reached, + model_needs_follow_up, + has_pending_input, + needs_follow_up, + "post sampling token usage" + ); + + // as long as compaction works well in getting us way below the token limit, we shouldn't worry about being in an infinite loop. 
+ if token_limit_reached && needs_follow_up { + if run_auto_compact( + &sess, + &turn_context, + InitialContextInjection::BeforeLastUserMessage, + CompactionReason::ContextLimit, + CompactionPhase::MidTurn, + ) + .await + .is_err() + { + return None; + } + client_session.reset_websocket_session(); + can_drain_pending_input = !model_needs_follow_up; + continue; + } + + if !needs_follow_up { + last_agent_message = sampling_request_last_agent_message; + let stop_hook_permission_mode = match turn_context.approval_policy.value() { + AskForApproval::Never => "bypassPermissions", + AskForApproval::UnlessTrusted + | AskForApproval::OnFailure + | AskForApproval::OnRequest + | AskForApproval::Granular(_) => "default", + } + .to_string(); + let stop_request = codex_hooks::StopRequest { + session_id: sess.conversation_id, + turn_id: turn_context.sub_id.clone(), + cwd: turn_context.cwd.clone(), + transcript_path: sess.hook_transcript_path().await, + model: turn_context.model_info.slug.clone(), + permission_mode: stop_hook_permission_mode, + stop_hook_active, + last_assistant_message: last_agent_message.clone(), + }; + for run in sess.hooks().preview_stop(&stop_request) { + sess.send_event( + &turn_context, + EventMsg::HookStarted(codex_protocol::protocol::HookStartedEvent { + turn_id: Some(turn_context.sub_id.clone()), + run, + }), + ) + .await; + } + let stop_outcome = sess.hooks().run_stop(stop_request).await; + emit_hook_completed_events(&sess, &turn_context, stop_outcome.hook_events) + .await; + if stop_outcome.should_block { + if let Some(hook_prompt_message) = + build_hook_prompt_message(&stop_outcome.continuation_fragments) + { + sess.record_conversation_items( + &turn_context, + std::slice::from_ref(&hook_prompt_message), + ) + .await; + stop_hook_active = true; + continue; + } else { + sess.send_event( + &turn_context, + EventMsg::Warning(WarningEvent { + message: "Stop hook requested continuation without a prompt; ignoring the block.".to_string(), + }), + ) + .await; 
+ } + } + if stop_outcome.should_stop { + break; + } + let hook_outcomes = sess + .hooks() + .dispatch(HookPayload { + session_id: sess.conversation_id, + cwd: turn_context.cwd.clone(), + client: turn_context.app_server_client_name.clone(), + triggered_at: chrono::Utc::now(), + hook_event: HookEvent::AfterAgent { + event: HookEventAfterAgent { + thread_id: sess.conversation_id, + turn_id: turn_context.sub_id.clone(), + input_messages: sampling_request_input_messages, + last_assistant_message: last_agent_message.clone(), + }, + }, + }) + .await; + + let mut abort_message = None; + for hook_outcome in hook_outcomes { + let hook_name = hook_outcome.hook_name; + match hook_outcome.result { + HookResult::Success => {} + HookResult::FailedContinue(error) => { + warn!( + turn_id = %turn_context.sub_id, + hook_name = %hook_name, + error = %error, + "after_agent hook failed; continuing" + ); + } + HookResult::FailedAbort(error) => { + let message = format!( + "after_agent hook '{hook_name}' failed and aborted turn completion: {error}" + ); + warn!( + turn_id = %turn_context.sub_id, + hook_name = %hook_name, + error = %error, + "after_agent hook failed; aborting operation" + ); + if abort_message.is_none() { + abort_message = Some(message); + } + } + } + } + if let Some(message) = abort_message { + sess.send_event( + &turn_context, + EventMsg::Error(ErrorEvent { + message, + codex_error_info: None, + }), + ) + .await; + return None; + } + break; + } + continue; + } + Err(CodexErr::TurnAborted) => { + // Aborted turn is reported via a different event. + break; + } + Err(CodexErr::InvalidImageRequest()) => { + { + let mut state = sess.state.lock().await; + error_or_panic( + "Invalid image detected; sanitizing tool output to prevent poisoning", + ); + if state.history.replace_last_turn_images("Invalid image") { + continue; + } + } + + let event = EventMsg::Error(ErrorEvent { + message: "Invalid image in your last message. Please remove it and try again." 
+ .to_string(), + codex_error_info: Some(CodexErrorInfo::BadRequest), + }); + sess.send_event(&turn_context, event).await; + break; + } + Err(e) => { + info!("Turn error: {e:#}"); + let event = EventMsg::Error(e.to_error_event(/*message_prefix*/ None)); + sess.send_event(&turn_context, event).await; + // let the user continue the conversation + break; + } + } + } + + last_agent_message +} + +async fn track_turn_resolved_config_analytics( + sess: &Session, + turn_context: &TurnContext, + input: &[UserInput], +) { + if !sess.enabled(Feature::GeneralAnalytics) { + return; + } + + let thread_config = { + let state = sess.state.lock().await; + state.session_configuration.thread_config_snapshot() + }; + let is_first_turn = { + let mut state = sess.state.lock().await; + state.take_next_turn_is_first() + }; + sess.services + .analytics_events_client + .track_turn_resolved_config(TurnResolvedConfigFact { + turn_id: turn_context.sub_id.clone(), + thread_id: sess.conversation_id.to_string(), + num_input_images: input + .iter() + .filter(|item| { + matches!(item, UserInput::Image { .. } | UserInput::LocalImage { .. 
}) + }) + .count(), + submission_type: None, + ephemeral: thread_config.ephemeral, + session_source: thread_config.session_source, + model: turn_context.model_info.slug.clone(), + model_provider: turn_context.config.model_provider_id.clone(), + sandbox_policy: turn_context.sandbox_policy.get().clone(), + reasoning_effort: turn_context.reasoning_effort, + reasoning_summary: Some(turn_context.reasoning_summary), + service_tier: turn_context.config.service_tier, + approval_policy: turn_context.approval_policy.value(), + approvals_reviewer: turn_context.config.approvals_reviewer, + sandbox_network_access: turn_context.network_sandbox_policy.is_enabled(), + collaboration_mode: turn_context.collaboration_mode.mode, + personality: turn_context.personality, + is_first_turn, + }); +} + +async fn run_pre_sampling_compact( + sess: &Arc, + turn_context: &Arc, +) -> CodexResult { + let total_usage_tokens_before_compaction = sess.get_total_token_usage().await; + let mut pre_sampling_compacted = maybe_run_previous_model_inline_compact( + sess, + turn_context, + total_usage_tokens_before_compaction, + ) + .await?; + let total_usage_tokens = sess.get_total_token_usage().await; + let auto_compact_limit = turn_context + .model_info + .auto_compact_token_limit() + .unwrap_or(i64::MAX); + // Compact if the total usage tokens are greater than the auto compact limit + if total_usage_tokens >= auto_compact_limit { + run_auto_compact( + sess, + turn_context, + InitialContextInjection::DoNotInject, + CompactionReason::ContextLimit, + CompactionPhase::PreTurn, + ) + .await?; + pre_sampling_compacted = true; + } + Ok(pre_sampling_compacted) +} + +/// Runs pre-sampling compaction against the previous model when switching to a smaller +/// context-window model. +/// +/// Returns `Ok(true)` when compaction ran successfully, `Ok(false)` when compaction was skipped +/// because the model/context-window preconditions were not met, and `Err(_)` only when compaction +/// was attempted and failed. 
async fn maybe_run_previous_model_inline_compact(
    sess: &Arc<Session>,
    turn_context: &Arc<TurnContext>,
    total_usage_tokens: i64,
) -> CodexResult<bool> {
    // No recorded previous turn: nothing to downshift from.
    let Some(previous_turn_settings) = sess.previous_turn_settings().await else {
        return Ok(false);
    };
    let previous_model_turn_context = Arc::new(
        turn_context
            .with_model(previous_turn_settings.model, &sess.services.models_manager)
            .await,
    );

    let Some(old_context_window) = previous_model_turn_context.model_context_window() else {
        return Ok(false);
    };
    let Some(new_context_window) = turn_context.model_context_window() else {
        return Ok(false);
    };
    let new_auto_compact_limit = turn_context
        .model_info
        .auto_compact_token_limit()
        .unwrap_or(i64::MAX);
    // Only compact when we are over the *new* model's limit, the model
    // actually changed, and the new context window is strictly smaller.
    let should_run = total_usage_tokens > new_auto_compact_limit
        && previous_model_turn_context.model_info.slug != turn_context.model_info.slug
        && old_context_window > new_context_window;
    if should_run {
        run_auto_compact(
            sess,
            &previous_model_turn_context,
            InitialContextInjection::DoNotInject,
            CompactionReason::ModelDownshift,
            CompactionPhase::PreTurn,
        )
        .await?;
        return Ok(true);
    }
    Ok(false)
}

/// Dispatches auto-compaction to the remote or inline implementation
/// depending on the provider's capabilities.
async fn run_auto_compact(
    sess: &Arc<Session>,
    turn_context: &Arc<TurnContext>,
    initial_context_injection: InitialContextInjection,
    reason: CompactionReason,
    phase: CompactionPhase,
) -> CodexResult<()> {
    if should_use_remote_compact_task(&turn_context.provider) {
        run_inline_remote_auto_compact_task(
            Arc::clone(sess),
            Arc::clone(turn_context),
            initial_context_injection,
            reason,
            phase,
        )
        .await?;
    } else {
        run_inline_auto_compact_task(
            Arc::clone(sess),
            Arc::clone(turn_context),
            initial_context_injection,
            reason,
            phase,
        )
        .await?;
    }
    Ok(())
}

/// Extracts connector ids implied by skill-injection messages: explicit
/// `app` path mentions plus unambiguous plain-name mentions (a connector
/// slug that maps to exactly one connector and shadows no skill name).
pub(super) fn collect_explicit_app_ids_from_skill_items(
    skill_items: &[ResponseItem],
    connectors: &[connectors::AppInfo],
    skill_name_counts_lower: &HashMap<String, usize>,
) -> HashSet<String> {
    if skill_items.is_empty() || connectors.is_empty() {
        return HashSet::new();
    }

    let skill_messages = skill_items
        .iter()
        .filter_map(|item| match item {
            ResponseItem::Message { content, .. } => {
                content.iter().find_map(|content_item| match content_item {
                    ContentItem::InputText { text } => Some(text.clone()),
                    _ => None,
                })
            }
            _ => None,
        })
        .collect::<Vec<_>>();
    if skill_messages.is_empty() {
        return HashSet::new();
    }

    let mentions = collect_tool_mentions_from_messages(&skill_messages);
    let mention_names_lower = mentions
        .plain_names
        .iter()
        .map(|name| name.to_ascii_lowercase())
        .collect::<HashSet<_>>();
    let mut connector_ids = mentions
        .paths
        .iter()
        .filter(|path| tool_kind_for_path(path) == ToolMentionKind::App)
        .filter_map(|path| app_id_from_path(path).map(str::to_string))
        .collect::<HashSet<_>>();

    let connector_slug_counts = build_connector_slug_counts(connectors);
    for connector in connectors {
        let slug = codex_connectors::metadata::connector_mention_slug(connector);
        let connector_count = connector_slug_counts.get(&slug).copied().unwrap_or(0);
        let skill_count = skill_name_counts_lower.get(&slug).copied().unwrap_or(0);
        // Plain-name mentions only count when the slug is unambiguous.
        if connector_count == 1 && skill_count == 0 && mention_names_lower.contains(&slug) {
            connector_ids.insert(connector.id.clone());
        }
    }

    connector_ids
}

/// Filters the enabled connectors down to those mentioned in `input` or
/// explicitly enabled for this turn.
pub(super) fn filter_connectors_for_input(
    connectors: &[connectors::AppInfo],
    input: &[ResponseItem],
    explicitly_enabled_connectors: &HashSet<String>,
    skill_name_counts_lower: &HashMap<String, usize>,
) -> Vec<connectors::AppInfo> {
    let connectors: Vec<connectors::AppInfo> = connectors
        .iter()
        .filter(|connector| connector.is_enabled)
        .cloned()
        .collect::<Vec<_>>();
    if connectors.is_empty() {
        return Vec::new();
    }

    let user_messages = collect_user_messages(input);
    if user_messages.is_empty() && explicitly_enabled_connectors.is_empty() {
        return Vec::new();
    }

    let mentions = collect_tool_mentions_from_messages(&user_messages);
    let mention_names_lower = mentions
        .plain_names
        .iter()
        .map(|name| name.to_ascii_lowercase())
        .collect::<HashSet<_>>();

    let
connector_slug_counts = build_connector_slug_counts(&connectors); + let mut allowed_connector_ids = explicitly_enabled_connectors.clone(); + for path in mentions + .paths + .iter() + .filter(|path| tool_kind_for_path(path) == ToolMentionKind::App) + { + if let Some(connector_id) = app_id_from_path(path) { + allowed_connector_ids.insert(connector_id.to_string()); + } + } + + connectors + .into_iter() + .filter(|connector| { + connector_inserted_in_messages( + connector, + &mention_names_lower, + &allowed_connector_ids, + &connector_slug_counts, + skill_name_counts_lower, + ) + }) + .collect() +} + +fn connector_inserted_in_messages( + connector: &connectors::AppInfo, + mention_names_lower: &HashSet, + allowed_connector_ids: &HashSet, + connector_slug_counts: &HashMap, + skill_name_counts_lower: &HashMap, +) -> bool { + if allowed_connector_ids.contains(&connector.id) { + return true; + } + + let mention_slug = codex_connectors::metadata::connector_mention_slug(connector); + let connector_count = connector_slug_counts + .get(&mention_slug) + .copied() + .unwrap_or(0); + let skill_count = skill_name_counts_lower + .get(&mention_slug) + .copied() + .unwrap_or(0); + connector_count == 1 && skill_count == 0 && mention_names_lower.contains(&mention_slug) +} + +pub(crate) fn build_prompt( + input: Vec, + router: &ToolRouter, + turn_context: &TurnContext, + base_instructions: BaseInstructions, +) -> Prompt { + let deferred_dynamic_tools = turn_context + .dynamic_tools + .iter() + .filter(|tool| tool.defer_loading) + .map(|tool| tool.name.as_str()) + .collect::>(); + let tools = if deferred_dynamic_tools.is_empty() { + router.model_visible_specs() + } else { + router + .model_visible_specs() + .into_iter() + .filter(|spec| !deferred_dynamic_tools.contains(spec.name())) + .collect() + }; + + Prompt { + input, + tools, + parallel_tool_calls: turn_context.model_info.supports_parallel_tool_calls, + base_instructions, + personality: turn_context.personality, + output_schema: 
turn_context.final_output_json_schema.clone(), + } +} + +#[allow(clippy::too_many_arguments)] +#[instrument(level = "trace", + skip_all, + fields( + turn_id = %turn_context.sub_id, + model = %turn_context.model_info.slug, + cwd = %turn_context.cwd.display() + ) +)] +async fn run_sampling_request( + sess: Arc, + turn_context: Arc, + turn_diff_tracker: SharedTurnDiffTracker, + client_session: &mut ModelClientSession, + turn_metadata_header: Option<&str>, + input: Vec, + explicitly_enabled_connectors: &HashSet, + skills_outcome: Option<&SkillLoadOutcome>, + server_model_warning_emitted_for_turn: &mut bool, + cancellation_token: CancellationToken, +) -> CodexResult { + let router = built_tools( + sess.as_ref(), + turn_context.as_ref(), + &input, + explicitly_enabled_connectors, + skills_outcome, + &cancellation_token, + ) + .await?; + + let base_instructions = sess.get_base_instructions().await; + + let tool_runtime = ToolCallRuntime::new( + Arc::clone(&router), + Arc::clone(&sess), + Arc::clone(&turn_context), + Arc::clone(&turn_diff_tracker), + ); + let _code_mode_worker = sess + .services + .code_mode_service + .start_turn_worker( + &sess, + &turn_context, + Arc::clone(&router), + Arc::clone(&turn_diff_tracker), + ) + .await; + let mut retries = 0; + let mut initial_input = Some(input); + loop { + let prompt_input = if let Some(input) = initial_input.take() { + input + } else { + sess.clone_history() + .await + .for_prompt(&turn_context.model_info.input_modalities) + }; + let prompt = build_prompt( + prompt_input, + router.as_ref(), + turn_context.as_ref(), + base_instructions.clone(), + ); + let err = match try_run_sampling_request( + tool_runtime.clone(), + Arc::clone(&sess), + Arc::clone(&turn_context), + client_session, + turn_metadata_header, + Arc::clone(&turn_diff_tracker), + server_model_warning_emitted_for_turn, + &prompt, + cancellation_token.child_token(), + ) + .await + { + Ok(output) => { + return Ok(output); + } + Err(CodexErr::ContextWindowExceeded) 
=> { + sess.set_total_tokens_full(&turn_context).await; + return Err(CodexErr::ContextWindowExceeded); + } + Err(CodexErr::UsageLimitReached(e)) => { + let rate_limits = e.rate_limits.clone(); + if let Some(rate_limits) = rate_limits { + sess.update_rate_limits(&turn_context, *rate_limits).await; + } + return Err(CodexErr::UsageLimitReached(e)); + } + Err(err) => err, + }; + + if !err.is_retryable() { + return Err(err); + } + + // Use the configured provider-specific stream retry budget. + let max_retries = turn_context.provider.stream_max_retries(); + if retries >= max_retries + && client_session.try_switch_fallback_transport( + &turn_context.session_telemetry, + &turn_context.model_info, + ) + { + sess.send_event( + &turn_context, + EventMsg::Warning(WarningEvent { + message: format!("Falling back from WebSockets to HTTPS transport. {err:#}"), + }), + ) + .await; + retries = 0; + continue; + } + if retries < max_retries { + retries += 1; + let delay = match &err { + CodexErr::Stream(_, requested_delay) => { + requested_delay.unwrap_or_else(|| backoff(retries)) + } + _ => backoff(retries), + }; + warn!( + "stream disconnected - retrying sampling request ({retries}/{max_retries} in {delay:?})...", + ); + + // In release builds, hide the first websocket retry notification to reduce noisy + // transient reconnect messages. In debug builds, keep full visibility for diagnosis. + let report_error = retries > 1 + || cfg!(debug_assertions) + || !sess.services.model_client.responses_websocket_enabled(); + if report_error { + // Surface retry information to any UI/front‑end so the + // user understands what is happening instead of staring + // at a seemingly frozen screen. + sess.notify_stream_error( + &turn_context, + format!("Reconnecting... 
{retries}/{max_retries}"), + err, + ) + .await; + } + tokio::time::sleep(delay).await; + } else { + return Err(err); + } + } +} + +pub(crate) async fn built_tools( + sess: &Session, + turn_context: &TurnContext, + input: &[ResponseItem], + explicitly_enabled_connectors: &HashSet, + skills_outcome: Option<&SkillLoadOutcome>, + cancellation_token: &CancellationToken, +) -> CodexResult> { + let mcp_connection_manager = sess.services.mcp_connection_manager.read().await; + let has_mcp_servers = mcp_connection_manager.has_servers(); + let all_mcp_tools = mcp_connection_manager + .list_all_tools() + .or_cancel(cancellation_token) + .await?; + drop(mcp_connection_manager); + let loaded_plugins = sess + .services + .plugins_manager + .plugins_for_config(&turn_context.config) + .await; + + let mut effective_explicitly_enabled_connectors = explicitly_enabled_connectors.clone(); + effective_explicitly_enabled_connectors.extend(sess.get_connector_selection().await); + + let apps_enabled = turn_context.apps_enabled(); + let accessible_connectors = + apps_enabled.then(|| connectors::accessible_connectors_from_mcp_tools(&all_mcp_tools)); + let accessible_connectors_with_enabled_state = + accessible_connectors.as_ref().map(|connectors| { + connectors::with_app_enabled_state(connectors.clone(), &turn_context.config) + }); + let connectors = if apps_enabled { + let connectors = codex_connectors::merge::merge_plugin_connectors_with_accessible( + loaded_plugins + .effective_apps() + .into_iter() + .map(|connector_id| connector_id.0), + accessible_connectors.clone().unwrap_or_default(), + ); + Some(connectors::with_app_enabled_state( + connectors, + &turn_context.config, + )) + } else { + None + }; + let auth = sess.services.auth_manager.auth().await; + let discoverable_tools = if apps_enabled && turn_context.tools_config.tool_suggest { + if let Some(accessible_connectors) = accessible_connectors_with_enabled_state.as_ref() { + match 
connectors::list_tool_suggest_discoverable_tools_with_auth( + &turn_context.config, + auth.as_ref(), + accessible_connectors.as_slice(), + ) + .await + .map(|discoverable_tools| { + filter_tool_suggest_discoverable_tools_for_client( + discoverable_tools, + turn_context.app_server_client_name.as_deref(), + ) + }) { + Ok(discoverable_tools) if discoverable_tools.is_empty() => None, + Ok(discoverable_tools) => Some(discoverable_tools), + Err(err) => { + warn!("failed to load discoverable tool suggestions: {err:#}"); + None + } + } + } else { + None + } + } else { + None + }; + + let explicitly_enabled = if let Some(connectors) = connectors.as_ref() { + let skill_name_counts_lower = skills_outcome.map_or_else(HashMap::new, |outcome| { + build_skill_name_counts(&outcome.skills, &outcome.disabled_paths).1 + }); + + filter_connectors_for_input( + connectors, + input, + &effective_explicitly_enabled_connectors, + &skill_name_counts_lower, + ) + } else { + Vec::new() + }; + let mcp_tool_exposure = build_mcp_tool_exposure( + &all_mcp_tools, + connectors.as_deref(), + explicitly_enabled.as_slice(), + &turn_context.config, + &turn_context.tools_config, + ); + let mcp_tools = has_mcp_servers.then_some(mcp_tool_exposure.direct_tools); + let deferred_mcp_tools = mcp_tool_exposure.deferred_tools; + let unavailable_called_tools = if turn_context + .config + .features + .enabled(Feature::UnavailableDummyTools) + { + let exposed_tool_names = mcp_tools + .iter() + .chain(deferred_mcp_tools.iter()) + .flat_map(|tools| tools.keys().map(String::as_str)) + .collect::>(); + collect_unavailable_called_tools(input, &exposed_tool_names) + } else { + Vec::new() + }; + + let parallel_mcp_server_names = turn_context + .config + .mcp_servers + .get() + .iter() + .filter_map(|(server_name, server_config)| { + server_config + .supports_parallel_tool_calls + .then_some(server_name.clone()) + }) + .collect::>(); + + Ok(Arc::new(ToolRouter::from_config( + &turn_context.tools_config, + ToolRouterParams 
{ + mcp_tools, + deferred_mcp_tools, + unavailable_called_tools, + parallel_mcp_server_names, + discoverable_tools, + dynamic_tools: turn_context.dynamic_tools.as_slice(), + }, + ))) +} + +#[derive(Debug)] +struct SamplingRequestResult { + needs_follow_up: bool, + last_agent_message: Option, +} + +/// Ephemeral per-response state for streaming a single proposed plan. +/// This is intentionally not persisted or stored in session/state since it +/// only exists while a response is actively streaming. The final plan text +/// is extracted from the completed assistant message. +/// Tracks a single proposed plan item across a streaming response. +struct ProposedPlanItemState { + item_id: String, + started: bool, + completed: bool, +} + +/// Aggregated state used only while streaming a plan-mode response. +/// Includes per-item parsers, deferred agent message bookkeeping, and the plan item lifecycle. +struct PlanModeStreamState { + /// Agent message items started by the model but deferred until we see non-plan text. + pending_agent_message_items: HashMap, + /// Agent message items whose start notification has been emitted. + started_agent_message_items: HashSet, + /// Leading whitespace buffered until we see non-whitespace text for an item. + leading_whitespace_by_item: HashMap, + /// Tracks plan item lifecycle while streaming plan output. 
+ plan_item_state: ProposedPlanItemState, +} + +impl PlanModeStreamState { + fn new(turn_id: &str) -> Self { + Self { + pending_agent_message_items: HashMap::new(), + started_agent_message_items: HashSet::new(), + leading_whitespace_by_item: HashMap::new(), + plan_item_state: ProposedPlanItemState::new(turn_id), + } + } +} + +#[derive(Debug, Default)] +pub(super) struct AssistantMessageStreamParsers { + plan_mode: bool, + parsers_by_item: HashMap, +} + +type ParsedAssistantTextDelta = AssistantTextChunk; + +impl AssistantMessageStreamParsers { + pub(super) fn new(plan_mode: bool) -> Self { + Self { + plan_mode, + parsers_by_item: HashMap::new(), + } + } + + fn parser_mut(&mut self, item_id: &str) -> &mut AssistantTextStreamParser { + let plan_mode = self.plan_mode; + self.parsers_by_item + .entry(item_id.to_string()) + .or_insert_with(|| AssistantTextStreamParser::new(plan_mode)) + } + + pub(super) fn seed_item_text(&mut self, item_id: &str, text: &str) -> ParsedAssistantTextDelta { + if text.is_empty() { + return ParsedAssistantTextDelta::default(); + } + self.parser_mut(item_id).push_str(text) + } + + pub(super) fn parse_delta(&mut self, item_id: &str, delta: &str) -> ParsedAssistantTextDelta { + self.parser_mut(item_id).push_str(delta) + } + + pub(super) fn finish_item(&mut self, item_id: &str) -> ParsedAssistantTextDelta { + let Some(mut parser) = self.parsers_by_item.remove(item_id) else { + return ParsedAssistantTextDelta::default(); + }; + parser.finish() + } + + fn drain_finished(&mut self) -> Vec<(String, ParsedAssistantTextDelta)> { + let parsers_by_item = std::mem::take(&mut self.parsers_by_item); + parsers_by_item + .into_iter() + .map(|(item_id, mut parser)| (item_id, parser.finish())) + .collect() + } +} + +impl ProposedPlanItemState { + fn new(turn_id: &str) -> Self { + Self { + item_id: format!("{turn_id}-plan"), + started: false, + completed: false, + } + } + + async fn start(&mut self, sess: &Session, turn_context: &TurnContext) { + if 
self.started || self.completed { + return; + } + self.started = true; + let item = TurnItem::Plan(PlanItem { + id: self.item_id.clone(), + text: String::new(), + }); + sess.emit_turn_item_started(turn_context, &item).await; + } + + async fn push_delta(&mut self, sess: &Session, turn_context: &TurnContext, delta: &str) { + if self.completed { + return; + } + if delta.is_empty() { + return; + } + let event = PlanDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id: self.item_id.clone(), + delta: delta.to_string(), + }; + sess.send_event(turn_context, EventMsg::PlanDelta(event)) + .await; + } + + async fn complete_with_text( + &mut self, + sess: &Session, + turn_context: &TurnContext, + text: String, + ) { + if self.completed || !self.started { + return; + } + self.completed = true; + let item = TurnItem::Plan(PlanItem { + id: self.item_id.clone(), + text, + }); + sess.emit_turn_item_completed(turn_context, item).await; + } +} + +/// In plan mode we defer agent message starts until the parser emits non-plan +/// text. The parser buffers each line until it can rule out a tag prefix, so +/// plan-only outputs never show up as empty assistant messages. +async fn maybe_emit_pending_agent_message_start( + sess: &Session, + turn_context: &TurnContext, + state: &mut PlanModeStreamState, + item_id: &str, +) { + if state.started_agent_message_items.contains(item_id) { + return; + } + if let Some(item) = state.pending_agent_message_items.remove(item_id) { + sess.emit_turn_item_started(turn_context, &item).await; + state + .started_agent_message_items + .insert(item_id.to_string()); + } +} + +/// Agent messages are text-only today; concatenate all text entries. 
+fn agent_message_text(item: &codex_protocol::items::AgentMessageItem) -> String { + item.content + .iter() + .map(|entry| match entry { + codex_protocol::items::AgentMessageContent::Text { text } => text.as_str(), + }) + .collect() +} + +pub(super) fn realtime_text_for_event(msg: &EventMsg) -> Option { + match msg { + EventMsg::AgentMessage(event) => Some(event.message.clone()), + EventMsg::ItemCompleted(event) => match &event.item { + TurnItem::AgentMessage(item) => Some(agent_message_text(item)), + _ => None, + }, + EventMsg::Error(_) + | EventMsg::Warning(_) + | EventMsg::RealtimeConversationStarted(_) + | EventMsg::RealtimeConversationSdp(_) + | EventMsg::RealtimeConversationRealtime(_) + | EventMsg::RealtimeConversationClosed(_) + | EventMsg::ModelReroute(_) + | EventMsg::ContextCompacted(_) + | EventMsg::ThreadRolledBack(_) + | EventMsg::TurnStarted(_) + | EventMsg::TurnComplete(_) + | EventMsg::TokenCount(_) + | EventMsg::UserMessage(_) + | EventMsg::AgentMessageDelta(_) + | EventMsg::AgentReasoning(_) + | EventMsg::AgentReasoningDelta(_) + | EventMsg::AgentReasoningRawContent(_) + | EventMsg::AgentReasoningRawContentDelta(_) + | EventMsg::AgentReasoningSectionBreak(_) + | EventMsg::SessionConfigured(_) + | EventMsg::ThreadNameUpdated(_) + | EventMsg::McpStartupUpdate(_) + | EventMsg::McpStartupComplete(_) + | EventMsg::McpToolCallBegin(_) + | EventMsg::McpToolCallEnd(_) + | EventMsg::WebSearchBegin(_) + | EventMsg::WebSearchEnd(_) + | EventMsg::ExecCommandBegin(_) + | EventMsg::ExecCommandOutputDelta(_) + | EventMsg::TerminalInteraction(_) + | EventMsg::ExecCommandEnd(_) + | EventMsg::PatchApplyBegin(_) + | EventMsg::PatchApplyUpdated(_) + | EventMsg::PatchApplyEnd(_) + | EventMsg::ViewImageToolCall(_) + | EventMsg::ImageGenerationBegin(_) + | EventMsg::ImageGenerationEnd(_) + | EventMsg::ExecApprovalRequest(_) + | EventMsg::RequestPermissions(_) + | EventMsg::RequestUserInput(_) + | EventMsg::DynamicToolCallRequest(_) + | 
EventMsg::DynamicToolCallResponse(_) + | EventMsg::GuardianAssessment(_) + | EventMsg::ElicitationRequest(_) + | EventMsg::ApplyPatchApprovalRequest(_) + | EventMsg::DeprecationNotice(_) + | EventMsg::BackgroundEvent(_) + | EventMsg::UndoStarted(_) + | EventMsg::UndoCompleted(_) + | EventMsg::StreamError(_) + | EventMsg::TurnDiff(_) + | EventMsg::GetHistoryEntryResponse(_) + | EventMsg::McpListToolsResponse(_) + | EventMsg::ListSkillsResponse(_) + | EventMsg::RealtimeConversationListVoicesResponse(_) + | EventMsg::SkillsUpdateAvailable + | EventMsg::PlanUpdate(_) + | EventMsg::TurnAborted(_) + | EventMsg::ShutdownComplete + | EventMsg::EnteredReviewMode(_) + | EventMsg::ExitedReviewMode(_) + | EventMsg::RawResponseItem(_) + | EventMsg::ItemStarted(_) + | EventMsg::HookStarted(_) + | EventMsg::HookCompleted(_) + | EventMsg::AgentMessageContentDelta(_) + | EventMsg::PlanDelta(_) + | EventMsg::ReasoningContentDelta(_) + | EventMsg::ReasoningRawContentDelta(_) + | EventMsg::CollabAgentSpawnBegin(_) + | EventMsg::CollabAgentSpawnEnd(_) + | EventMsg::CollabAgentInteractionBegin(_) + | EventMsg::CollabAgentInteractionEnd(_) + | EventMsg::CollabWaitingBegin(_) + | EventMsg::CollabWaitingEnd(_) + | EventMsg::CollabCloseBegin(_) + | EventMsg::CollabCloseEnd(_) + | EventMsg::CollabResumeBegin(_) + | EventMsg::CollabResumeEnd(_) => None, + } +} + +/// Split the stream into normal assistant text vs. proposed plan content. +/// Normal text becomes AgentMessage deltas; plan content becomes PlanDelta + +/// TurnItem::Plan. 
+async fn handle_plan_segments( + sess: &Session, + turn_context: &TurnContext, + state: &mut PlanModeStreamState, + item_id: &str, + segments: Vec, +) { + for segment in segments { + match segment { + ProposedPlanSegment::Normal(delta) => { + if delta.is_empty() { + continue; + } + let has_non_whitespace = delta.chars().any(|ch| !ch.is_whitespace()); + if !has_non_whitespace && !state.started_agent_message_items.contains(item_id) { + let entry = state + .leading_whitespace_by_item + .entry(item_id.to_string()) + .or_default(); + entry.push_str(&delta); + continue; + } + let delta = if !state.started_agent_message_items.contains(item_id) { + if let Some(prefix) = state.leading_whitespace_by_item.remove(item_id) { + format!("{prefix}{delta}") + } else { + delta + } + } else { + delta + }; + maybe_emit_pending_agent_message_start(sess, turn_context, state, item_id).await; + + let event = AgentMessageContentDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id: item_id.to_string(), + delta, + }; + sess.send_event(turn_context, EventMsg::AgentMessageContentDelta(event)) + .await; + } + ProposedPlanSegment::ProposedPlanStart => { + if !state.plan_item_state.completed { + state.plan_item_state.start(sess, turn_context).await; + } + } + ProposedPlanSegment::ProposedPlanDelta(delta) => { + if !state.plan_item_state.completed { + if !state.plan_item_state.started { + state.plan_item_state.start(sess, turn_context).await; + } + state + .plan_item_state + .push_delta(sess, turn_context, &delta) + .await; + } + } + ProposedPlanSegment::ProposedPlanEnd => {} + } + } +} + +async fn emit_streamed_assistant_text_delta( + sess: &Session, + turn_context: &TurnContext, + plan_mode_state: Option<&mut PlanModeStreamState>, + item_id: &str, + parsed: ParsedAssistantTextDelta, +) { + if parsed.is_empty() { + return; + } + if !parsed.citations.is_empty() { + // Citation extraction is intentionally local for now; we strip citations 
from display text + // but do not yet surface them in protocol events. + let _citations = parsed.citations; + } + if let Some(state) = plan_mode_state { + if !parsed.plan_segments.is_empty() { + handle_plan_segments(sess, turn_context, state, item_id, parsed.plan_segments).await; + } + return; + } + if parsed.visible_text.is_empty() { + return; + } + let event = AgentMessageContentDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id: item_id.to_string(), + delta: parsed.visible_text, + }; + sess.send_event(turn_context, EventMsg::AgentMessageContentDelta(event)) + .await; +} + +/// Flush buffered assistant text parser state when an assistant message item ends. +async fn flush_assistant_text_segments_for_item( + sess: &Session, + turn_context: &TurnContext, + plan_mode_state: Option<&mut PlanModeStreamState>, + parsers: &mut AssistantMessageStreamParsers, + item_id: &str, +) { + let parsed = parsers.finish_item(item_id); + emit_streamed_assistant_text_delta(sess, turn_context, plan_mode_state, item_id, parsed).await; +} + +/// Flush any remaining buffered assistant text parser state at response completion. +async fn flush_assistant_text_segments_all( + sess: &Session, + turn_context: &TurnContext, + mut plan_mode_state: Option<&mut PlanModeStreamState>, + parsers: &mut AssistantMessageStreamParsers, +) { + for (item_id, parsed) in parsers.drain_finished() { + emit_streamed_assistant_text_delta( + sess, + turn_context, + plan_mode_state.as_deref_mut(), + &item_id, + parsed, + ) + .await; + } +} + +/// Emit completion for plan items by parsing the finalized assistant message. +async fn maybe_complete_plan_item_from_message( + sess: &Session, + turn_context: &TurnContext, + state: &mut PlanModeStreamState, + item: &ResponseItem, +) { + if let ResponseItem::Message { role, content, .. 
} = item + && role == "assistant" + { + let mut text = String::new(); + for entry in content { + if let ContentItem::OutputText { text: chunk } = entry { + text.push_str(chunk); + } + } + if let Some(plan_text) = extract_proposed_plan_text(&text) { + let (plan_text, _citations) = strip_citations(&plan_text); + if !state.plan_item_state.started { + state.plan_item_state.start(sess, turn_context).await; + } + state + .plan_item_state + .complete_with_text(sess, turn_context, plan_text) + .await; + } + } +} + +/// Emit a completed agent message in plan mode, respecting deferred starts. +async fn emit_agent_message_in_plan_mode( + sess: &Session, + turn_context: &TurnContext, + agent_message: codex_protocol::items::AgentMessageItem, + state: &mut PlanModeStreamState, +) { + let agent_message_id = agent_message.id.clone(); + let text = agent_message_text(&agent_message); + if text.trim().is_empty() { + state.pending_agent_message_items.remove(&agent_message_id); + state.started_agent_message_items.remove(&agent_message_id); + return; + } + + maybe_emit_pending_agent_message_start(sess, turn_context, state, &agent_message_id).await; + + if !state + .started_agent_message_items + .contains(&agent_message_id) + { + let start_item = state + .pending_agent_message_items + .remove(&agent_message_id) + .unwrap_or_else(|| { + TurnItem::AgentMessage(codex_protocol::items::AgentMessageItem { + id: agent_message_id.clone(), + content: Vec::new(), + phase: None, + memory_citation: None, + }) + }); + sess.emit_turn_item_started(turn_context, &start_item).await; + state + .started_agent_message_items + .insert(agent_message_id.clone()); + } + + sess.emit_turn_item_completed(turn_context, TurnItem::AgentMessage(agent_message)) + .await; + state.started_agent_message_items.remove(&agent_message_id); +} + +/// Emit completion for a plan-mode turn item, handling agent messages specially. 
+async fn emit_turn_item_in_plan_mode( + sess: &Session, + turn_context: &TurnContext, + turn_item: TurnItem, + previously_active_item: Option<&TurnItem>, + state: &mut PlanModeStreamState, +) { + match turn_item { + TurnItem::AgentMessage(agent_message) => { + emit_agent_message_in_plan_mode(sess, turn_context, agent_message, state).await; + } + _ => { + if previously_active_item.is_none() { + sess.emit_turn_item_started(turn_context, &turn_item).await; + } + sess.emit_turn_item_completed(turn_context, turn_item).await; + } + } +} + +/// Handle a completed assistant response item in plan mode, returning true if handled. +async fn handle_assistant_item_done_in_plan_mode( + sess: &Session, + turn_context: &TurnContext, + item: &ResponseItem, + state: &mut PlanModeStreamState, + previously_active_item: Option<&TurnItem>, + last_agent_message: &mut Option, +) -> bool { + if let ResponseItem::Message { role, .. } = item + && role == "assistant" + { + maybe_complete_plan_item_from_message(sess, turn_context, state, item).await; + + if let Some(turn_item) = + handle_non_tool_response_item(sess, turn_context, item, /*plan_mode*/ true).await + { + emit_turn_item_in_plan_mode( + sess, + turn_context, + turn_item, + previously_active_item, + state, + ) + .await; + } + + record_completed_response_item(sess, turn_context, item).await; + if let Some(agent_message) = last_assistant_message_from_item(item, /*plan_mode*/ true) { + *last_agent_message = Some(agent_message); + } + return true; + } + false +} + +async fn drain_in_flight( + in_flight: &mut FuturesOrdered>>, + sess: Arc, + turn_context: Arc, +) -> CodexResult<()> { + while let Some(res) = in_flight.next().await { + match res { + Ok(response_input) => { + let response_item = response_input.into(); + sess.record_conversation_items(&turn_context, std::slice::from_ref(&response_item)) + .await; + mark_thread_memory_mode_polluted_if_external_context( + sess.as_ref(), + turn_context.as_ref(), + &response_item, + ) + .await; 
+ } + Err(err) => { + error_or_panic(format!("in-flight tool future failed during drain: {err}")); + } + } + } + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +#[instrument(level = "trace", + skip_all, + fields( + turn_id = %turn_context.sub_id, + model = %turn_context.model_info.slug + ) +)] +async fn try_run_sampling_request( + tool_runtime: ToolCallRuntime, + sess: Arc, + turn_context: Arc, + client_session: &mut ModelClientSession, + turn_metadata_header: Option<&str>, + turn_diff_tracker: SharedTurnDiffTracker, + server_model_warning_emitted_for_turn: &mut bool, + prompt: &Prompt, + cancellation_token: CancellationToken, +) -> CodexResult { + feedback_tags!( + model = turn_context.model_info.slug.clone(), + approval_policy = turn_context.approval_policy.value(), + sandbox_policy = turn_context.sandbox_policy.get(), + effort = turn_context.reasoning_effort, + auth_mode = sess.services.auth_manager.auth_mode(), + features = sess.features.enabled_features(), + ); + let mut stream = client_session + .stream( + prompt, + &turn_context.model_info, + &turn_context.session_telemetry, + turn_context.reasoning_effort, + turn_context.reasoning_summary, + turn_context.config.service_tier, + turn_metadata_header, + ) + .instrument(trace_span!("stream_request")) + .or_cancel(&cancellation_token) + .await??; + let mut in_flight: FuturesOrdered>> = + FuturesOrdered::new(); + let mut needs_follow_up = false; + let mut last_agent_message: Option = None; + let mut active_item: Option = None; + let mut active_tool_argument_diff_consumer: Option<( + String, + Box, + )> = None; + let mut should_emit_turn_diff = false; + let plan_mode = turn_context.collaboration_mode.mode == ModeKind::Plan; + let mut assistant_message_stream_parsers = AssistantMessageStreamParsers::new(plan_mode); + let mut plan_mode_state = plan_mode.then(|| PlanModeStreamState::new(&turn_context.sub_id)); + let receiving_span = trace_span!("receiving_stream"); + let outcome: CodexResult = loop { + let 
handle_responses = trace_span!( + parent: &receiving_span, + "handle_responses", + otel.name = field::Empty, + tool_name = field::Empty, + from = field::Empty, + ); + + let event = match stream + .next() + .instrument(trace_span!(parent: &handle_responses, "receiving")) + .or_cancel(&cancellation_token) + .await + { + Ok(event) => event, + Err(codex_async_utils::CancelErr::Cancelled) => break Err(CodexErr::TurnAborted), + }; + + let event = match event { + Some(Ok(event)) => event, + Some(Err(err)) => break Err(err), + None => { + break Err(CodexErr::Stream( + "stream closed before response.completed".into(), + None, + )); + } + }; + + sess.services + .session_telemetry + .record_responses(&handle_responses, &event); + record_turn_ttft_metric(&turn_context, &event).await; + + match event { + ResponseEvent::Created => {} + ResponseEvent::OutputItemDone(item) => { + active_tool_argument_diff_consumer = None; + let previously_active_item = active_item.take(); + if let Some(previous) = previously_active_item.as_ref() + && matches!(previous, TurnItem::AgentMessage(_)) + { + let item_id = previous.id(); + flush_assistant_text_segments_for_item( + &sess, + &turn_context, + plan_mode_state.as_mut(), + &mut assistant_message_stream_parsers, + &item_id, + ) + .await; + } + if let Some(state) = plan_mode_state.as_mut() + && handle_assistant_item_done_in_plan_mode( + &sess, + &turn_context, + &item, + state, + previously_active_item.as_ref(), + &mut last_agent_message, + ) + .await + { + continue; + } + + let mut ctx = HandleOutputCtx { + sess: sess.clone(), + turn_context: turn_context.clone(), + tool_runtime: tool_runtime.clone(), + cancellation_token: cancellation_token.child_token(), + }; + + let output_result = + match handle_output_item_done(&mut ctx, item, previously_active_item) + .instrument(handle_responses) + .await + { + Ok(output_result) => output_result, + Err(err) => break Err(err), + }; + if let Some(tool_future) = output_result.tool_future { + 
in_flight.push_back(tool_future); + } + if let Some(agent_message) = output_result.last_agent_message { + last_agent_message = Some(agent_message); + } + needs_follow_up |= output_result.needs_follow_up; + } + ResponseEvent::OutputItemAdded(item) => { + if let ResponseItem::CustomToolCall { call_id, name, .. } = &item { + let tool_name = ToolName::plain(name.as_str()); + active_tool_argument_diff_consumer = tool_runtime + .create_diff_consumer(&tool_name) + .map(|consumer| (call_id.clone(), consumer)); + } else if matches!(&item, ResponseItem::FunctionCall { .. }) { + active_tool_argument_diff_consumer = None; + } + if let Some(turn_item) = handle_non_tool_response_item( + sess.as_ref(), + turn_context.as_ref(), + &item, + plan_mode, + ) + .await + { + let mut turn_item = turn_item; + let mut seeded_parsed: Option = None; + let mut seeded_item_id: Option = None; + if matches!(turn_item, TurnItem::AgentMessage(_)) + && let Some(raw_text) = raw_assistant_output_text_from_item(&item) + { + let item_id = turn_item.id(); + let mut seeded = + assistant_message_stream_parsers.seed_item_text(&item_id, &raw_text); + if let TurnItem::AgentMessage(agent_message) = &mut turn_item { + agent_message.content = + vec![codex_protocol::items::AgentMessageContent::Text { + text: if plan_mode { + String::new() + } else { + std::mem::take(&mut seeded.visible_text) + }, + }]; + } + seeded_parsed = plan_mode.then_some(seeded); + seeded_item_id = Some(item_id); + } + if let Some(state) = plan_mode_state.as_mut() + && matches!(turn_item, TurnItem::AgentMessage(_)) + { + let item_id = turn_item.id(); + state + .pending_agent_message_items + .insert(item_id, turn_item.clone()); + } else { + sess.emit_turn_item_started(&turn_context, &turn_item).await; + } + if let (Some(state), Some(item_id), Some(parsed)) = ( + plan_mode_state.as_mut(), + seeded_item_id.as_deref(), + seeded_parsed, + ) { + emit_streamed_assistant_text_delta( + &sess, + &turn_context, + Some(state), + item_id, + parsed, + ) 
+ .await; + } + active_item = Some(turn_item); + } + } + ResponseEvent::ServerModel(server_model) => { + if !*server_model_warning_emitted_for_turn + && sess + .maybe_warn_on_server_model_mismatch(&turn_context, server_model) + .await + { + *server_model_warning_emitted_for_turn = true; + } + } + ResponseEvent::ServerReasoningIncluded(included) => { + sess.set_server_reasoning_included(included).await; + } + ResponseEvent::RateLimits(snapshot) => { + // Update internal state with latest rate limits, but defer sending until + // token usage is available to avoid duplicate TokenCount events. + sess.update_rate_limits(&turn_context, snapshot).await; + } + ResponseEvent::ModelsEtag(etag) => { + // Update internal state with latest models etag + sess.services.models_manager.refresh_if_new_etag(etag).await; + } + ResponseEvent::Completed { + response_id: _, + token_usage, + } => { + flush_assistant_text_segments_all( + &sess, + &turn_context, + plan_mode_state.as_mut(), + &mut assistant_message_stream_parsers, + ) + .await; + sess.update_token_usage_info(&turn_context, token_usage.as_ref()) + .await; + should_emit_turn_diff = true; + + break Ok(SamplingRequestResult { + needs_follow_up, + last_agent_message, + }); + } + ResponseEvent::OutputTextDelta(delta) => { + // In review child threads, suppress assistant text deltas; the + // UI will show a selection popup from the final ReviewOutput. 
+ if let Some(active) = active_item.as_ref() { + let item_id = active.id(); + if matches!(active, TurnItem::AgentMessage(_)) { + let parsed = assistant_message_stream_parsers.parse_delta(&item_id, &delta); + emit_streamed_assistant_text_delta( + &sess, + &turn_context, + plan_mode_state.as_mut(), + &item_id, + parsed, + ) + .await; + } else { + let event = AgentMessageContentDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id, + delta, + }; + sess.send_event(&turn_context, EventMsg::AgentMessageContentDelta(event)) + .await; + } + } else { + error_or_panic("OutputTextDelta without active item".to_string()); + } + } + ResponseEvent::ToolCallInputDelta { + item_id: _, + call_id, + delta, + } => { + let Some((active_call_id, consumer)) = active_tool_argument_diff_consumer.as_mut() + else { + continue; + }; + let call_id = match call_id { + Some(call_id) if call_id.as_str() != active_call_id.as_str() => continue, + Some(call_id) => call_id, + None => active_call_id.clone(), + }; + if let Some(event) = consumer.consume_diff(turn_context.as_ref(), call_id, &delta) { + sess.send_event(&turn_context, event).await; + } + } + ResponseEvent::ReasoningSummaryDelta { + delta, + summary_index, + } => { + if let Some(active) = active_item.as_ref() { + let event = ReasoningContentDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id: active.id(), + delta, + summary_index, + }; + sess.send_event(&turn_context, EventMsg::ReasoningContentDelta(event)) + .await; + } else { + error_or_panic("ReasoningSummaryDelta without active item".to_string()); + } + } + ResponseEvent::ReasoningSummaryPartAdded { summary_index } => { + if let Some(active) = active_item.as_ref() { + let event = + EventMsg::AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent { + item_id: active.id(), + summary_index, + }); + sess.send_event(&turn_context, event).await; + } else { + 
error_or_panic("ReasoningSummaryPartAdded without active item".to_string()); + } + } + ResponseEvent::ReasoningContentDelta { + delta, + content_index, + } => { + if let Some(active) = active_item.as_ref() { + let event = ReasoningRawContentDeltaEvent { + thread_id: sess.conversation_id.to_string(), + turn_id: turn_context.sub_id.clone(), + item_id: active.id(), + delta, + content_index, + }; + sess.send_event(&turn_context, EventMsg::ReasoningRawContentDelta(event)) + .await; + } else { + error_or_panic("ReasoningRawContentDelta without active item".to_string()); + } + } + } + }; + + flush_assistant_text_segments_all( + &sess, + &turn_context, + plan_mode_state.as_mut(), + &mut assistant_message_stream_parsers, + ) + .await; + + drain_in_flight(&mut in_flight, sess.clone(), turn_context.clone()).await?; + + if cancellation_token.is_cancelled() { + return Err(CodexErr::TurnAborted); + } + + if should_emit_turn_diff { + let unified_diff = { + let mut tracker = turn_diff_tracker.lock().await; + tracker.get_unified_diff() + }; + if let Ok(Some(unified_diff)) = unified_diff { + let msg = EventMsg::TurnDiff(TurnDiffEvent { unified_diff }); + sess.clone().send_event(&turn_context, msg).await; + } + } + + outcome +} + +pub(crate) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option { + for item in responses.iter().rev() { + if let Some(message) = last_assistant_message_from_item(item, /*plan_mode*/ false) { + return Some(message); + } + } + None +} diff --git a/codex-rs/core/src/codex/turn_context.rs b/codex-rs/core/src/codex/turn_context.rs new file mode 100644 index 0000000000..6e5bf22c4b --- /dev/null +++ b/codex-rs/core/src/codex/turn_context.rs @@ -0,0 +1,615 @@ +use super::*; + +pub(super) fn image_generation_tool_auth_allowed(auth_manager: Option<&AuthManager>) -> bool { + matches!( + auth_manager.and_then(AuthManager::auth_mode), + Some(AuthMode::Chatgpt) + ) +} + +#[derive(Clone, Debug)] +pub(crate) struct TurnSkillsContext { + pub(crate) 
outcome: Arc, + pub(crate) implicit_invocation_seen_skills: Arc>>, +} + +impl TurnSkillsContext { + pub(crate) fn new(outcome: Arc) -> Self { + Self { + outcome, + implicit_invocation_seen_skills: Arc::new(Mutex::new(HashSet::new())), + } + } +} + +/// The context needed for a single turn of the thread. +#[derive(Debug)] +pub(crate) struct TurnContext { + pub(crate) sub_id: String, + pub(crate) trace_id: Option, + pub(crate) realtime_active: bool, + pub(crate) config: Arc, + pub(crate) auth_manager: Option>, + pub(crate) model_info: ModelInfo, + pub(crate) session_telemetry: SessionTelemetry, + pub(crate) provider: ModelProviderInfo, + pub(crate) reasoning_effort: Option, + pub(crate) reasoning_summary: ReasoningSummaryConfig, + pub(crate) session_source: SessionSource, + pub(crate) environment: Option>, + /// The session's absolute working directory. All relative paths provided + /// by the model as well as sandbox policies are resolved against this path + /// instead of `std::env::current_dir()`. 
+ pub(crate) cwd: AbsolutePathBuf, + pub(crate) current_date: Option, + pub(crate) timezone: Option, + pub(crate) app_server_client_name: Option, + pub(crate) developer_instructions: Option, + pub(crate) compact_prompt: Option, + pub(crate) user_instructions: Option, + pub(crate) collaboration_mode: CollaborationMode, + pub(crate) personality: Option, + pub(crate) approval_policy: Constrained, + pub(crate) sandbox_policy: Constrained, + pub(crate) file_system_sandbox_policy: FileSystemSandboxPolicy, + pub(crate) network_sandbox_policy: NetworkSandboxPolicy, + pub(crate) network: Option, + pub(crate) windows_sandbox_level: WindowsSandboxLevel, + pub(crate) shell_environment_policy: ShellEnvironmentPolicy, + pub(crate) tools_config: ToolsConfig, + pub(crate) features: ManagedFeatures, + pub(crate) ghost_snapshot: GhostSnapshotConfig, + pub(crate) final_output_json_schema: Option, + pub(crate) codex_self_exe: Option, + pub(crate) codex_linux_sandbox_exe: Option, + pub(crate) tool_call_gate: Arc, + pub(crate) truncation_policy: TruncationPolicy, + pub(crate) js_repl: Arc, + pub(crate) dynamic_tools: Vec, + pub(crate) turn_metadata_state: Arc, + pub(crate) turn_skills: TurnSkillsContext, + pub(crate) turn_timing_state: Arc, +} +impl TurnContext { + pub(crate) fn model_context_window(&self) -> Option { + let effective_context_window_percent = self.model_info.effective_context_window_percent; + self.model_info.context_window.map(|context_window| { + context_window.saturating_mul(effective_context_window_percent) / 100 + }) + } + + pub(crate) fn apps_enabled(&self) -> bool { + let is_chatgpt_auth = self + .auth_manager + .as_deref() + .and_then(AuthManager::auth_cached) + .as_ref() + .is_some_and(CodexAuth::is_chatgpt_auth); + self.features.apps_enabled_for_auth(is_chatgpt_auth) + } + + pub(crate) async fn with_model(&self, model: String, models_manager: &ModelsManager) -> Self { + let mut config = (*self.config).clone(); + config.model = Some(model.clone()); + let 
model_info = models_manager + .get_model_info(model.as_str(), &config.to_models_manager_config()) + .await; + let truncation_policy = model_info.truncation_policy.into(); + let supported_reasoning_levels = model_info + .supported_reasoning_levels + .iter() + .map(|preset| preset.effort) + .collect::>(); + let reasoning_effort = if let Some(current_reasoning_effort) = self.reasoning_effort { + if supported_reasoning_levels.contains(¤t_reasoning_effort) { + Some(current_reasoning_effort) + } else { + supported_reasoning_levels + .get(supported_reasoning_levels.len().saturating_sub(1) / 2) + .copied() + .or(model_info.default_reasoning_level) + } + } else { + supported_reasoning_levels + .get(supported_reasoning_levels.len().saturating_sub(1) / 2) + .copied() + .or(model_info.default_reasoning_level) + }; + config.model_reasoning_effort = reasoning_effort; + + let collaboration_mode = self.collaboration_mode.with_updates( + Some(model.clone()), + Some(reasoning_effort), + /*developer_instructions*/ None, + ); + let features = self.features.clone(); + let tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &model_info, + available_models: &models_manager + .list_models(RefreshStrategy::OnlineIfUncached) + .await, + features: &features, + image_generation_tool_auth_allowed: image_generation_tool_auth_allowed( + self.auth_manager.as_deref(), + ), + web_search_mode: self.tools_config.web_search_mode, + session_source: self.session_source.clone(), + sandbox_policy: self.sandbox_policy.get(), + windows_sandbox_level: self.windows_sandbox_level, + }) + .with_unified_exec_shell_mode(self.tools_config.unified_exec_shell_mode.clone()) + .with_web_search_config(self.tools_config.web_search_config.clone()) + .with_allow_login_shell(self.tools_config.allow_login_shell) + .with_has_environment(self.tools_config.has_environment) + .with_spawn_agent_usage_hint(config.multi_agent_v2.usage_hint_enabled) + 
.with_spawn_agent_usage_hint_text(config.multi_agent_v2.usage_hint_text.clone()) + .with_hide_spawn_agent_metadata(config.multi_agent_v2.hide_spawn_agent_metadata) + .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( + &config.agent_roles, + )); + + Self { + sub_id: self.sub_id.clone(), + trace_id: self.trace_id.clone(), + realtime_active: self.realtime_active, + config: Arc::new(config), + auth_manager: self.auth_manager.clone(), + model_info: model_info.clone(), + session_telemetry: self + .session_telemetry + .clone() + .with_model(model.as_str(), model_info.slug.as_str()), + provider: self.provider.clone(), + reasoning_effort, + reasoning_summary: self.reasoning_summary, + session_source: self.session_source.clone(), + environment: self.environment.clone(), + cwd: self.cwd.clone(), + current_date: self.current_date.clone(), + timezone: self.timezone.clone(), + app_server_client_name: self.app_server_client_name.clone(), + developer_instructions: self.developer_instructions.clone(), + compact_prompt: self.compact_prompt.clone(), + user_instructions: self.user_instructions.clone(), + collaboration_mode, + personality: self.personality, + approval_policy: self.approval_policy.clone(), + sandbox_policy: self.sandbox_policy.clone(), + file_system_sandbox_policy: self.file_system_sandbox_policy.clone(), + network_sandbox_policy: self.network_sandbox_policy, + network: self.network.clone(), + windows_sandbox_level: self.windows_sandbox_level, + shell_environment_policy: self.shell_environment_policy.clone(), + tools_config, + features, + ghost_snapshot: self.ghost_snapshot.clone(), + final_output_json_schema: self.final_output_json_schema.clone(), + codex_self_exe: self.codex_self_exe.clone(), + codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(), + tool_call_gate: Arc::new(ReadinessFlag::new()), + truncation_policy, + js_repl: Arc::clone(&self.js_repl), + dynamic_tools: self.dynamic_tools.clone(), + turn_metadata_state: 
self.turn_metadata_state.clone(), + turn_skills: self.turn_skills.clone(), + turn_timing_state: Arc::clone(&self.turn_timing_state), + } + } + + pub(crate) fn resolve_path(&self, path: Option) -> AbsolutePathBuf { + path.as_ref() + .map_or_else(|| self.cwd.clone(), |path| self.cwd.join(path)) + } + + pub(crate) fn file_system_sandbox_context( + &self, + additional_permissions: Option, + ) -> FileSystemSandboxContext { + FileSystemSandboxContext { + sandbox_policy: self.sandbox_policy.get().clone(), + windows_sandbox_level: self.windows_sandbox_level, + windows_sandbox_private_desktop: self + .config + .permissions + .windows_sandbox_private_desktop, + use_legacy_landlock: self.features.use_legacy_landlock(), + additional_permissions, + } + } + + pub(crate) fn compact_prompt(&self) -> &str { + self.compact_prompt + .as_deref() + .unwrap_or(compact::SUMMARIZATION_PROMPT) + } + + pub(crate) fn to_turn_context_item(&self) -> TurnContextItem { + let legacy_file_system_sandbox_policy = FileSystemSandboxPolicy::from_legacy_sandbox_policy( + self.sandbox_policy.get(), + &self.cwd, + ); + // Omit the derived split filesystem policy when it is equivalent to + // the legacy sandbox policy. This keeps turn-context payloads stable + // while both fields exist; once callers consume only the split policy, + // this comparison and the legacy projection should go away. 
+ let file_system_sandbox_policy = (self.file_system_sandbox_policy + != legacy_file_system_sandbox_policy) + .then(|| self.file_system_sandbox_policy.clone()); + + TurnContextItem { + turn_id: Some(self.sub_id.clone()), + trace_id: self.trace_id.clone(), + cwd: self.cwd.to_path_buf(), + current_date: self.current_date.clone(), + timezone: self.timezone.clone(), + approval_policy: self.approval_policy.value(), + sandbox_policy: self.sandbox_policy.get().clone(), + network: self.turn_context_network_item(), + file_system_sandbox_policy, + model: self.model_info.slug.clone(), + personality: self.personality, + collaboration_mode: Some(self.collaboration_mode.clone()), + realtime_active: Some(self.realtime_active), + effort: self.reasoning_effort, + summary: self.reasoning_summary, + user_instructions: self.user_instructions.clone(), + developer_instructions: self.developer_instructions.clone(), + final_output_json_schema: self.final_output_json_schema.clone(), + truncation_policy: Some(self.truncation_policy), + } + } + + fn turn_context_network_item(&self) -> Option { + let network = self + .config + .config_layer_stack + .requirements() + .network + .as_ref()?; + Some(TurnContextNetworkItem { + allowed_domains: network + .domains + .as_ref() + .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) + .unwrap_or_default(), + denied_domains: network + .domains + .as_ref() + .and_then(codex_config::NetworkDomainPermissionsToml::denied_domains) + .unwrap_or_default(), + }) + } +} + +fn local_time_context() -> (String, String) { + match iana_time_zone::get_timezone() { + Ok(timezone) => (Local::now().format("%Y-%m-%d").to_string(), timezone), + Err(_) => ( + Utc::now().format("%Y-%m-%d").to_string(), + "Etc/UTC".to_string(), + ), + } +} + +impl Session { + /// Don't expand the number of mutated arguments on config. We are in the process of getting rid of it. 
+ pub(crate) fn build_per_turn_config(session_configuration: &SessionConfiguration) -> Config { + // todo(aibrahim): store this state somewhere else so we don't need to mut config + let config = session_configuration.original_config_do_not_use.clone(); + let mut per_turn_config = (*config).clone(); + per_turn_config.cwd = session_configuration.cwd.clone(); + per_turn_config.model_reasoning_effort = + session_configuration.collaboration_mode.reasoning_effort(); + per_turn_config.model_reasoning_summary = session_configuration.model_reasoning_summary; + per_turn_config.service_tier = session_configuration.service_tier; + per_turn_config.personality = session_configuration.personality; + per_turn_config.approvals_reviewer = session_configuration.approvals_reviewer; + let resolved_web_search_mode = resolve_web_search_mode_for_turn( + &per_turn_config.web_search_mode, + session_configuration.sandbox_policy.get(), + ); + if let Err(err) = per_turn_config + .web_search_mode + .set(resolved_web_search_mode) + { + let fallback_value = per_turn_config.web_search_mode.value(); + tracing::warn!( + error = %err, + ?resolved_web_search_mode, + ?fallback_value, + "resolved web_search_mode is disallowed by requirements; keeping constrained value" + ); + } + per_turn_config.features = config.features.clone(); + per_turn_config + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn make_turn_context( + conversation_id: ThreadId, + auth_manager: Option>, + session_telemetry: &SessionTelemetry, + provider: ModelProviderInfo, + session_configuration: &SessionConfiguration, + user_shell: &shell::Shell, + shell_zsh_path: Option<&PathBuf>, + main_execve_wrapper_exe: Option<&PathBuf>, + per_turn_config: Config, + model_info: ModelInfo, + models_manager: &ModelsManager, + network: Option, + environment: Option>, + sub_id: String, + js_repl: Arc, + skills_outcome: Arc, + ) -> TurnContext { + let reasoning_effort = session_configuration.collaboration_mode.reasoning_effort(); + let 
reasoning_summary = session_configuration + .model_reasoning_summary + .unwrap_or(model_info.default_reasoning_summary); + let session_telemetry = session_telemetry.clone().with_model( + session_configuration.collaboration_mode.model(), + model_info.slug.as_str(), + ); + let session_source = session_configuration.session_source.clone(); + let image_generation_tool_auth_allowed = + image_generation_tool_auth_allowed(auth_manager.as_deref()); + let auth_manager_for_context = auth_manager; + let provider_for_context = provider; + let session_telemetry_for_context = session_telemetry; + let tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &model_info, + available_models: &models_manager.try_list_models().unwrap_or_default(), + features: &per_turn_config.features, + image_generation_tool_auth_allowed, + web_search_mode: Some(per_turn_config.web_search_mode.value()), + session_source: session_source.clone(), + sandbox_policy: session_configuration.sandbox_policy.get(), + windows_sandbox_level: session_configuration.windows_sandbox_level, + }) + .with_unified_exec_shell_mode_for_session( + crate::tools::spec::tool_user_shell_type(user_shell), + shell_zsh_path, + main_execve_wrapper_exe, + ) + .with_web_search_config(per_turn_config.web_search_config.clone()) + .with_allow_login_shell(per_turn_config.permissions.allow_login_shell) + .with_has_environment(environment.is_some()) + .with_spawn_agent_usage_hint(per_turn_config.multi_agent_v2.usage_hint_enabled) + .with_spawn_agent_usage_hint_text(per_turn_config.multi_agent_v2.usage_hint_text.clone()) + .with_hide_spawn_agent_metadata(per_turn_config.multi_agent_v2.hide_spawn_agent_metadata) + .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( + &per_turn_config.agent_roles, + )); + + let cwd = session_configuration.cwd.clone(); + + let per_turn_config = Arc::new(per_turn_config); + let turn_metadata_state = Arc::new(TurnMetadataState::new( + conversation_id.to_string(), + 
&session_source, + sub_id.clone(), + cwd.clone(), + session_configuration.sandbox_policy.get(), + session_configuration.windows_sandbox_level, + )); + let (current_date, timezone) = local_time_context(); + TurnContext { + sub_id, + trace_id: current_span_trace_id(), + realtime_active: false, + config: per_turn_config.clone(), + auth_manager: auth_manager_for_context, + model_info: model_info.clone(), + session_telemetry: session_telemetry_for_context, + provider: provider_for_context, + reasoning_effort, + reasoning_summary, + session_source, + environment, + cwd, + current_date: Some(current_date), + timezone: Some(timezone), + app_server_client_name: session_configuration.app_server_client_name.clone(), + developer_instructions: session_configuration.developer_instructions.clone(), + compact_prompt: session_configuration.compact_prompt.clone(), + user_instructions: session_configuration.user_instructions.clone(), + collaboration_mode: session_configuration.collaboration_mode.clone(), + personality: session_configuration.personality, + approval_policy: session_configuration.approval_policy.clone(), + sandbox_policy: session_configuration.sandbox_policy.clone(), + file_system_sandbox_policy: session_configuration.file_system_sandbox_policy.clone(), + network_sandbox_policy: session_configuration.network_sandbox_policy, + network, + windows_sandbox_level: session_configuration.windows_sandbox_level, + shell_environment_policy: per_turn_config.permissions.shell_environment_policy.clone(), + tools_config, + features: per_turn_config.features.clone(), + ghost_snapshot: per_turn_config.ghost_snapshot.clone(), + final_output_json_schema: None, + codex_self_exe: per_turn_config.codex_self_exe.clone(), + codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(), + tool_call_gate: Arc::new(ReadinessFlag::new()), + truncation_policy: model_info.truncation_policy.into(), + js_repl, + dynamic_tools: session_configuration.dynamic_tools.clone(), + 
turn_metadata_state, + turn_skills: TurnSkillsContext::new(skills_outcome), + turn_timing_state: Arc::new(TurnTimingState::default()), + } + } + + pub(crate) async fn new_turn_with_sub_id( + &self, + sub_id: String, + updates: SessionSettingsUpdate, + ) -> ConstraintResult> { + let ( + session_configuration, + sandbox_policy_changed, + previous_cwd, + codex_home, + session_source, + ) = { + let mut state = self.state.lock().await; + match state.session_configuration.clone().apply(&updates) { + Ok(next) => { + let previous_cwd = state.session_configuration.cwd.clone(); + let sandbox_policy_changed = + state.session_configuration.sandbox_policy != next.sandbox_policy; + let codex_home = next.codex_home.clone(); + let session_source = next.session_source.clone(); + state.session_configuration = next.clone(); + ( + next, + sandbox_policy_changed, + previous_cwd, + codex_home, + session_source, + ) + } + Err(err) => { + drop(state); + self.send_event_raw(Event { + id: sub_id.clone(), + msg: EventMsg::Error(ErrorEvent { + message: err.to_string(), + codex_error_info: Some(CodexErrorInfo::BadRequest), + }), + }) + .await; + return Err(err); + } + } + }; + + self.maybe_refresh_shell_snapshot_for_cwd( + &previous_cwd, + &session_configuration.cwd, + &codex_home, + &session_source, + ); + + if sandbox_policy_changed { + self.refresh_managed_network_proxy_for_current_sandbox_policy() + .await; + } + + Ok(self + .new_turn_from_configuration( + sub_id, + session_configuration, + updates.final_output_json_schema, + ) + .await) + } + + async fn new_turn_from_configuration( + &self, + sub_id: String, + session_configuration: SessionConfiguration, + final_output_json_schema: Option>, + ) -> Arc { + let per_turn_config = Self::build_per_turn_config(&session_configuration); + { + let mcp_connection_manager = self.services.mcp_connection_manager.read().await; + mcp_connection_manager.set_approval_policy(&session_configuration.approval_policy); + mcp_connection_manager + 
.set_sandbox_policy(per_turn_config.permissions.sandbox_policy.get()); + } + + let model_info = self + .services + .models_manager + .get_model_info( + session_configuration.collaboration_mode.model(), + &per_turn_config.to_models_manager_config(), + ) + .await; + let plugin_outcome = self + .services + .plugins_manager + .plugins_for_config(&per_turn_config) + .await; + let effective_skill_roots = plugin_outcome.effective_skill_roots(); + let skills_input = skills_load_input_from_config(&per_turn_config, effective_skill_roots); + let fs = self + .services + .environment + .as_ref() + .map(|environment| environment.get_filesystem()); + let skills_outcome = Arc::new( + self.services + .skills_manager + .skills_for_config(&skills_input, fs) + .await, + ); + let mut turn_context: TurnContext = Self::make_turn_context( + self.conversation_id, + Some(Arc::clone(&self.services.auth_manager)), + &self.services.session_telemetry, + session_configuration.provider.clone(), + &session_configuration, + self.services.user_shell.as_ref(), + self.services.shell_zsh_path.as_ref(), + self.services.main_execve_wrapper_exe.as_ref(), + per_turn_config, + model_info, + &self.services.models_manager, + self.services + .network_proxy + .as_ref() + .and_then(|started_proxy| { + Self::managed_network_proxy_active_for_sandbox_policy( + session_configuration.sandbox_policy.get(), + ) + .then(|| started_proxy.proxy()) + }), + self.services.environment.clone(), + sub_id, + Arc::clone(&self.js_repl), + skills_outcome, + ); + turn_context.realtime_active = self.conversation.running_state().await.is_some(); + + if let Some(final_schema) = final_output_json_schema { + turn_context.final_output_json_schema = final_schema; + } + let turn_context = Arc::new(turn_context); + turn_context.turn_metadata_state.spawn_git_enrichment_task(); + turn_context + } + + pub(crate) async fn maybe_emit_unknown_model_warning_for_turn(&self, tc: &TurnContext) { + if tc.model_info.used_fallback_model_metadata { + 
self.send_event( + tc, + EventMsg::Warning(WarningEvent { + message: format!( + "Model metadata for `{}` not found. Defaulting to fallback metadata; this can degrade performance and cause issues.", + tc.model_info.slug + ), + }), + ) + .await; + } + } + + pub(crate) async fn new_default_turn(&self) -> Arc { + self.new_default_turn_with_sub_id(self.next_internal_sub_id()) + .await + } + + pub(crate) async fn new_default_turn_with_sub_id(&self, sub_id: String) -> Arc { + let session_configuration = { + let state = self.state.lock().await; + state.session_configuration.clone() + }; + self.new_turn_from_configuration( + sub_id, + session_configuration, + /*final_output_json_schema*/ None, + ) + .await + } +} diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index 55b3619e11..9cd58e044f 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -75,10 +75,9 @@ pub(crate) async fn run_codex_thread_interactive( let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); - let CodexSpawnOk { codex, .. } = Codex::spawn(CodexSpawnArgs { + let CodexSpawnOk { codex, .. 
} = Box::pin(Codex::spawn(CodexSpawnArgs { config, auth_manager, - analytics_events_client: Some(parent_session.services.analytics_events_client.clone()), models_manager, environment_manager: Arc::new(EnvironmentManager::from_environment( parent_ctx.environment.as_deref(), @@ -97,7 +96,8 @@ pub(crate) async fn run_codex_thread_interactive( user_shell_override: None, inherited_exec_policy: Some(Arc::clone(&parent_session.services.exec_policy)), parent_trace: None, - }) + analytics_events_client: Some(parent_session.services.analytics_events_client.clone()), + })) .await?; if parent_session.enabled(codex_features::Feature::GeneralAnalytics) { let thread_config = codex.thread_config_snapshot().await; @@ -172,7 +172,7 @@ pub(crate) async fn run_codex_thread_one_shot( // Use a child token so we can stop the delegate after completion without // requiring the caller to cancel the parent token. let child_cancel = cancel_token.child_token(); - let io = run_codex_thread_interactive( + let io = Box::pin(run_codex_thread_interactive( config, auth_manager, models_manager, @@ -181,7 +181,7 @@ pub(crate) async fn run_codex_thread_one_shot( child_cancel.clone(), subagent_source, initial_history, - ) + )) .await?; // Send the initial input to kick off the one-shot turn. 
@@ -571,7 +571,7 @@ async fn handle_patch_approval( new_guardian_review_id(), GuardianApprovalRequest::ApplyPatch { id: approval_id.clone(), - cwd: parent_ctx.cwd.to_path_buf(), + cwd: parent_ctx.cwd.clone(), files, patch, }, diff --git a/codex-rs/core/src/codex_delegate_tests.rs b/codex-rs/core/src/codex_delegate_tests.rs index 62ee884815..beee114a96 100644 --- a/codex-rs/core/src/codex_delegate_tests.rs +++ b/codex-rs/core/src/codex_delegate_tests.rs @@ -23,9 +23,10 @@ use codex_protocol::request_permissions::RequestPermissionsResponse; use codex_protocol::request_user_input::RequestUserInputAnswer; use codex_protocol::request_user_input::RequestUserInputEvent; use codex_protocol::request_user_input::RequestUserInputQuestion; +use core_test_support::PathBufExt; +use core_test_support::test_path_buf; use pretty_assertions::assert_eq; use std::collections::HashMap; -use std::path::PathBuf; use std::sync::Arc; use tokio::sync::Mutex; use tokio::sync::watch; @@ -282,7 +283,7 @@ async fn handle_exec_approval_uses_call_id_for_guardian_review_and_approval_id_f approval_id: Some("callback-approval-1".to_string()), turn_id: "child-turn-1".to_string(), command: vec!["rm".to_string(), "-rf".to_string(), "tmp".to_string()], - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), reason: Some("unsafe subcommand".to_string()), network_approval_context: None, proposed_execpolicy_amendment: None, @@ -313,7 +314,7 @@ async fn handle_exec_approval_uses_call_id_for_guardian_review_and_approval_id_f let expected_action = GuardianAssessmentAction::Command { source: GuardianCommandSource::Shell, command: "rm -rf tmp".to_string(), - cwd: "/tmp".into(), + cwd: test_path_buf("/tmp").abs(), }; assert!(!assessment_event.id.is_empty()); assert_eq!( diff --git a/codex-rs/core/src/codex_tests.rs b/codex-rs/core/src/codex_tests.rs index 7a9af06a02..a6f76c6034 100644 --- a/codex-rs/core/src/codex_tests.rs +++ b/codex-rs/core/src/codex_tests.rs @@ -45,6 +45,8 @@ use 
crate::rollout::recorder::RolloutRecorder; use crate::state::TaskKind; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; +use crate::tasks::UserShellCommandMode; +use crate::tasks::execute_user_shell_command; use crate::tools::ToolRouter; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolPayload; @@ -110,6 +112,7 @@ use opentelemetry::trace::TraceId; use std::path::Path; use std::time::Duration; use tokio::time::sleep; +use tokio::time::timeout; use tracing_opentelemetry::OpenTelemetrySpanExt; use codex_protocol::mcp::CallToolResult as McpCallToolResult; @@ -312,6 +315,8 @@ fn test_tool_runtime(session: Arc, turn_context: Arc) -> T crate::tools::router::ToolRouterParams { mcp_tools: None, deferred_mcp_tools: None, + unavailable_called_tools: Vec::new(), + parallel_mcp_server_names: HashSet::new(), discoverable_tools: None, dynamic_tools: turn_context.dynamic_tools.as_slice(), }, @@ -408,7 +413,7 @@ fn make_mcp_tool( ) -> ToolInfo { let tool_namespace = if server_name == CODEX_APPS_MCP_SERVER_NAME { connector_name - .map(crate::connectors::sanitize_name) + .map(codex_connectors::metadata::sanitize_name) .map(|connector_name| format!("mcp__{server_name}__{connector_name}")) .unwrap_or_else(|| server_name.to_string()) } else { @@ -495,8 +500,8 @@ fn explicit_connectors_ignore_non_app_tool_matches() { )); } -fn tools_config_for_mcp_tool_exposure(search_tool: bool) -> ToolsConfig { - let config = test_config(); +async fn tools_config_for_mcp_tool_exposure(search_tool: bool) -> ToolsConfig { + let config = test_config().await; let model_info = ModelsManager::construct_model_info_offline_for_tests( "gpt-5-codex", &config.to_models_manager_config(), @@ -630,78 +635,12 @@ async fn start_managed_network_proxy_ignores_invalid_execpolicy_network_rules() Ok(()) } -#[tokio::test] -async fn managed_network_proxy_refreshes_when_sandbox_policy_changes() -> anyhow::Result<()> { - let spec = 
crate::config::NetworkProxySpec::from_config_and_constraints( - NetworkProxyConfig::default(), - Some(NetworkConstraints { - domains: Some(NetworkDomainPermissionsToml { - entries: std::collections::BTreeMap::from([( - "blocked.example.com".to_string(), - NetworkDomainPermissionToml::Deny, - )]), - }), - danger_full_access_denylist_only: Some(true), - allow_local_binding: Some(false), - ..Default::default() - }), - &SandboxPolicy::new_workspace_write_policy(), - )?; - let exec_policy = Policy::empty(); - - let (started_proxy, _) = Session::start_managed_network_proxy( - &spec, - &exec_policy, - &SandboxPolicy::new_workspace_write_policy(), - /*network_policy_decider*/ None, - /*blocked_request_observer*/ None, - /*managed_network_requirements_enabled*/ false, - crate::config::NetworkProxyAuditMetadata::default(), - ) - .await?; - - assert!(!started_proxy.proxy().allow_local_binding()); - let current_cfg = started_proxy.proxy().current_cfg().await?; - assert_eq!(current_cfg.network.allowed_domains(), None); - assert_eq!( - current_cfg.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - - let spec = spec.recompute_for_sandbox_policy(&SandboxPolicy::DangerFullAccess)?; - spec.apply_to_started_proxy(&started_proxy).await?; - - assert!(started_proxy.proxy().allow_local_binding()); - let current_cfg = started_proxy.proxy().current_cfg().await?; - assert_eq!( - current_cfg.network.allowed_domains(), - Some(vec!["*".to_string()]) - ); - assert_eq!( - current_cfg.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - - let spec = spec.recompute_for_sandbox_policy(&SandboxPolicy::new_workspace_write_policy())?; - spec.apply_to_started_proxy(&started_proxy).await?; - - assert!(!started_proxy.proxy().allow_local_binding()); - let current_cfg = started_proxy.proxy().current_cfg().await?; - assert_eq!(current_cfg.network.allowed_domains(), None); - assert_eq!( - current_cfg.network.denied_domains(), - 
Some(vec!["blocked.example.com".to_string()]) - ); - Ok(()) -} - #[tokio::test] async fn managed_network_proxy_decider_survives_full_access_start() -> anyhow::Result<()> { let spec = crate::config::NetworkProxySpec::from_config_and_constraints( NetworkProxyConfig::default(), Some(NetworkConstraints { enabled: Some(true), - danger_full_access_denylist_only: Some(true), ..Default::default() }), &SandboxPolicy::DangerFullAccess, @@ -763,6 +702,293 @@ async fn managed_network_proxy_decider_survives_full_access_start() -> anyhow::R Ok(()) } +#[tokio::test] +async fn new_turn_refreshes_managed_network_proxy_for_sandbox_change() -> anyhow::Result<()> { + let (mut session, _turn_context) = make_session_and_context().await; + let initial_policy = SandboxPolicy::new_workspace_write_policy(); + + let mut network_config = NetworkProxyConfig::default(); + network_config + .network + .set_allowed_domains(vec!["evil.com".to_string()]); + let requirements = NetworkConstraints { + domains: Some(NetworkDomainPermissionsToml { + entries: std::collections::BTreeMap::from([( + "*.example.com".to_string(), + NetworkDomainPermissionToml::Allow, + )]), + }), + ..Default::default() + }; + let spec = crate::config::NetworkProxySpec::from_config_and_constraints( + network_config, + Some(requirements), + &initial_policy, + )?; + let (started_proxy, _) = Session::start_managed_network_proxy( + &spec, + &Policy::empty(), + &initial_policy, + /*network_policy_decider*/ None, + /*blocked_request_observer*/ None, + /*managed_network_requirements_enabled*/ false, + crate::config::NetworkProxyAuditMetadata::default(), + ) + .await?; + assert_eq!( + started_proxy + .proxy() + .current_cfg() + .await? 
+ .network + .allowed_domains(), + Some(vec!["*.example.com".to_string(), "evil.com".to_string()]) + ); + + { + let mut state = session.state.lock().await; + let mut config = (*state.session_configuration.original_config_do_not_use).clone(); + config.permissions.network = Some(spec); + config.permissions.sandbox_policy = + codex_config::Constrained::allow_any(initial_policy.clone()); + state.session_configuration.original_config_do_not_use = Arc::new(config); + state.session_configuration.sandbox_policy = + codex_config::Constrained::allow_any(initial_policy); + } + session.services.network_proxy = Some(started_proxy); + + session + .new_turn_with_sub_id( + "sandbox-policy-change".to_string(), + SessionSettingsUpdate { + sandbox_policy: Some(SandboxPolicy::DangerFullAccess), + ..Default::default() + }, + ) + .await?; + + let started_proxy = session + .services + .network_proxy + .as_ref() + .expect("managed network proxy should be present"); + assert_eq!( + started_proxy + .proxy() + .current_cfg() + .await? 
+ .network + .allowed_domains(), + Some(vec!["*.example.com".to_string()]) + ); + + Ok(()) +} + +#[tokio::test] +async fn danger_full_access_turns_do_not_expose_managed_network_proxy() -> anyhow::Result<()> { + let network_spec = crate::config::NetworkProxySpec::from_config_and_constraints( + NetworkProxyConfig::default(), + Some(NetworkConstraints { + enabled: Some(true), + ..Default::default() + }), + &SandboxPolicy::DangerFullAccess, + )?; + + let session = make_session_with_config(move |config| { + config.permissions.sandbox_policy = + codex_config::Constrained::allow_any(SandboxPolicy::DangerFullAccess); + config.permissions.network = Some(network_spec); + }) + .await?; + + let turn_context = session.new_default_turn().await; + assert!(turn_context.network.is_none()); + Ok(()) +} + +#[tokio::test] +async fn danger_full_access_tool_attempts_do_not_enforce_managed_network() -> anyhow::Result<()> { + #[derive(Default)] + struct ProbeToolRuntime { + enforce_managed_network: Vec, + } + + impl crate::tools::sandboxing::Approvable<()> for ProbeToolRuntime { + type ApprovalKey = String; + + fn approval_keys(&self, _req: &()) -> Vec { + vec!["probe".to_string()] + } + + fn start_approval_async<'a>( + &'a mut self, + _req: &'a (), + _ctx: crate::tools::sandboxing::ApprovalCtx<'a>, + ) -> futures::future::BoxFuture<'a, ReviewDecision> { + Box::pin(async { ReviewDecision::Approved }) + } + } + + impl crate::tools::sandboxing::Sandboxable for ProbeToolRuntime { + fn sandbox_preference(&self) -> codex_sandboxing::SandboxablePreference { + codex_sandboxing::SandboxablePreference::Auto + } + } + + impl crate::tools::sandboxing::ToolRuntime<(), ()> for ProbeToolRuntime { + async fn run( + &mut self, + _req: &(), + attempt: &crate::tools::sandboxing::SandboxAttempt<'_>, + _ctx: &crate::tools::sandboxing::ToolCtx, + ) -> Result<(), crate::tools::sandboxing::ToolError> { + self.enforce_managed_network + .push(attempt.enforce_managed_network); + Ok(()) + } + } + + let network_spec 
= crate::config::NetworkProxySpec::from_config_and_constraints( + NetworkProxyConfig::default(), + Some(NetworkConstraints { + enabled: Some(true), + ..Default::default() + }), + &SandboxPolicy::DangerFullAccess, + )?; + + let session = make_session_with_config(move |config| { + config.permissions.sandbox_policy = + codex_config::Constrained::allow_any(SandboxPolicy::DangerFullAccess); + config.permissions.network = Some(network_spec); + + let layers = config + .config_layer_stack + .get_layers( + ConfigLayerStackOrdering::LowestPrecedenceFirst, + /*include_disabled*/ true, + ) + .into_iter() + .cloned() + .collect(); + let mut requirements = config.config_layer_stack.requirements().clone(); + requirements.network = Some(Sourced::new( + NetworkConstraints { + enabled: Some(true), + ..Default::default() + }, + RequirementSource::CloudRequirements, + )); + let mut requirements_toml = config.config_layer_stack.requirements_toml().clone(); + requirements_toml.network = Some(crate::config_loader::NetworkRequirementsToml { + enabled: Some(true), + ..Default::default() + }); + config.config_layer_stack = ConfigLayerStack::new(layers, requirements, requirements_toml) + .expect("rebuild config layer stack with network requirements"); + }) + .await?; + + let turn = session.new_default_turn().await; + assert!(turn.network.is_none()); + + let mut orchestrator = crate::tools::orchestrator::ToolOrchestrator::new(); + let mut tool = ProbeToolRuntime::default(); + let tool_ctx = crate::tools::sandboxing::ToolCtx { + session: Arc::clone(&session), + turn: Arc::clone(&turn), + call_id: "probe-call".to_string(), + tool_name: "probe".to_string(), + }; + + orchestrator + .run( + &mut tool, + &(), + &tool_ctx, + turn.as_ref(), + AskForApproval::Never, + ) + .await + .expect("probe runtime should succeed"); + + assert_eq!(tool.enforce_managed_network, vec![false]); + + Ok(()) +} + +#[tokio::test] +async fn workspace_write_turns_continue_to_expose_managed_network_proxy() -> 
anyhow::Result<()> { + let sandbox_policy = SandboxPolicy::new_workspace_write_policy(); + let network_spec = crate::config::NetworkProxySpec::from_config_and_constraints( + NetworkProxyConfig::default(), + Some(NetworkConstraints { + enabled: Some(true), + ..Default::default() + }), + &sandbox_policy, + )?; + + let session = make_session_with_config(move |config| { + config.permissions.sandbox_policy = codex_config::Constrained::allow_any(sandbox_policy); + config.permissions.network = Some(network_spec); + }) + .await?; + + let turn_context = session.new_default_turn().await; + assert!(turn_context.network.is_some()); + Ok(()) +} + +#[tokio::test] +async fn user_shell_commands_do_not_inherit_managed_network_proxy() -> anyhow::Result<()> { + let sandbox_policy = SandboxPolicy::new_workspace_write_policy(); + let network_spec = crate::config::NetworkProxySpec::from_config_and_constraints( + NetworkProxyConfig::default(), + Some(NetworkConstraints { + enabled: Some(true), + ..Default::default() + }), + &sandbox_policy, + )?; + + let (session, rx) = make_session_with_config_and_rx(move |config| { + config.permissions.sandbox_policy = codex_config::Constrained::allow_any(sandbox_policy); + config.permissions.network = Some(network_spec); + }) + .await?; + + let turn_context = session.new_default_turn().await; + assert!(turn_context.network.is_some()); + + #[cfg(windows)] + let command = r#"$val = $env:HTTP_PROXY; if ([string]::IsNullOrEmpty($val)) { $val = 'not-set' } ; [System.Console]::Write($val)"#.to_string(); + #[cfg(not(windows))] + let command = r#"sh -c "printf '%s' \"${HTTP_PROXY:-not-set}\"""#.to_string(); + + execute_user_shell_command( + Arc::clone(&session), + turn_context, + command, + CancellationToken::new(), + UserShellCommandMode::StandaloneTurn, + ) + .await; + + loop { + let event = rx.recv().await.expect("channel open"); + if let EventMsg::ExecCommandEnd(event) = event.msg { + assert_eq!(event.exit_code, 0); + assert_eq!(event.stdout.trim(), 
"not-set"); + break; + } + } + + Ok(()) +} + #[tokio::test] async fn get_base_instructions_no_user_content() { let prompt_with_apply_patch_instructions = @@ -798,7 +1024,7 @@ async fn get_base_instructions_no_user_content() { ]; let (session, _turn_context) = make_session_and_context().await; - let config = test_config(); + let config = test_config().await; for test_case in test_cases { let model_info = model_info_for_slug(test_case.slug, &config); @@ -963,10 +1189,10 @@ fn collect_explicit_app_ids_from_skill_items_skips_plain_mentions_with_skill_con assert_eq!(connector_ids, HashSet::::new()); } -#[test] -fn mcp_tool_exposure_directly_exposes_small_effective_tool_sets() { - let config = test_config(); - let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true); +#[tokio::test] +async fn mcp_tool_exposure_directly_exposes_small_effective_tool_sets() { + let config = test_config().await; + let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true).await; let mcp_tools = numbered_mcp_tools(DIRECT_MCP_TOOL_EXPOSURE_THRESHOLD - 1); let exposure = build_mcp_tool_exposure( @@ -985,10 +1211,10 @@ fn mcp_tool_exposure_directly_exposes_small_effective_tool_sets() { assert!(exposure.deferred_tools.is_none()); } -#[test] -fn mcp_tool_exposure_searches_large_effective_tool_sets() { - let config = test_config(); - let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true); +#[tokio::test] +async fn mcp_tool_exposure_searches_large_effective_tool_sets() { + let config = test_config().await; + let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true).await; let mcp_tools = numbered_mcp_tools(DIRECT_MCP_TOOL_EXPOSURE_THRESHOLD); let exposure = build_mcp_tool_exposure( @@ -1011,10 +1237,10 @@ fn mcp_tool_exposure_searches_large_effective_tool_sets() { assert_eq!(deferred_tool_names, expected_tool_names); } -#[test] -fn mcp_tool_exposure_directly_exposes_explicit_apps_in_large_search_sets() { - let config = 
test_config(); - let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true); +#[tokio::test] +async fn mcp_tool_exposure_directly_exposes_explicit_apps_without_deferred_overlap() { + let config = test_config().await; + let tools_config = tools_config_for_mcp_tool_exposure(/*search_tool*/ true).await; let mut mcp_tools = numbered_mcp_tools(DIRECT_MCP_TOOL_EXPOSURE_THRESHOLD - 1); mcp_tools.extend([( "mcp__codex_apps__calendar_create_event".to_string(), @@ -1043,13 +1269,19 @@ fn mcp_tool_exposure_directly_exposes_explicit_apps_in_large_search_sets() { ); assert_eq!( exposure.deferred_tools.as_ref().map(HashMap::len), - Some(DIRECT_MCP_TOOL_EXPOSURE_THRESHOLD) + Some(DIRECT_MCP_TOOL_EXPOSURE_THRESHOLD - 1) ); let deferred_tools = exposure .deferred_tools .as_ref() .expect("large tool sets should be discoverable through tool_search"); - assert!(deferred_tools.contains_key("mcp__codex_apps__calendar_create_event")); + assert!( + tool_names + .iter() + .all(|direct_tool_name| !deferred_tools.contains_key(direct_tool_name)), + "direct tools should not also be deferred: {tool_names:?}" + ); + assert!(!deferred_tools.contains_key("mcp__codex_apps__calendar_create_event")); assert!(deferred_tools.contains_key("mcp__rmcp__tool_0")); } @@ -1450,6 +1682,7 @@ async fn record_initial_history_forked_hydrates_previous_turn_settings() { approval_policy: turn_context.approval_policy.value(), sandbox_policy: turn_context.sandbox_policy.get().clone(), network: None, + file_system_sandbox_policy: None, model: previous_model.to_string(), personality: turn_context.personality, collaboration_mode: Some(turn_context.collaboration_mode.clone()), @@ -2558,12 +2791,19 @@ async fn new_default_turn_uses_config_aware_skills_for_role_overrides() { ) .expect("write skill"); + let skill_fs = session + .services + .environment + .as_ref() + .map(|environment| environment.get_filesystem()) + .unwrap_or_else(|| std::sync::Arc::clone(&codex_exec_server::LOCAL_FS)); let parent_outcome = 
session .services .skills_manager .skills_for_cwd( &crate::skills_load_input_from_config(&parent_config, Vec::new()), /*force_reload*/ true, + Some(Arc::clone(&skill_fs)), ) .await; let parent_skill = parent_outcome @@ -2593,7 +2833,7 @@ enabled = false "custom".to_string(), crate::config::AgentRoleConfig { description: None, - config_file: Some(role_path), + config_file: Some(role_path.to_path_buf()), nickname_candidates: None, }, ); @@ -2706,7 +2946,7 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() { let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), auth_manager.clone(), /*model_catalog*/ None, CollaborationModesConfig::default(), @@ -2759,7 +2999,7 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() { let (tx_event, _rx_event) = async_channel::unbounded(); let (agent_status_tx, _agent_status_rx) = watch::channel(AgentStatus::PendingInit); - let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.clone())); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager))); let skills_manager = Arc::new(SkillsManager::new( config.codex_home.clone(), @@ -2806,7 +3046,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { let conversation_id = ThreadId::default(); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), auth_manager.clone(), /*model_catalog*/ None, CollaborationModesConfig::default(), @@ -2873,7 +3113,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { ); let state = SessionState::new(session_configuration.clone()); - let plugins_manager = 
Arc::new(PluginsManager::new(config.codex_home.clone())); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager))); let skills_manager = Arc::new(SkillsManager::new( config.codex_home.clone(), @@ -2909,6 +3149,11 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { }), rollout: Mutex::new(None), user_shell: Arc::new(default_user_shell()), + agent_identity_manager: Arc::new(crate::agent_identity::AgentIdentityManager::new( + config.as_ref(), + Arc::clone(&auth_manager), + session_configuration.session_source.clone(), + )), shell_snapshot_tx: watch::channel(None).0, show_raw_agent_reasoning: config.show_raw_agent_reasoning, exec_policy, @@ -2925,6 +3170,9 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { network_proxy: None, network_approval: Arc::clone(&network_approval), state_db: None, + thread_store: codex_thread_store::LocalThreadStore::new( + codex_rollout::RolloutConfig::from_view(config.as_ref()), + ), model_client: ModelClient::new( Some(auth_manager.clone()), conversation_id, @@ -2948,11 +3196,18 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { let plugin_outcome = services .plugins_manager - .plugins_for_config(&per_turn_config); + .plugins_for_config(&per_turn_config) + .await; let effective_skill_roots = plugin_outcome.effective_skill_roots(); let skills_input = crate::skills_load_input_from_config(&per_turn_config, effective_skill_roots); - let skills_outcome = Arc::new(services.skills_manager.skills_for_config(&skills_input)); + let skill_fs = environment.get_filesystem(); + let skills_outcome = Arc::new( + services + .skills_manager + .skills_for_config(&skills_input, Some(Arc::clone(&skill_fs))) + .await, + ); let turn_context = Session::make_turn_context( conversation_id, Some(Arc::clone(&auth_manager)), @@ -2996,6 +3251,109 @@ pub(crate) async fn 
make_session_and_context() -> (Session, TurnContext) { (session, turn_context) } +async fn make_session_with_config( + mutator: impl FnOnce(&mut Config), +) -> anyhow::Result> { + let (session, _rx_event) = make_session_with_config_and_rx(mutator).await?; + Ok(session) +} + +async fn make_session_with_config_and_rx( + mutator: impl FnOnce(&mut Config), +) -> anyhow::Result<(Arc, async_channel::Receiver)> { + let codex_home = tempfile::tempdir().expect("create temp dir"); + let mut config = build_test_config(codex_home.path()).await; + mutator(&mut config); + let config = Arc::new(config); + let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); + let models_manager = Arc::new(ModelsManager::new( + config.codex_home.to_path_buf(), + auth_manager.clone(), + /*model_catalog*/ None, + CollaborationModesConfig::default(), + )); + let model = ModelsManager::get_model_offline_for_tests(config.model.as_deref()); + let model_info = ModelsManager::construct_model_info_offline_for_tests( + model.as_str(), + &config.to_models_manager_config(), + ); + let collaboration_mode = CollaborationMode { + mode: ModeKind::Default, + settings: Settings { + model, + reasoning_effort: config.model_reasoning_effort, + developer_instructions: None, + }, + }; + let session_configuration = SessionConfiguration { + provider: config.model_provider.clone(), + collaboration_mode, + model_reasoning_summary: config.model_reasoning_summary, + developer_instructions: config.developer_instructions.clone(), + user_instructions: config.user_instructions.clone(), + service_tier: None, + personality: config.personality, + base_instructions: config + .base_instructions + .clone() + .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + compact_prompt: config.compact_prompt.clone(), + approval_policy: config.permissions.approval_policy.clone(), + approvals_reviewer: config.approvals_reviewer, + sandbox_policy: 
config.permissions.sandbox_policy.clone(), + file_system_sandbox_policy: config.permissions.file_system_sandbox_policy.clone(), + network_sandbox_policy: config.permissions.network_sandbox_policy, + windows_sandbox_level: WindowsSandboxLevel::from_config(&config), + cwd: config.cwd.clone(), + codex_home: config.codex_home.clone(), + thread_name: None, + original_config_do_not_use: Arc::clone(&config), + metrics_service_name: None, + app_server_client_name: None, + app_server_client_version: None, + session_source: SessionSource::Exec, + dynamic_tools: Vec::new(), + persist_extended_history: false, + inherited_shell_snapshot: None, + user_shell_override: None, + }; + + let (tx_event, rx_event) = async_channel::unbounded(); + let (agent_status_tx, _agent_status_rx) = watch::channel(AgentStatus::PendingInit); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); + let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager))); + let skills_manager = Arc::new(SkillsManager::new( + config.codex_home.clone(), + /*bundled_skills_enabled*/ true, + )); + + let session = Session::new( + session_configuration, + Arc::clone(&config), + auth_manager, + models_manager, + Arc::new(ExecPolicyManager::default()), + tx_event, + agent_status_tx, + InitialHistory::New, + SessionSource::Exec, + skills_manager, + plugins_manager, + mcp_manager, + Arc::new(SkillsWatcher::noop()), + AgentControl::default(), + Some(Arc::new( + codex_exec_server::Environment::create(/*exec_server_url*/ None) + .await + .expect("create environment"), + )), + /*analytics_events_client*/ None, + ) + .await?; + + Ok((session, rx_event)) +} + #[tokio::test] async fn notify_request_permissions_response_ignores_unmatched_call_id() { let (session, _turn_context) = make_session_and_context().await; @@ -3651,7 +4009,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( let conversation_id = ThreadId::default(); let auth_manager = 
AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), auth_manager.clone(), /*model_catalog*/ None, CollaborationModesConfig::default(), @@ -3718,7 +4076,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( ); let state = SessionState::new(session_configuration.clone()); - let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.clone())); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); let mcp_manager = Arc::new(McpManager::new(Arc::clone(&plugins_manager))); let skills_manager = Arc::new(SkillsManager::new( config.codex_home.clone(), @@ -3754,6 +4112,11 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( }), rollout: Mutex::new(None), user_shell: Arc::new(default_user_shell()), + agent_identity_manager: Arc::new(crate::agent_identity::AgentIdentityManager::new( + config.as_ref(), + Arc::clone(&auth_manager), + session_configuration.session_source.clone(), + )), shell_snapshot_tx: watch::channel(None).0, show_raw_agent_reasoning: config.show_raw_agent_reasoning, exec_policy, @@ -3770,6 +4133,9 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( network_proxy: None, network_approval: Arc::clone(&network_approval), state_db: None, + thread_store: codex_thread_store::LocalThreadStore::new( + codex_rollout::RolloutConfig::from_view(config.as_ref()), + ), model_client: ModelClient::new( Some(Arc::clone(&auth_manager)), conversation_id, @@ -3793,11 +4159,18 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( let plugin_outcome = services .plugins_manager - .plugins_for_config(&per_turn_config); + .plugins_for_config(&per_turn_config) + .await; let effective_skill_roots = plugin_outcome.effective_skill_roots(); let skills_input = crate::skills_load_input_from_config(&per_turn_config, 
effective_skill_roots); - let skills_outcome = Arc::new(services.skills_manager.skills_for_config(&skills_input)); + let skill_fs = environment.get_filesystem(); + let skills_outcome = Arc::new( + services + .skills_manager + .skills_for_config(&skills_input, Some(Arc::clone(&skill_fs))) + .await, + ); let turn_context = Arc::new(Session::make_turn_context( conversation_id, Some(Arc::clone(&auth_manager)), @@ -3851,6 +4224,35 @@ pub(crate) async fn make_session_and_context_with_rx() -> ( make_session_and_context_with_dynamic_tools_and_rx(Vec::new()).await } +#[tokio::test] +async fn fail_agent_identity_registration_emits_error_without_shutdown() { + let (session, _turn_context, rx_event) = make_session_and_context_with_rx().await; + + session + .fail_agent_identity_registration(anyhow::anyhow!("registration exploded")) + .await; + + let error_event = timeout(Duration::from_secs(1), rx_event.recv()) + .await + .expect("error event should arrive") + .expect("error event should be readable"); + match error_event.msg { + EventMsg::Error(ErrorEvent { + message, + codex_error_info, + }) => { + assert_eq!( + message, + "Agent identity registration failed while `features.use_agent_identity` is enabled: registration exploded".to_string() + ); + assert_eq!(codex_error_info, Some(CodexErrorInfo::Other)); + } + other => panic!("expected error event, got {other:?}"), + } + + assert!(rx_event.try_recv().is_err()); +} + #[tokio::test] async fn refresh_mcp_servers_is_deferred_until_next_turn() { let (session, turn_context) = make_session_and_context().await; @@ -4227,7 +4629,7 @@ async fn handle_output_item_done_records_image_save_history_message() { let turn_context = Arc::new(turn_context); let call_id = "ig_history_records_message"; let expected_saved_path = crate::stream_events_utils::image_generation_artifact_path( - turn_context.config.codex_home.as_path(), + &turn_context.config.codex_home, &session.conversation_id.to_string(), call_id, ); @@ -4251,7 +4653,7 @@ async fn 
handle_output_item_done_records_image_save_history_message() { let history = session.clone_history().await; let image_output_path = crate::stream_events_utils::image_generation_artifact_path( - turn_context.config.codex_home.as_path(), + &turn_context.config.codex_home, &session.conversation_id.to_string(), "", ); @@ -4279,7 +4681,7 @@ async fn handle_output_item_done_skips_image_save_message_when_save_fails() { let turn_context = Arc::new(turn_context); let call_id = "ig_history_no_message"; let expected_saved_path = crate::stream_events_utils::image_generation_artifact_path( - turn_context.config.codex_home.as_path(), + &turn_context.config.codex_home, &session.conversation_id.to_string(), call_id, ); @@ -4349,6 +4751,47 @@ async fn build_initial_context_restates_realtime_start_when_reference_context_is ); } +fn file_system_policy_with_unreadable_glob(turn_context: &TurnContext) -> FileSystemSandboxPolicy { + let mut policy = FileSystemSandboxPolicy::from_legacy_sandbox_policy( + turn_context.sandbox_policy.get(), + &turn_context.cwd, + ); + policy.entries.push(FileSystemSandboxEntry { + path: FileSystemPath::GlobPattern { + pattern: format!("{}/**/*.env", turn_context.cwd.as_path().display()), + }, + access: FileSystemAccessMode::None, + }); + policy +} + +#[tokio::test] +async fn turn_context_item_omits_legacy_equivalent_file_system_sandbox_policy() { + let (_session, mut turn_context) = make_session_and_context().await; + turn_context.file_system_sandbox_policy = FileSystemSandboxPolicy::from_legacy_sandbox_policy( + turn_context.sandbox_policy.get(), + &turn_context.cwd, + ); + + let item = turn_context.to_turn_context_item(); + + assert_eq!(item.file_system_sandbox_policy, None); +} + +#[tokio::test] +async fn turn_context_item_stores_split_file_system_sandbox_policy_when_different() { + let (_session, mut turn_context) = make_session_and_context().await; + let file_system_sandbox_policy = file_system_policy_with_unreadable_glob(&turn_context); + 
turn_context.file_system_sandbox_policy = file_system_sandbox_policy.clone(); + + let item = turn_context.to_turn_context_item(); + + assert_eq!( + item.file_system_sandbox_policy, + Some(file_system_sandbox_policy) + ); +} + #[tokio::test] async fn record_context_updates_and_set_reference_context_item_injects_full_context_when_baseline_missing() { @@ -4487,6 +4930,56 @@ async fn record_context_updates_and_set_reference_context_item_persists_baseline ); } +#[tokio::test] +async fn record_context_updates_and_set_reference_context_item_persists_split_file_system_policy_to_rollout() + { + let (session, mut turn_context) = make_session_and_context().await; + let file_system_sandbox_policy = file_system_policy_with_unreadable_glob(&turn_context); + turn_context.file_system_sandbox_policy = file_system_sandbox_policy.clone(); + let config = session.get_config().await; + let recorder = RolloutRecorder::new( + config.as_ref(), + RolloutRecorderParams::new( + ThreadId::default(), + /*forked_from_id*/ None, + SessionSource::Exec, + BaseInstructions::default(), + Vec::new(), + EventPersistenceMode::Limited, + ), + /*state_db_ctx*/ None, + /*state_builder*/ None, + ) + .await + .expect("create rollout recorder"); + let rollout_path = recorder.rollout_path().to_path_buf(); + { + let mut rollout = session.services.rollout.lock().await; + *rollout = Some(recorder); + } + + session + .record_context_updates_and_set_reference_context_item(&turn_context) + .await; + session.ensure_rollout_materialized().await; + session.flush_rollout().await.expect("rollout should flush"); + + let InitialHistory::Resumed(resumed) = RolloutRecorder::get_rollout_history(&rollout_path) + .await + .expect("read rollout history") + else { + panic!("expected resumed rollout history"); + }; + let persisted_file_system_sandbox_policy = resumed.history.iter().find_map(|item| match item { + RolloutItem::TurnContext(ctx) => ctx.file_system_sandbox_policy.clone(), + _ => None, + }); + assert_eq!( + 
persisted_file_system_sandbox_policy, + Some(file_system_sandbox_policy) + ); +} + #[tokio::test] async fn build_initial_context_prepends_model_switch_message() { let (session, turn_context) = make_session_and_context().await; @@ -5396,6 +5889,8 @@ async fn fatal_tool_error_stops_turn_and_reports_error() { crate::tools::router::ToolRouterParams { deferred_mcp_tools, mcp_tools: Some(tools), + unavailable_called_tools: Vec::new(), + parallel_mcp_server_names: HashSet::new(), discoverable_tools: None, dynamic_tools: turn_context.dynamic_tools.as_slice(), }, diff --git a/codex-rs/core/src/codex_tests_guardian.rs b/codex-rs/core/src/codex_tests_guardian.rs index cc84ee1d87..cad67dcc8f 100644 --- a/codex-rs/core/src/codex_tests_guardian.rs +++ b/codex-rs/core/src/codex_tests_guardian.rs @@ -95,7 +95,7 @@ async fn guardian_allows_shell_additional_permissions_requests_past_policy_valid config.model_provider.base_url = Some(format!("{}/v1", server.uri())); let config = Arc::new(config); let models_manager = Arc::new(crate::test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -417,12 +417,12 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() { let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new( - config.codex_home.clone(), + config.codex_home.to_path_buf(), auth_manager.clone(), /*model_catalog*/ None, CollaborationModesConfig::default(), )); - let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.clone())); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); let skills_manager = Arc::new(SkillsManager::new( config.codex_home.clone(), /*bundled_skills_enabled*/ true, @@ -433,7 +433,6 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() { let CodexSpawnOk 
{ codex, .. } = Codex::spawn(CodexSpawnArgs { config, auth_manager, - analytics_events_client: None, models_manager, environment_manager: Arc::new(EnvironmentManager::new(/*exec_server_url*/ None)), skills_manager, @@ -452,6 +451,7 @@ async fn guardian_subagent_does_not_inherit_parent_exec_policy_rules() { inherited_exec_policy: Some(Arc::new(parent_exec_policy)), user_shell_override: None, parent_trace: None, + analytics_events_client: None, }) .await .expect("spawn guardian subagent"); diff --git a/codex-rs/core/src/codex_thread.rs b/codex-rs/core/src/codex_thread.rs index 86decbe6b2..3642d078c1 100644 --- a/codex-rs/core/src/codex_thread.rs +++ b/codex-rs/core/src/codex_thread.rs @@ -20,9 +20,12 @@ use codex_protocol::protocol::Op; use codex_protocol::protocol::SandboxPolicy; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::Submission; +use codex_protocol::protocol::ThreadMemoryMode; use codex_protocol::protocol::TokenUsage; +use codex_protocol::protocol::TokenUsageInfo; use codex_protocol::protocol::W3cTraceContext; use codex_protocol::user_input::UserInput; +use codex_utils_absolute_path::AbsolutePathBuf; use rmcp::model::ReadResourceRequestParams; use std::collections::HashMap; use std::path::PathBuf; @@ -39,7 +42,7 @@ pub struct ThreadConfigSnapshot { pub approval_policy: AskForApproval, pub approvals_reviewer: ApprovalsReviewer, pub sandbox_policy: SandboxPolicy, - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, pub ephemeral: bool, pub reasoning_effort: Option, pub personality: Option, @@ -95,6 +98,11 @@ impl CodexThread { self.codex.submit_with_trace(op, trace).await } + /// Persist whether this thread is eligible for future memory generation. 
+ pub async fn set_thread_memory_mode(&self, mode: ThreadMemoryMode) -> anyhow::Result<()> { + self.codex.set_thread_memory_mode(mode).await + } + pub async fn steer_input( &self, input: Vec, @@ -137,6 +145,17 @@ impl CodexThread { self.codex.session.total_token_usage().await } + /// Returns the complete token usage snapshot currently cached for this thread. + /// + /// This accessor is intentionally narrower than direct session access: it lets + /// app-server lifecycle paths replay restored usage after resume or fork without + /// exposing broader session mutation authority. A caller that only reads + /// `total_token_usage` would drop last-turn usage and make the v2 + /// `thread/tokenUsage/updated` payload incomplete. + pub async fn token_usage_info(&self) -> Option { + self.codex.session.token_usage_info().await + } + /// Records a user-role session-prefix message without creating a new user turn boundary. pub(crate) async fn inject_user_message_without_turn(&self, message: String) { let message = ResponseItem::Message { @@ -193,6 +212,29 @@ impl CodexThread { Ok(submission_id) } + /// Append raw Responses API items to the thread's model-visible history. 
+ pub async fn inject_response_items(&self, items: Vec) -> CodexResult<()> { + if items.is_empty() { + return Err(CodexErr::InvalidRequest( + "items must not be empty".to_string(), + )); + } + + let turn_context = self.codex.session.new_default_turn().await; + if self.codex.session.reference_context_item().await.is_none() { + self.codex + .session + .record_context_updates_and_set_reference_context_item(turn_context.as_ref()) + .await; + } + self.codex + .session + .record_conversation_items(turn_context.as_ref(), &items) + .await; + self.codex.session.flush_rollout().await?; + Ok(()) + } + pub fn rollout_path(&self) -> Option { self.rollout_path.clone() } diff --git a/codex-rs/core/src/compact.rs b/codex-rs/core/src/compact.rs index e132ce0213..eaf1d92109 100644 --- a/codex-rs/core/src/compact.rs +++ b/codex-rs/core/src/compact.rs @@ -1,7 +1,5 @@ use std::sync::Arc; use std::time::Instant; -use std::time::SystemTime; -use std::time::UNIX_EPOCH; use crate::Prompt; use crate::client::ModelClientSession; @@ -19,6 +17,7 @@ use codex_analytics::CompactionReason; use codex_analytics::CompactionStatus; use codex_analytics::CompactionStrategy; use codex_analytics::CompactionTrigger; +use codex_analytics::now_unix_seconds; use codex_features::Feature; use codex_model_provider_info::ModelProviderInfo; use codex_protocol::error::CodexErr; @@ -59,7 +58,7 @@ pub(crate) enum InitialContextInjection { } pub(crate) fn should_use_remote_compact_task(provider: &ModelProviderInfo) -> bool { - provider.is_openai() + provider.supports_remote_compaction() } pub(crate) async fn run_inline_auto_compact_task( @@ -372,13 +371,6 @@ pub(crate) fn compaction_status_from_result(result: &CodexResult) -> Compa } } -fn now_unix_seconds() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|duration| duration.as_secs()) - .unwrap_or_default() -} - pub fn content_items_to_text(content: &[ContentItem]) -> Option { let mut pieces = Vec::new(); for item in content { diff --git 
a/codex-rs/core/src/compact_tests.rs b/codex-rs/core/src/compact_tests.rs index cecf9ce9d7..f7a8d1dfff 100644 --- a/codex-rs/core/src/compact_tests.rs +++ b/codex-rs/core/src/compact_tests.rs @@ -1,4 +1,6 @@ use super::*; +use codex_model_provider_info::ModelProviderInfo; +use codex_model_provider_info::WireApi; use pretty_assertions::assert_eq; async fn process_compacted_history_with_test_session( @@ -185,6 +187,30 @@ fn build_token_limited_compacted_history_appends_summary_message() { assert_eq!(summary, summary_text); } +#[test] +fn should_use_remote_compact_task_for_azure_provider() { + let provider = ModelProviderInfo { + name: "Azure".into(), + base_url: Some("https://example.com/openai".into()), + env_key: Some("AZURE_OPENAI_API_KEY".into()), + env_key_instructions: None, + experimental_bearer_token: None, + auth: None, + wire_api: WireApi::Responses, + query_params: None, + http_headers: None, + env_http_headers: None, + request_max_retries: None, + stream_max_retries: None, + stream_idle_timeout_ms: None, + websocket_connect_timeout_ms: None, + requires_openai_auth: false, + supports_websockets: false, + }; + + assert!(should_use_remote_compact_task(&provider)); +} + #[tokio::test] async fn process_compacted_history_replaces_developer_messages() { let compacted_history = vec![ diff --git a/codex-rs/core/src/config/agent_roles.rs b/codex-rs/core/src/config/agent_roles.rs index 24d26ebf47..b1d28cf838 100644 --- a/codex-rs/core/src/config/agent_roles.rs +++ b/codex-rs/core/src/config/agent_roles.rs @@ -4,6 +4,7 @@ use crate::config_loader::ConfigLayerStackOrdering; use codex_config::config_toml::AgentRoleToml; use codex_config::config_toml::AgentsToml; use codex_config::config_toml::ConfigToml; +use codex_exec_server::ExecutorFileSystem; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use serde::Deserialize; @@ -14,7 +15,8 @@ use std::path::Path; use std::path::PathBuf; use toml::Value as TomlValue; 
-pub(crate) fn load_agent_roles( +pub(crate) async fn load_agent_roles( + fs: &dyn ExecutorFileSystem, cfg: &ConfigToml, config_layer_stack: &ConfigLayerStack, startup_warnings: &mut Vec, @@ -24,7 +26,7 @@ pub(crate) fn load_agent_roles( /*include_disabled*/ false, ); if layers.is_empty() { - return load_agent_roles_without_layers(cfg); + return load_agent_roles_without_layers(fs, cfg).await; } let mut roles: BTreeMap = BTreeMap::new(); @@ -40,13 +42,14 @@ pub(crate) fn load_agent_roles( }; if let Some(agents_toml) = agents_toml { for (declared_role_name, role_toml) in &agents_toml.roles { - let (role_name, role) = match read_declared_role(declared_role_name, role_toml) { - Ok(role) => role, - Err(err) => { - push_agent_role_warning(startup_warnings, err); - continue; - } - }; + let (role_name, role) = + match read_declared_role(fs, declared_role_name, role_toml).await { + Ok(role) => role, + Err(err) => { + push_agent_role_warning(startup_warnings, err); + continue; + } + }; if let Some(config_file) = role.config_file.clone() { declared_role_files.insert(config_file); } @@ -68,10 +71,13 @@ pub(crate) fn load_agent_roles( if let Some(config_folder) = layer.config_folder() { for (role_name, role) in discover_agent_roles_in_dir( - config_folder.as_path().join("agents").as_path(), + fs, + &config_folder.join("agents"), &declared_role_files, startup_warnings, - )? { + ) + .await? 
+ { if layer_roles.contains_key(&role_name) { push_agent_role_warning( startup_warnings, @@ -113,13 +119,14 @@ fn push_agent_role_warning(startup_warnings: &mut Vec, err: std::io::Err startup_warnings.push(message); } -fn load_agent_roles_without_layers( +async fn load_agent_roles_without_layers( + fs: &dyn ExecutorFileSystem, cfg: &ConfigToml, ) -> std::io::Result> { let mut roles = BTreeMap::new(); if let Some(agents_toml) = cfg.agents.as_ref() { for (declared_role_name, role_toml) in &agents_toml.roles { - let (role_name, role) = read_declared_role(declared_role_name, role_toml)?; + let (role_name, role) = read_declared_role(fs, declared_role_name, role_toml).await?; validate_required_agent_role_description(&role_name, role.description.as_deref())?; if roles.insert(role_name.clone(), role).is_some() { @@ -134,14 +141,17 @@ fn load_agent_roles_without_layers( Ok(roles) } -fn read_declared_role( +async fn read_declared_role( + fs: &dyn ExecutorFileSystem, declared_role_name: &str, role_toml: &AgentRoleToml, ) -> std::io::Result<(String, AgentRoleConfig)> { - let mut role = agent_role_config_from_toml(declared_role_name, role_toml)?; + let mut role = agent_role_config_from_toml(fs, declared_role_name, role_toml).await?; let mut role_name = declared_role_name.to_string(); if let Some(config_file) = role.config_file.as_deref() { - let parsed_file = read_resolved_agent_role_file(config_file, Some(declared_role_name))?; + let config_file = AbsolutePathBuf::from_absolute_path(config_file)?; + let parsed_file = + read_resolved_agent_role_file(fs, &config_file, Some(declared_role_name)).await?; role_name = parsed_file.role_name; role.description = parsed_file.description.or(role.description); role.nickname_candidates = parsed_file.nickname_candidates.or(role.nickname_candidates); @@ -171,12 +181,17 @@ fn agents_toml_from_layer(layer_toml: &TomlValue) -> std::io::Result std::io::Result { - let config_file = role.config_file.as_ref().map(AbsolutePathBuf::to_path_buf); - 
validate_agent_role_config_file(role_name, config_file.as_deref())?; + let config_file = role + .config_file + .as_ref() + .map(AbsolutePathBuf::from_absolute_path) + .transpose()?; + validate_agent_role_config_file(fs, role_name, config_file.as_ref()).await?; let description = normalize_agent_role_description( &format!("agents.{role_name}.description"), role.description.as_deref(), @@ -188,7 +203,7 @@ fn agent_role_config_from_toml( Ok(AgentRoleConfig { description, - config_file, + config_file: config_file.map(AbsolutePathBuf::into_path_buf), nickname_candidates, }) } @@ -293,15 +308,17 @@ pub(crate) fn parse_agent_role_file_contents( }) } -fn read_resolved_agent_role_file( - path: &Path, +async fn read_resolved_agent_role_file( + fs: &dyn ExecutorFileSystem, + path: &AbsolutePathBuf, role_name_hint: Option<&str>, ) -> std::io::Result { - let contents = std::fs::read_to_string(path)?; + let contents = fs.read_file_text(path, /*sandbox*/ None).await?; + let config_base_dir = path.parent().unwrap_or_else(|| path.clone()); parse_agent_role_file_contents( &contents, - path, - path.parent().unwrap_or(path), + path.as_path(), + config_base_dir.as_path(), role_name_hint, ) } @@ -359,31 +376,35 @@ fn validate_agent_role_file_developer_instructions( } } -fn validate_agent_role_config_file( +async fn validate_agent_role_config_file( + fs: &dyn ExecutorFileSystem, role_name: &str, - config_file: Option<&Path>, + config_file: Option<&AbsolutePathBuf>, ) -> std::io::Result<()> { let Some(config_file) = config_file else { return Ok(()); }; - let metadata = std::fs::metadata(config_file).map_err(|e| { - std::io::Error::new( - std::io::ErrorKind::InvalidInput, - format!( - "agents.{role_name}.config_file must point to an existing file at {}: {e}", - config_file.display() - ), - ) - })?; - if metadata.is_file() { + let metadata = fs + .get_metadata(config_file, /*sandbox*/ None) + .await + .map_err(|e| { + std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!( + 
"agents.{role_name}.config_file must point to an existing file at {}: {e}", + config_file.as_path().display() + ), + ) + })?; + if metadata.is_file { Ok(()) } else { Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, format!( "agents.{role_name}.config_file must point to a file: {}", - config_file.display() + config_file.as_path().display() ), )) } @@ -441,19 +462,20 @@ fn normalize_agent_role_nickname_candidates( Ok(Some(normalized_candidates)) } -fn discover_agent_roles_in_dir( - agents_dir: &Path, +async fn discover_agent_roles_in_dir( + fs: &dyn ExecutorFileSystem, + agents_dir: &AbsolutePathBuf, declared_role_files: &BTreeSet, startup_warnings: &mut Vec, ) -> std::io::Result> { let mut roles = BTreeMap::new(); - for agent_file in collect_agent_role_files(agents_dir)? { - if declared_role_files.contains(&agent_file) { + for agent_file in collect_agent_role_files(fs, agents_dir).await? { + if declared_role_files.contains(agent_file.as_path()) { continue; } let parsed_file = - match read_resolved_agent_role_file(&agent_file, /*role_name_hint*/ None) { + match read_resolved_agent_role_file(fs, &agent_file, /*role_name_hint*/ None).await { Ok(parsed_file) => parsed_file, Err(err) => { push_agent_role_warning(startup_warnings, err); @@ -468,7 +490,7 @@ fn discover_agent_roles_in_dir( std::io::ErrorKind::InvalidInput, format!( "duplicate agent role name `{role_name}` discovered in {}", - agents_dir.display() + agents_dir.as_path().display() ), ), ); @@ -478,7 +500,7 @@ fn discover_agent_roles_in_dir( role_name, AgentRoleConfig { description: parsed_file.description, - config_file: Some(agent_file), + config_file: Some(agent_file.to_path_buf()), nickname_candidates: parsed_file.nickname_candidates, }, ); @@ -487,36 +509,36 @@ fn discover_agent_roles_in_dir( Ok(roles) } -fn collect_agent_role_files(dir: &Path) -> std::io::Result> { +async fn collect_agent_role_files( + fs: &dyn ExecutorFileSystem, + dir: &AbsolutePathBuf, +) -> std::io::Result> { let mut files 
= Vec::new(); - collect_agent_role_files_recursive(dir, &mut files)?; - files.sort(); - Ok(files) -} + let mut dirs = vec![dir.clone()]; + while let Some(dir) = dirs.pop() { + let entries = match fs.read_directory(&dir, /*sandbox*/ None).await { + Ok(entries) => entries, + Err(err) if err.kind() == ErrorKind::NotFound => continue, + Err(err) => return Err(err), + }; -fn collect_agent_role_files_recursive(dir: &Path, files: &mut Vec) -> std::io::Result<()> { - let read_dir = match std::fs::read_dir(dir) { - Ok(read_dir) => read_dir, - Err(err) if err.kind() == ErrorKind::NotFound => return Ok(()), - Err(err) => return Err(err), - }; - - for entry in read_dir { - let entry = entry?; - let path = entry.path(); - let file_type = entry.file_type()?; - if file_type.is_dir() { - collect_agent_role_files_recursive(&path, files)?; - continue; - } - if file_type.is_file() - && path - .extension() - .is_some_and(|extension| extension == "toml") - { - files.push(path); + for entry in entries { + let path = dir.join(entry.file_name); + if entry.is_directory { + dirs.push(path); + continue; + } + if entry.is_file + && path + .as_path() + .extension() + .is_some_and(|extension| extension == "toml") + { + files.push(path); + } } } - Ok(()) + files.sort(); + Ok(files) } diff --git a/codex-rs/core/src/config/config_tests.rs b/codex-rs/core/src/config/config_tests.rs index a205b2041c..5bdf768814 100644 --- a/codex-rs/core/src/config/config_tests.rs +++ b/codex-rs/core/src/config/config_tests.rs @@ -1,3 +1,5 @@ +use crate::agents_md::DEFAULT_AGENTS_MD_FILENAME; +use crate::agents_md::LOCAL_AGENTS_MD_FILENAME; use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; use crate::config::edit::apply_blocking; @@ -42,6 +44,7 @@ use codex_config::types::SkillsConfig; use codex_config::types::ToolSuggestDiscoverableType; use codex_config::types::Tui; use codex_config::types::TuiNotificationSettings; +use codex_exec_server::LOCAL_FS; use codex_features::Feature; use 
codex_features::FeaturesToml; use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; @@ -81,11 +84,14 @@ fn stdio_mcp(command: &str) -> McpServerConfig { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -102,11 +108,14 @@ fn http_mcp(url: &str) -> McpServerConfig { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -115,8 +124,8 @@ fn http_mcp(url: &str) -> McpServerConfig { } } -#[test] -fn load_config_normalizes_relative_cwd_override() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_normalizes_relative_cwd_override() -> std::io::Result<()> { let expected_cwd = AbsolutePathBuf::relative_to_current_dir("nested")?; let codex_home = tempdir()?; let config = Config::load_from_base_config_with_overrides( @@ -125,65 +134,62 @@ fn load_config_normalizes_relative_cwd_override() -> std::io::Result<()> { cwd: Some(PathBuf::from("nested")), ..Default::default() }, - codex_home.abs().into_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.cwd, expected_cwd); Ok(()) } -#[test] -fn load_config_records_global_agents_path() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_loads_global_agents_instructions() -> std::io::Result<()> { let codex_home = tempdir()?; - let global_agents_path = codex_home.path().join(DEFAULT_PROJECT_DOC_FILENAME); - std::fs::write(&global_agents_path, "\n global instructions \n")?; + std::fs::write( + codex_home.path().join(DEFAULT_AGENTS_MD_FILENAME), + "\n global instructions 
\n", + )?; let config = Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), - codex_home.abs().into_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.user_instructions.as_deref(), Some("global instructions") ); - assert_eq!( - config.user_instructions_path.as_deref(), - Some(global_agents_path.as_path()) - ); Ok(()) } -#[test] -fn load_config_records_preferred_global_agents_override_path() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_prefers_global_agents_override_instructions() -> std::io::Result<()> { let codex_home = tempdir()?; std::fs::write( - codex_home.path().join(DEFAULT_PROJECT_DOC_FILENAME), + codex_home.path().join(DEFAULT_AGENTS_MD_FILENAME), "global instructions", )?; - let global_agents_override_path = codex_home.path().join(LOCAL_PROJECT_DOC_FILENAME); + let global_agents_override_path = codex_home.path().join(LOCAL_AGENTS_MD_FILENAME); std::fs::write(&global_agents_override_path, "local override instructions")?; let config = Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), - codex_home.abs().into_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.user_instructions.as_deref(), Some("local override instructions") ); - assert_eq!( - config.user_instructions_path.as_deref(), - Some(global_agents_override_path.as_path()) - ); Ok(()) } -#[test] -fn test_toml_parsing() { +#[tokio::test] +async fn test_toml_parsing() { let history_with_persistence = r#" [history] persistence = "save-all" @@ -247,8 +253,9 @@ consolidation_model = "gpt-5" let config = Config::load_from_base_config_with_overrides( memories_cfg, ConfigOverrides::default(), - tempdir().expect("tempdir").path().to_path_buf(), + tempdir().expect("tempdir").abs(), ) + .await .expect("load config from memories settings"); assert_eq!( config.memories, @@ -374,13 +381,14 @@ fn config_toml_deserializes_model_availability_nux() { ); } -#[test] -fn 
runtime_config_defaults_model_availability_nux() { +#[tokio::test] +async fn runtime_config_defaults_model_availability_nux() { let cfg = Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), - tempdir().expect("tempdir").path().to_path_buf(), + tempdir().expect("tempdir").abs(), ) + .await .expect("load config"); assert_eq!( @@ -421,6 +429,7 @@ allow_upstream_proxy = false "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([ ( ":minimal".to_string(), @@ -460,8 +469,9 @@ allow_upstream_proxy = false ); } -#[test] -fn permissions_profiles_network_populates_runtime_network_proxy_spec() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_network_populates_runtime_network_proxy_spec() -> std::io::Result<()> +{ let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -474,6 +484,7 @@ fn permissions_profiles_network_populates_runtime_network_proxy_spec() -> std::i "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":minimal".to_string(), FilesystemPermissionToml::Access(FileSystemAccessMode::Read), @@ -494,8 +505,9 @@ fn permissions_profiles_network_populates_runtime_network_proxy_spec() -> std::i cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; let network = config .permissions .network @@ -507,8 +519,9 @@ fn permissions_profiles_network_populates_runtime_network_proxy_spec() -> std::i Ok(()) } -#[test] -fn permissions_profiles_network_disabled_by_default_does_not_start_proxy() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_network_disabled_by_default_does_not_start_proxy() +-> std::io::Result<()> { let codex_home = TempDir::new()?; let 
cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -521,6 +534,7 @@ fn permissions_profiles_network_disabled_by_default_does_not_start_proxy() -> st "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":minimal".to_string(), FilesystemPermissionToml::Access(FileSystemAccessMode::Read), @@ -544,15 +558,16 @@ fn permissions_profiles_network_disabled_by_default_does_not_start_proxy() -> st cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!(config.permissions.network.is_none()); Ok(()) } -#[test] -fn default_permissions_profile_populates_runtime_sandbox_policy() -> std::io::Result<()> { +#[tokio::test] +async fn default_permissions_profile_populates_runtime_sandbox_policy() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::create_dir_all(cwd.path().join("docs"))?; @@ -565,6 +580,7 @@ fn default_permissions_profile_populates_runtime_sandbox_policy() -> std::io::Re "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([ ( ":minimal".to_string(), @@ -592,8 +608,9 @@ fn default_permissions_profile_populates_runtime_sandbox_policy() -> std::io::Re cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; let memories_root = codex_home.path().join("memories").abs(); assert_eq!( @@ -645,8 +662,84 @@ fn default_permissions_profile_populates_runtime_sandbox_policy() -> std::io::Re Ok(()) } -#[test] -fn permissions_profiles_require_default_permissions() -> std::io::Result<()> { +#[tokio::test] +async fn project_root_glob_none_compiles_to_filesystem_pattern_entry() -> std::io::Result<()> { + let codex_home = TempDir::new()?; + let cwd = 
TempDir::new()?; + tokio::fs::write(cwd.path().join(".git"), "gitdir: nowhere").await?; + + let config = Config::load_from_base_config_with_overrides( + ConfigToml { + default_permissions: Some("workspace".to_string()), + permissions: Some(PermissionsToml { + entries: BTreeMap::from([( + "workspace".to_string(), + PermissionProfileToml { + filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: Some(2), + entries: BTreeMap::from([( + ":project_roots".to_string(), + FilesystemPermissionToml::Scoped(BTreeMap::from([ + (".".to_string(), FileSystemAccessMode::Write), + ("**/*.env".to_string(), FileSystemAccessMode::None), + ])), + )]), + }), + network: None, + }, + )]), + }), + ..Default::default() + }, + ConfigOverrides { + cwd: Some(cwd.path().to_path_buf()), + ..Default::default() + }, + codex_home.abs(), + ) + .await?; + + assert_eq!( + config + .permissions + .file_system_sandbox_policy + .glob_scan_max_depth, + Some(2) + ); + let expected_pattern = AbsolutePathBuf::resolve_path_against_base("**/*.env", cwd.path()) + .to_string_lossy() + .into_owned(); + assert!( + config + .permissions + .file_system_sandbox_policy + .entries + .contains(&FileSystemSandboxEntry { + path: FileSystemPath::GlobPattern { + pattern: expected_pattern, + }, + access: FileSystemAccessMode::None, + }) + ); + assert!( + !config + .permissions + .file_system_sandbox_policy + .entries + .iter() + .any(|entry| matches!( + &entry.path, + FileSystemPath::Special { + value: FileSystemSpecialPath::ProjectRoots { subpath: Some(subpath) }, + } if subpath == std::path::Path::new("**/*.env") + )), + "glob should compile to a filesystem pattern entry, not a literal filesystem entry" + ); + Ok(()) +} + +#[tokio::test] +async fn permissions_profiles_require_default_permissions() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -658,6 +751,7 @@ fn 
permissions_profiles_require_default_permissions() -> std::io::Result<()> { "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":minimal".to_string(), FilesystemPermissionToml::Access(FileSystemAccessMode::Read), @@ -673,8 +767,9 @@ fn permissions_profiles_require_default_permissions() -> std::io::Result<()> { cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await .expect_err("missing default_permissions should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); @@ -685,8 +780,8 @@ fn permissions_profiles_require_default_permissions() -> std::io::Result<()> { Ok(()) } -#[test] -fn permissions_profiles_reject_writes_outside_workspace_root() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_reject_writes_outside_workspace_root() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -700,6 +795,7 @@ fn permissions_profiles_reject_writes_outside_workspace_root() -> std::io::Resul "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( external_write_path.to_string(), FilesystemPermissionToml::Access(FileSystemAccessMode::Write), @@ -715,8 +811,9 @@ fn permissions_profiles_reject_writes_outside_workspace_root() -> std::io::Resul cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await .expect_err("writes outside the workspace root should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); @@ -728,8 +825,8 @@ fn permissions_profiles_reject_writes_outside_workspace_root() -> std::io::Resul Ok(()) } -#[test] -fn permissions_profiles_reject_nested_entries_for_non_project_roots() -> 
std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_reject_nested_entries_for_non_project_roots() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -742,6 +839,7 @@ fn permissions_profiles_reject_nested_entries_for_non_project_roots() -> std::io "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":minimal".to_string(), FilesystemPermissionToml::Scoped(BTreeMap::from([( @@ -760,8 +858,9 @@ fn permissions_profiles_reject_nested_entries_for_non_project_roots() -> std::io cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await .expect_err("nested entries outside :project_roots should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); @@ -772,7 +871,9 @@ fn permissions_profiles_reject_nested_entries_for_non_project_roots() -> std::io Ok(()) } -fn load_workspace_permission_profile(profile: PermissionProfileToml) -> std::io::Result { +async fn load_workspace_permission_profile( + profile: PermissionProfileToml, +) -> std::io::Result { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -789,21 +890,24 @@ fn load_workspace_permission_profile(profile: PermissionProfileToml) -> std::io: cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await } -#[test] -fn permissions_profiles_allow_unknown_special_paths() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_allow_unknown_special_paths() -> std::io::Result<()> { let config = load_workspace_permission_profile(PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( 
":future_special_path".to_string(), FilesystemPermissionToml::Access(FileSystemAccessMode::Read), )]), }), network: None, - })?; + }) + .await?; assert_eq!( config.permissions.file_system_sandbox_policy, @@ -837,10 +941,12 @@ fn permissions_profiles_allow_unknown_special_paths() -> std::io::Result<()> { Ok(()) } -#[test] -fn permissions_profiles_allow_unknown_special_paths_with_nested_entries() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_allow_unknown_special_paths_with_nested_entries() +-> std::io::Result<()> { let config = load_workspace_permission_profile(PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":future_special_path".to_string(), FilesystemPermissionToml::Scoped(BTreeMap::from([( @@ -850,7 +956,8 @@ fn permissions_profiles_allow_unknown_special_paths_with_nested_entries() -> std )]), }), network: None, - })?; + }) + .await?; assert_eq!( config.permissions.file_system_sandbox_policy, @@ -871,12 +978,13 @@ fn permissions_profiles_allow_unknown_special_paths_with_nested_entries() -> std Ok(()) } -#[test] -fn permissions_profiles_allow_missing_filesystem_with_warning() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_allow_missing_filesystem_with_warning() -> std::io::Result<()> { let config = load_workspace_permission_profile(PermissionProfileToml { filesystem: None, network: None, - })?; + }) + .await?; assert_eq!( config.permissions.file_system_sandbox_policy, @@ -902,14 +1010,16 @@ fn permissions_profiles_allow_missing_filesystem_with_warning() -> std::io::Resu Ok(()) } -#[test] -fn permissions_profiles_allow_empty_filesystem_with_warning() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_allow_empty_filesystem_with_warning() -> std::io::Result<()> { let config = load_workspace_permission_profile(PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: 
BTreeMap::new(), }), network: None, - })?; + }) + .await?; assert_eq!( config.permissions.file_system_sandbox_policy, @@ -925,8 +1035,8 @@ fn permissions_profiles_allow_empty_filesystem_with_warning() -> std::io::Result Ok(()) } -#[test] -fn permissions_profiles_reject_project_root_parent_traversal() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_reject_project_root_parent_traversal() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -939,6 +1049,7 @@ fn permissions_profiles_reject_project_root_parent_traversal() -> std::io::Resul "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":project_roots".to_string(), FilesystemPermissionToml::Scoped(BTreeMap::from([( @@ -957,8 +1068,9 @@ fn permissions_profiles_reject_project_root_parent_traversal() -> std::io::Resul cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await .expect_err("parent traversal should be rejected for project root subpaths"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); @@ -969,8 +1081,8 @@ fn permissions_profiles_reject_project_root_parent_traversal() -> std::io::Resul Ok(()) } -#[test] -fn permissions_profiles_allow_network_enablement() -> std::io::Result<()> { +#[tokio::test] +async fn permissions_profiles_allow_network_enablement() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; std::fs::write(cwd.path().join(".git"), "gitdir: nowhere")?; @@ -983,6 +1095,7 @@ fn permissions_profiles_allow_network_enablement() -> std::io::Result<()> { "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::from([( ":minimal".to_string(), 
FilesystemPermissionToml::Access(FileSystemAccessMode::Read), @@ -1001,8 +1114,9 @@ fn permissions_profiles_allow_network_enablement() -> std::io::Result<()> { cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!( config.permissions.network_sandbox_policy.is_enabled(), @@ -1065,8 +1179,8 @@ fn tui_config_missing_notifications_field_defaults_to_enabled() { ); } -#[test] -fn test_sandbox_config_parsing() { +#[tokio::test] +async fn test_sandbox_config_parsing() { let sandbox_full_access = r#" sandbox_mode = "danger-full-access" @@ -1076,13 +1190,15 @@ network_access = false # This should be ignored. let sandbox_full_access_cfg = toml::from_str::(sandbox_full_access) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; - let resolution = sandbox_full_access_cfg.derive_sandbox_policy( - sandbox_mode_override, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &PathBuf::from("/tmp/test"), - /*sandbox_policy_constraint*/ None, - ); + let resolution = sandbox_full_access_cfg + .derive_sandbox_policy( + sandbox_mode_override, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + /*active_project*/ None, + /*sandbox_policy_constraint*/ None, + ) + .await; assert_eq!(resolution, SandboxPolicy::DangerFullAccess); let sandbox_read_only = r#" @@ -1095,13 +1211,15 @@ network_access = true # This should be ignored. 
let sandbox_read_only_cfg = toml::from_str::(sandbox_read_only) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; - let resolution = sandbox_read_only_cfg.derive_sandbox_policy( - sandbox_mode_override, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &PathBuf::from("/tmp/test"), - /*sandbox_policy_constraint*/ None, - ); + let resolution = sandbox_read_only_cfg + .derive_sandbox_policy( + sandbox_mode_override, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + /*active_project*/ None, + /*sandbox_policy_constraint*/ None, + ) + .await; assert_eq!(resolution, SandboxPolicy::new_read_only_policy()); let writable_root = test_absolute_path("/my/workspace"); @@ -1115,6 +1233,9 @@ writable_roots = [ ] exclude_tmpdir_env_var = true exclude_slash_tmp = true + +[projects."/tmp/test"] +trust_level = "trusted" "#, serde_json::json!(writable_root) ); @@ -1122,13 +1243,15 @@ exclude_slash_tmp = true let sandbox_workspace_write_cfg = toml::from_str::(&sandbox_workspace_write) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; - let resolution = sandbox_workspace_write_cfg.derive_sandbox_policy( - sandbox_mode_override, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &PathBuf::from("/tmp/test"), - /*sandbox_policy_constraint*/ None, - ); + let resolution = sandbox_workspace_write_cfg + .derive_sandbox_policy( + sandbox_mode_override, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + /*active_project*/ None, + /*sandbox_policy_constraint*/ None, + ) + .await; if cfg!(target_os = "windows") { assert_eq!(resolution, SandboxPolicy::new_read_only_policy()); } else { @@ -1154,9 +1277,6 @@ writable_roots = [ ] exclude_tmpdir_env_var = true exclude_slash_tmp = true - -[projects."/tmp/test"] -trust_level = "trusted" "#, serde_json::json!(writable_root) ); @@ -1164,13 +1284,15 @@ trust_level = "trusted" let sandbox_workspace_write_cfg = 
toml::from_str::(&sandbox_workspace_write) .expect("TOML deserialization should succeed"); let sandbox_mode_override = None; - let resolution = sandbox_workspace_write_cfg.derive_sandbox_policy( - sandbox_mode_override, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &PathBuf::from("/tmp/test"), - /*sandbox_policy_constraint*/ None, - ); + let resolution = sandbox_workspace_write_cfg + .derive_sandbox_policy( + sandbox_mode_override, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + /*active_project*/ None, + /*sandbox_policy_constraint*/ None, + ) + .await; if cfg!(target_os = "windows") { assert_eq!(resolution, SandboxPolicy::new_read_only_policy()); } else { @@ -1187,8 +1309,8 @@ trust_level = "trusted" } } -#[test] -fn legacy_sandbox_mode_config_builds_split_policies_without_drift() -> std::io::Result<()> { +#[tokio::test] +async fn legacy_sandbox_mode_config_builds_split_policies_without_drift() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cwd = TempDir::new()?; let extra_root = test_absolute_path("/tmp/legacy-extra-root"); @@ -1229,8 +1351,9 @@ exclude_slash_tmp = true cwd: Some(cwd.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; let sandbox_policy = config.permissions.sandbox_policy.get(); assert_eq!( @@ -1391,8 +1514,8 @@ fn filter_mcp_servers_by_allowlist_blocks_all_when_empty() { ); } -#[test] -fn add_dir_override_extends_workspace_writable_roots() -> std::io::Result<()> { +#[tokio::test] +async fn add_dir_override_extends_workspace_writable_roots() -> std::io::Result<()> { let temp_dir = TempDir::new()?; let frontend = temp_dir.path().join("frontend"); let backend = temp_dir.path().join("backend"); @@ -1409,8 +1532,9 @@ fn add_dir_override_extends_workspace_writable_roots() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( ConfigToml::default(), overrides, - temp_dir.path().to_path_buf(), - 
)?; + temp_dir.path().abs(), + ) + .await?; let expected_backend = backend.abs(); if cfg!(target_os = "windows") { @@ -1438,8 +1562,8 @@ fn add_dir_override_extends_workspace_writable_roots() -> std::io::Result<()> { Ok(()) } -#[test] -fn sqlite_home_defaults_to_codex_home_for_workspace_write() -> std::io::Result<()> { +#[tokio::test] +async fn sqlite_home_defaults_to_codex_home_for_workspace_write() -> std::io::Result<()> { let codex_home = TempDir::new()?; let config = Config::load_from_base_config_with_overrides( ConfigToml::default(), @@ -1447,16 +1571,17 @@ fn sqlite_home_defaults_to_codex_home_for_workspace_write() -> std::io::Result<( sandbox_mode: Some(SandboxMode::WorkspaceWrite), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.sqlite_home, codex_home.path().to_path_buf()); Ok(()) } -#[test] -fn workspace_write_always_includes_memories_root_once() -> std::io::Result<()> { +#[tokio::test] +async fn workspace_write_always_includes_memories_root_once() -> std::io::Result<()> { let codex_home = TempDir::new()?; let memories_root = codex_home.path().join("memories"); let config = Config::load_from_base_config_with_overrides( @@ -1471,8 +1596,9 @@ fn workspace_write_always_includes_memories_root_once() -> std::io::Result<()> { sandbox_mode: Some(SandboxMode::WorkspaceWrite), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; if cfg!(target_os = "windows") { match config.permissions.sandbox_policy.get() { @@ -1505,16 +1631,17 @@ fn workspace_write_always_includes_memories_root_once() -> std::io::Result<()> { Ok(()) } -#[test] -fn config_defaults_to_file_cli_auth_store_mode() -> std::io::Result<()> { +#[tokio::test] +async fn config_defaults_to_file_cli_auth_store_mode() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml::default(); let config = Config::load_from_base_config_with_overrides( cfg, 
ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.cli_auth_credentials_store_mode, @@ -1524,8 +1651,8 @@ fn config_defaults_to_file_cli_auth_store_mode() -> std::io::Result<()> { Ok(()) } -#[test] -fn config_honors_explicit_keyring_auth_store_mode() -> std::io::Result<()> { +#[tokio::test] +async fn config_resolves_explicit_keyring_auth_store_mode() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { cli_auth_credentials_store: Some(AuthCredentialsStoreMode::Keyring), @@ -1535,38 +1662,97 @@ fn config_honors_explicit_keyring_auth_store_mode() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.cli_auth_credentials_store_mode, - AuthCredentialsStoreMode::Keyring, + resolve_cli_auth_credentials_store_mode( + AuthCredentialsStoreMode::Keyring, + env!("CARGO_PKG_VERSION"), + ), ); Ok(()) } -#[test] -fn config_defaults_to_auto_oauth_store_mode() -> std::io::Result<()> { +#[tokio::test] +async fn config_resolves_default_oauth_store_mode() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml::default(); let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.mcp_oauth_credentials_store_mode, - OAuthCredentialsStoreMode::Auto, + resolve_mcp_oauth_credentials_store_mode( + OAuthCredentialsStoreMode::Auto, + env!("CARGO_PKG_VERSION"), + ), ); Ok(()) } #[test] -fn feedback_enabled_defaults_to_true() -> std::io::Result<()> { +fn local_dev_builds_force_file_cli_auth_store_modes() { + assert_eq!( + resolve_cli_auth_credentials_store_mode( + AuthCredentialsStoreMode::Keyring, + LOCAL_DEV_BUILD_VERSION, + ), + AuthCredentialsStoreMode::File, + ); + assert_eq!( + 
resolve_cli_auth_credentials_store_mode( + AuthCredentialsStoreMode::Auto, + LOCAL_DEV_BUILD_VERSION, + ), + AuthCredentialsStoreMode::File, + ); + assert_eq!( + resolve_cli_auth_credentials_store_mode( + AuthCredentialsStoreMode::Ephemeral, + LOCAL_DEV_BUILD_VERSION, + ), + AuthCredentialsStoreMode::Ephemeral, + ); + assert_eq!( + resolve_cli_auth_credentials_store_mode(AuthCredentialsStoreMode::Keyring, "1.2.3"), + AuthCredentialsStoreMode::Keyring, + ); +} + +#[test] +fn local_dev_builds_force_file_mcp_oauth_store_modes() { + assert_eq!( + resolve_mcp_oauth_credentials_store_mode( + OAuthCredentialsStoreMode::Keyring, + LOCAL_DEV_BUILD_VERSION, + ), + OAuthCredentialsStoreMode::File, + ); + assert_eq!( + resolve_mcp_oauth_credentials_store_mode( + OAuthCredentialsStoreMode::Auto, + LOCAL_DEV_BUILD_VERSION, + ), + OAuthCredentialsStoreMode::File, + ); + assert_eq!( + resolve_mcp_oauth_credentials_store_mode(OAuthCredentialsStoreMode::Keyring, "1.2.3"), + OAuthCredentialsStoreMode::Keyring, + ); +} + +#[tokio::test] +async fn feedback_enabled_defaults_to_true() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { feedback: Some(FeedbackConfigToml::default()), @@ -1576,8 +1762,9 @@ fn feedback_enabled_defaults_to_true() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.feedback_enabled, true); @@ -1717,8 +1904,8 @@ profile = "project" Ok(()) } -#[test] -fn profile_sandbox_mode_overrides_base() -> std::io::Result<()> { +#[tokio::test] +async fn profile_sandbox_mode_overrides_base() -> std::io::Result<()> { let codex_home = TempDir::new()?; let mut profiles = HashMap::new(); profiles.insert( @@ -1738,8 +1925,9 @@ fn profile_sandbox_mode_overrides_base() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - 
codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!(matches!( config.permissions.sandbox_policy.get(), @@ -1749,8 +1937,8 @@ fn profile_sandbox_mode_overrides_base() -> std::io::Result<()> { Ok(()) } -#[test] -fn cli_override_takes_precedence_over_profile_sandbox_mode() -> std::io::Result<()> { +#[tokio::test] +async fn cli_override_takes_precedence_over_profile_sandbox_mode() -> std::io::Result<()> { let codex_home = TempDir::new()?; let mut profiles = HashMap::new(); profiles.insert( @@ -1771,11 +1959,8 @@ fn cli_override_takes_precedence_over_profile_sandbox_mode() -> std::io::Result< ..Default::default() }; - let config = Config::load_from_base_config_with_overrides( - cfg, - overrides, - codex_home.path().to_path_buf(), - )?; + let config = + Config::load_from_base_config_with_overrides(cfg, overrides, codex_home.abs()).await?; if cfg!(target_os = "windows") { assert!(matches!( @@ -1792,8 +1977,8 @@ fn cli_override_takes_precedence_over_profile_sandbox_mode() -> std::io::Result< Ok(()) } -#[test] -fn feature_table_overrides_legacy_flags() -> std::io::Result<()> { +#[tokio::test] +async fn feature_table_overrides_legacy_flags() -> std::io::Result<()> { let codex_home = TempDir::new()?; let mut entries = BTreeMap::new(); entries.insert("apply_patch_freeform".to_string(), false); @@ -1805,8 +1990,9 @@ fn feature_table_overrides_legacy_flags() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!(!config.features.enabled(Feature::ApplyPatchFreeform)); assert!(!config.include_apply_patch_tool); @@ -1814,8 +2000,8 @@ fn feature_table_overrides_legacy_flags() -> std::io::Result<()> { Ok(()) } -#[test] -fn legacy_toggles_map_to_features() -> std::io::Result<()> { +#[tokio::test] +async fn legacy_toggles_map_to_features() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = 
ConfigToml { experimental_use_unified_exec_tool: Some(true), @@ -1826,8 +2012,9 @@ fn legacy_toggles_map_to_features() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!(config.features.enabled(Feature::ApplyPatchFreeform)); assert!(config.features.enabled(Feature::UnifiedExec)); @@ -1839,8 +2026,8 @@ fn legacy_toggles_map_to_features() -> std::io::Result<()> { Ok(()) } -#[test] -fn responses_websocket_features_do_not_change_wire_api() -> std::io::Result<()> { +#[tokio::test] +async fn responses_websocket_features_do_not_change_wire_api() -> std::io::Result<()> { for feature_key in ["responses_websockets", "responses_websockets_v2"] { let codex_home = TempDir::new()?; let mut entries = BTreeMap::new(); @@ -1853,8 +2040,9 @@ fn responses_websocket_features_do_not_change_wire_api() -> std::io::Result<()> let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.model_provider.wire_api, WireApi::Responses); } @@ -1862,8 +2050,8 @@ fn responses_websocket_features_do_not_change_wire_api() -> std::io::Result<()> Ok(()) } -#[test] -fn config_honors_explicit_file_oauth_store_mode() -> std::io::Result<()> { +#[tokio::test] +async fn config_honors_explicit_file_oauth_store_mode() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { mcp_oauth_credentials_store: Some(OAuthCredentialsStoreMode::File), @@ -1873,8 +2061,9 @@ fn config_honors_explicit_file_oauth_store_mode() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.mcp_oauth_credentials_store_mode, @@ -1897,6 +2086,7 @@ async fn 
managed_config_overrides_oauth_store_mode() -> anyhow::Result<()> { let cwd = codex_home.path().abs(); let config_layer_stack = load_config_layers_state( + LOCAL_FS.as_ref(), codex_home.path(), Some(cwd), &Vec::new(), @@ -1918,11 +2108,15 @@ async fn managed_config_overrides_oauth_store_mode() -> anyhow::Result<()> { let final_config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( final_config.mcp_oauth_credentials_store_mode, - OAuthCredentialsStoreMode::Keyring, + resolve_mcp_oauth_credentials_store_mode( + OAuthCredentialsStoreMode::Keyring, + env!("CARGO_PKG_VERSION"), + ), ); Ok(()) @@ -1953,11 +2147,14 @@ async fn replace_mcp_servers_round_trips_entries() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: None, }, + experimental_environment: Some("remote".to_string()), enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(3)), tool_timeout_sec: Some(Duration::from_secs(5)), + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -1993,6 +2190,7 @@ async fn replace_mcp_servers_round_trips_entries() -> anyhow::Result<()> { } assert_eq!(docs.startup_timeout_sec, Some(Duration::from_secs(3))); assert_eq!(docs.tool_timeout_sec, Some(Duration::from_secs(5))); + assert_eq!(docs.experimental_environment.as_deref(), Some("remote")); assert!(docs.enabled); let empty = BTreeMap::new(); @@ -2022,6 +2220,7 @@ async fn managed_config_wins_over_cli_overrides() -> anyhow::Result<()> { let cwd = codex_home.path().abs(); let config_layer_stack = load_config_layers_state( + LOCAL_FS.as_ref(), codex_home.path(), Some(cwd), &[("model".to_string(), TomlValue::String("cli".to_string()))], @@ -2070,23 +2269,28 @@ fn mcp_servers_toml_parses_per_tool_approval_overrides() { [mcp_servers.docs] command = "docs-server" name = "Docs" 
+default_tools_approval_mode = "prompt" [mcp_servers.docs.tools.search] approval_mode = "approve" "#, ) .expect("TOML deserialization should succeed"); - let tool = config + let server = config .mcp_servers .get("docs") - .and_then(|server| server.tools.get("search")) - .expect("docs/search tool config exists"); + .expect("docs server config exists"); assert_eq!( - tool, - &McpServerToolConfig { + server.default_tools_approval_mode, + Some(AppToolApproval::Prompt) + ); + + assert_eq!( + server.tools.get("search"), + Some(&McpServerToolConfig { approval_mode: Some(AppToolApproval::Approve), - } + }) ); } @@ -2133,25 +2337,26 @@ approval_mode = "approve" ); } -#[test] -fn to_mcp_config_preserves_apps_feature_from_config() -> std::io::Result<()> { +#[tokio::test] +async fn to_mcp_config_preserves_apps_feature_from_config() -> std::io::Result<()> { let codex_home = TempDir::new()?; let mut config = Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; let plugins_manager = PluginsManager::new(codex_home.path().to_path_buf()); - let mcp_config = config.to_mcp_config(&plugins_manager); + let mcp_config = config.to_mcp_config(&plugins_manager).await; assert!(mcp_config.apps_enabled); let _ = config.features.disable(Feature::Apps); - let mcp_config = config.to_mcp_config(&plugins_manager); + let mcp_config = config.to_mcp_config(&plugins_manager).await; assert!(!mcp_config.apps_enabled); let _ = config.features.enable(Feature::Apps); - let mcp_config = config.to_mcp_config(&plugins_manager); + let mcp_config = config.to_mcp_config(&plugins_manager).await; assert!(mcp_config.apps_enabled); Ok(()) @@ -2199,11 +2404,14 @@ async fn replace_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, 
startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2272,11 +2480,14 @@ async fn replace_mcp_servers_serializes_env_vars() -> anyhow::Result<()> { env_vars: vec!["ALPHA".to_string(), "BETA".to_string()], cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2325,11 +2536,14 @@ async fn replace_mcp_servers_serializes_cwd() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: Some(cwd_path.clone()), }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2376,11 +2590,14 @@ async fn replace_mcp_servers_streamable_http_serializes_bearer_token() -> anyhow http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(2)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2443,11 +2660,14 @@ async fn replace_mcp_servers_streamable_http_serializes_custom_headers() -> anyh "DOCS_AUTH".to_string(), )])), }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(2)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2522,11 +2742,14 @@ async fn replace_mcp_servers_streamable_http_removes_optional_sections() -> anyh 
"DOCS_AUTH".to_string(), )])), }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(2)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2554,11 +2777,14 @@ async fn replace_mcp_servers_streamable_http_removes_optional_sections() -> anyh http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2621,11 +2847,14 @@ async fn replace_mcp_servers_streamable_http_isolates_headers_between_servers() "DOCS_AUTH".to_string(), )])), }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(Duration::from_secs(2)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2643,11 +2872,14 @@ async fn replace_mcp_servers_streamable_http_isolates_headers_between_servers() env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2728,11 +2960,14 @@ async fn replace_mcp_servers_serializes_disabled_flag() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: false, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: 
None, scopes: None, @@ -2775,11 +3010,14 @@ async fn replace_mcp_servers_serializes_required_flag() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: true, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -2822,11 +3060,14 @@ async fn replace_mcp_servers_serializes_tool_filters() -> anyhow::Result<()> { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: Some(vec!["allowed".to_string()]), disabled_tools: Some(vec!["blocked".to_string()]), scopes: None, @@ -2873,11 +3114,14 @@ async fn replace_mcp_servers_streamable_http_serializes_oauth_resource() -> anyh http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -3171,13 +3415,13 @@ impl PrecedenceTestFixture { self.cwd.path().to_path_buf() } - fn codex_home(&self) -> PathBuf { - self.codex_home.path().to_path_buf() + fn codex_home(&self) -> AbsolutePathBuf { + self.codex_home.abs() } } -#[test] -fn cli_override_sets_compact_prompt() -> std::io::Result<()> { +#[tokio::test] +async fn cli_override_sets_compact_prompt() -> std::io::Result<()> { let codex_home = TempDir::new()?; let overrides = ConfigOverrides { compact_prompt: Some("Use the compact override".to_string()), @@ -3187,8 +3431,9 @@ fn cli_override_sets_compact_prompt() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( 
ConfigToml::default(), overrides, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.compact_prompt.as_deref(), @@ -3198,8 +3443,8 @@ fn cli_override_sets_compact_prompt() -> std::io::Result<()> { Ok(()) } -#[test] -fn loads_compact_prompt_from_file() -> std::io::Result<()> { +#[tokio::test] +async fn loads_compact_prompt_from_file() -> std::io::Result<()> { let codex_home = TempDir::new()?; let workspace = codex_home.path().join("workspace"); std::fs::create_dir_all(&workspace)?; @@ -3217,11 +3462,8 @@ fn loads_compact_prompt_from_file() -> std::io::Result<()> { ..Default::default() }; - let config = Config::load_from_base_config_with_overrides( - cfg, - overrides, - codex_home.path().to_path_buf(), - )?; + let config = + Config::load_from_base_config_with_overrides(cfg, overrides, codex_home.abs()).await?; assert_eq!( config.compact_prompt.as_deref(), @@ -3231,8 +3473,8 @@ fn loads_compact_prompt_from_file() -> std::io::Result<()> { Ok(()) } -#[test] -fn load_config_uses_requirements_guardian_policy_config() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_uses_requirements_guardian_policy_config() -> std::io::Result<()> { let codex_home = TempDir::new()?; let config_layer_stack = ConfigLayerStack::new( Vec::new(), @@ -3247,14 +3489,16 @@ fn load_config_uses_requirements_guardian_policy_config() -> std::io::Result<()> .map_err(std::io::Error::other)?; let config = Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), ConfigToml::default(), ConfigOverrides { cwd: Some(codex_home.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), config_layer_stack, - )?; + ) + .await?; assert_eq!( config.guardian_policy_config.as_deref(), @@ -3264,8 +3508,8 @@ fn load_config_uses_requirements_guardian_policy_config() -> std::io::Result<()> Ok(()) } -#[test] -fn load_config_ignores_empty_requirements_guardian_policy_config() -> std::io::Result<()> { +#[tokio::test] 
+async fn load_config_ignores_empty_requirements_guardian_policy_config() -> std::io::Result<()> { let codex_home = TempDir::new()?; let config_layer_stack = ConfigLayerStack::new( Vec::new(), @@ -3278,22 +3522,24 @@ fn load_config_ignores_empty_requirements_guardian_policy_config() -> std::io::R .map_err(std::io::Error::other)?; let config = Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), ConfigToml::default(), ConfigOverrides { cwd: Some(codex_home.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), config_layer_stack, - )?; + ) + .await?; assert_eq!(config.guardian_policy_config, None); Ok(()) } -#[test] -fn load_config_rejects_missing_agent_role_config_file() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_rejects_missing_agent_role_config_file() -> std::io::Result<()> { let codex_home = TempDir::new()?; let missing_path = codex_home.path().join("agents").join("researcher.toml"); let cfg = ConfigToml { @@ -3316,8 +3562,9 @@ fn load_config_rejects_missing_agent_role_config_file() -> std::io::Result<()> { let result = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - ); + codex_home.abs(), + ) + .await; let err = result.expect_err("missing role config file should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); let message = err.to_string(); @@ -4158,8 +4405,8 @@ model = "gpt-5-mini" Ok(()) } -#[test] -fn load_config_normalizes_agent_role_nickname_candidates() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_normalizes_agent_role_nickname_candidates() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { agents: Some(AgentsToml { @@ -4184,8 +4431,9 @@ fn load_config_normalizes_agent_role_nickname_candidates() -> std::io::Result<() let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - 
)?; + codex_home.abs(), + ) + .await?; assert_eq!( config @@ -4199,8 +4447,8 @@ fn load_config_normalizes_agent_role_nickname_candidates() -> std::io::Result<() Ok(()) } -#[test] -fn load_config_rejects_empty_agent_role_nickname_candidates() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_rejects_empty_agent_role_nickname_candidates() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { agents: Some(AgentsToml { @@ -4222,8 +4470,9 @@ fn load_config_rejects_empty_agent_role_nickname_candidates() -> std::io::Result let result = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - ); + codex_home.abs(), + ) + .await; let err = result.expect_err("empty nickname candidates should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); assert!( @@ -4234,8 +4483,8 @@ fn load_config_rejects_empty_agent_role_nickname_candidates() -> std::io::Result Ok(()) } -#[test] -fn load_config_rejects_duplicate_agent_role_nickname_candidates() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_rejects_duplicate_agent_role_nickname_candidates() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { agents: Some(AgentsToml { @@ -4257,8 +4506,9 @@ fn load_config_rejects_duplicate_agent_role_nickname_candidates() -> std::io::Re let result = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - ); + codex_home.abs(), + ) + .await; let err = result.expect_err("duplicate nickname candidates should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); assert!( @@ -4269,8 +4519,8 @@ fn load_config_rejects_duplicate_agent_role_nickname_candidates() -> std::io::Re Ok(()) } -#[test] -fn load_config_rejects_unsafe_agent_role_nickname_candidates() -> std::io::Result<()> { +#[tokio::test] +async fn load_config_rejects_unsafe_agent_role_nickname_candidates() -> 
std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { agents: Some(AgentsToml { @@ -4292,8 +4542,9 @@ fn load_config_rejects_unsafe_agent_role_nickname_candidates() -> std::io::Resul let result = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - ); + codex_home.abs(), + ) + .await; let err = result.expect_err("unsafe nickname candidates should be rejected"); assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); assert!(err.to_string().contains( @@ -4303,8 +4554,8 @@ fn load_config_rejects_unsafe_agent_role_nickname_candidates() -> std::io::Resul Ok(()) } -#[test] -fn model_catalog_json_loads_from_path() -> std::io::Result<()> { +#[tokio::test] +async fn model_catalog_json_loads_from_path() -> std::io::Result<()> { let codex_home = TempDir::new()?; let catalog_path = codex_home.path().join("catalog.json"); let mut catalog = bundled_models_response() @@ -4323,15 +4574,16 @@ fn model_catalog_json_loads_from_path() -> std::io::Result<()> { let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.model_catalog, Some(catalog)); Ok(()) } -#[test] -fn model_catalog_json_rejects_empty_catalog() -> std::io::Result<()> { +#[tokio::test] +async fn model_catalog_json_rejects_empty_catalog() -> std::io::Result<()> { let codex_home = TempDir::new()?; let catalog_path = codex_home.path().join("catalog.json"); std::fs::write(&catalog_path, r#"{"models":[]}"#)?; @@ -4344,8 +4596,9 @@ fn model_catalog_json_rejects_empty_catalog() -> std::io::Result<()> { let err = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), + codex_home.abs(), ) + .await .expect_err("empty custom catalog should fail config load"); assert_eq!(err.kind(), ErrorKind::InvalidData); @@ -4470,8 +4723,8 @@ model_verbosity = "high" /// 
/// Note that profiles are the recommended way to specify a group of /// configuration options together. -#[test] -fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { +#[tokio::test] +async fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let o3_profile_overrides = ConfigOverrides { @@ -4483,7 +4736,8 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { fixture.cfg.clone(), o3_profile_overrides, fixture.codex_home(), - )?; + ) + .await?; assert_eq!( Config { model: Some("o3".to_string()), @@ -4509,16 +4763,18 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(/*initial_value*/ None), user_instructions: None, - user_instructions_path: None, notify: None, cwd: fixture.cwd(), cli_auth_credentials_store_mode: Default::default(), mcp_servers: Constrained::allow_any(HashMap::new()), - mcp_oauth_credentials_store_mode: Default::default(), + mcp_oauth_credentials_store_mode: resolve_mcp_oauth_credentials_store_mode( + Default::default(), + LOCAL_DEV_BUILD_VERSION, + ), mcp_oauth_callback_port: None, mcp_oauth_callback_url: None, model_providers: fixture.model_provider_map.clone(), - project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, + project_doc_max_bytes: AGENTS_MD_MAX_BYTES, project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, agent_max_threads: DEFAULT_AGENT_MAX_THREADS, @@ -4527,8 +4783,8 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { memories: MemoriesConfig::default(), agent_job_max_runtime_seconds: DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS, codex_home: fixture.codex_home(), - sqlite_home: fixture.codex_home(), - log_dir: fixture.codex_home().join("log"), + sqlite_home: fixture.codex_home().to_path_buf(), + log_dir: fixture.codex_home().join("log").to_path_buf(), config_layer_stack: Default::default(), startup_warnings: 
Vec::new(), history: History::default(), @@ -4600,8 +4856,8 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { Ok(()) } -#[test] -fn metrics_exporter_defaults_to_statsig_when_missing() -> std::io::Result<()> { +#[tokio::test] +async fn metrics_exporter_defaults_to_statsig_when_missing() -> std::io::Result<()> { let fixture = create_test_fixture()?; let config = Config::load_from_base_config_with_overrides( @@ -4611,14 +4867,15 @@ fn metrics_exporter_defaults_to_statsig_when_missing() -> std::io::Result<()> { ..Default::default() }, fixture.codex_home(), - )?; + ) + .await?; assert_eq!(config.otel.metrics_exporter, OtelExporterKind::Statsig); Ok(()) } -#[test] -fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { +#[tokio::test] +async fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let gpt3_profile_overrides = ConfigOverrides { @@ -4630,7 +4887,8 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { fixture.cfg.clone(), gpt3_profile_overrides, fixture.codex_home(), - )?; + ) + .await?; let expected_gpt3_profile_config = Config { model: Some("gpt-3.5-turbo".to_string()), review_model: None, @@ -4655,16 +4913,18 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(/*initial_value*/ None), user_instructions: None, - user_instructions_path: None, notify: None, cwd: fixture.cwd(), cli_auth_credentials_store_mode: Default::default(), mcp_servers: Constrained::allow_any(HashMap::new()), - mcp_oauth_credentials_store_mode: Default::default(), + mcp_oauth_credentials_store_mode: resolve_mcp_oauth_credentials_store_mode( + Default::default(), + LOCAL_DEV_BUILD_VERSION, + ), mcp_oauth_callback_port: None, mcp_oauth_callback_url: None, model_providers: fixture.model_provider_map.clone(), - project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, + 
project_doc_max_bytes: AGENTS_MD_MAX_BYTES, project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, agent_max_threads: DEFAULT_AGENT_MAX_THREADS, @@ -4673,8 +4933,8 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { memories: MemoriesConfig::default(), agent_job_max_runtime_seconds: DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS, codex_home: fixture.codex_home(), - sqlite_home: fixture.codex_home(), - log_dir: fixture.codex_home().join("log"), + sqlite_home: fixture.codex_home().to_path_buf(), + log_dir: fixture.codex_home().join("log").to_path_buf(), config_layer_stack: Default::default(), startup_warnings: Vec::new(), history: History::default(), @@ -4755,14 +5015,15 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { fixture.cfg.clone(), default_profile_overrides, fixture.codex_home(), - )?; + ) + .await?; assert_eq!(expected_gpt3_profile_config, default_profile_config); Ok(()) } -#[test] -fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { +#[tokio::test] +async fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let zdr_profile_overrides = ConfigOverrides { @@ -4774,7 +5035,8 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { fixture.cfg.clone(), zdr_profile_overrides, fixture.codex_home(), - )?; + ) + .await?; let expected_zdr_profile_config = Config { model: Some("o3".to_string()), review_model: None, @@ -4799,16 +5061,18 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(/*initial_value*/ None), user_instructions: None, - user_instructions_path: None, notify: None, cwd: fixture.cwd(), cli_auth_credentials_store_mode: Default::default(), mcp_servers: Constrained::allow_any(HashMap::new()), - mcp_oauth_credentials_store_mode: Default::default(), + mcp_oauth_credentials_store_mode: 
resolve_mcp_oauth_credentials_store_mode( + Default::default(), + LOCAL_DEV_BUILD_VERSION, + ), mcp_oauth_callback_port: None, mcp_oauth_callback_url: None, model_providers: fixture.model_provider_map.clone(), - project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, + project_doc_max_bytes: AGENTS_MD_MAX_BYTES, project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, agent_max_threads: DEFAULT_AGENT_MAX_THREADS, @@ -4817,8 +5081,8 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { memories: MemoriesConfig::default(), agent_job_max_runtime_seconds: DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS, codex_home: fixture.codex_home(), - sqlite_home: fixture.codex_home(), - log_dir: fixture.codex_home().join("log"), + sqlite_home: fixture.codex_home().to_path_buf(), + log_dir: fixture.codex_home().join("log").to_path_buf(), config_layer_stack: Default::default(), startup_warnings: Vec::new(), history: History::default(), @@ -4891,8 +5155,8 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { Ok(()) } -#[test] -fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { +#[tokio::test] +async fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { let fixture = create_test_fixture()?; let gpt5_profile_overrides = ConfigOverrides { @@ -4904,7 +5168,8 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { fixture.cfg.clone(), gpt5_profile_overrides, fixture.codex_home(), - )?; + ) + .await?; let expected_gpt5_profile_config = Config { model: Some("gpt-5.1".to_string()), review_model: None, @@ -4929,16 +5194,18 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { approvals_reviewer: ApprovalsReviewer::User, enforce_residency: Constrained::allow_any(/*initial_value*/ None), user_instructions: None, - user_instructions_path: None, notify: None, cwd: fixture.cwd(), cli_auth_credentials_store_mode: Default::default(), mcp_servers: 
Constrained::allow_any(HashMap::new()), - mcp_oauth_credentials_store_mode: Default::default(), + mcp_oauth_credentials_store_mode: resolve_mcp_oauth_credentials_store_mode( + Default::default(), + LOCAL_DEV_BUILD_VERSION, + ), mcp_oauth_callback_port: None, mcp_oauth_callback_url: None, model_providers: fixture.model_provider_map.clone(), - project_doc_max_bytes: PROJECT_DOC_MAX_BYTES, + project_doc_max_bytes: AGENTS_MD_MAX_BYTES, project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, agent_max_threads: DEFAULT_AGENT_MAX_THREADS, @@ -4947,8 +5214,8 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { memories: MemoriesConfig::default(), agent_job_max_runtime_seconds: DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS, codex_home: fixture.codex_home(), - sqlite_home: fixture.codex_home(), - log_dir: fixture.codex_home().join("log"), + sqlite_home: fixture.codex_home().to_path_buf(), + log_dir: fixture.codex_home().join("log").to_path_buf(), config_layer_stack: Default::default(), startup_warnings: Vec::new(), history: History::default(), @@ -5021,8 +5288,9 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { Ok(()) } -#[test] -fn test_requirements_web_search_mode_allowlist_does_not_warn_when_unset() -> anyhow::Result<()> { +#[tokio::test] +async fn test_requirements_web_search_mode_allowlist_does_not_warn_when_unset() -> anyhow::Result<()> +{ let fixture = create_test_fixture()?; let requirements_toml = crate::config_loader::ConfigRequirementsToml { @@ -5067,6 +5335,7 @@ fn test_requirements_web_search_mode_allowlist_does_not_warn_when_unset() -> any .expect("config layer stack"); let config = Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), fixture.cfg.clone(), ConfigOverrides { cwd: Some(fixture.cwd_path()), @@ -5074,7 +5343,8 @@ fn test_requirements_web_search_mode_allowlist_does_not_warn_when_unset() -> any }, fixture.codex_home(), config_layer_stack, - )?; + ) + .await?; assert!( !config @@ -5237,9 
+5507,9 @@ fn test_set_default_oss_provider_rejects_legacy_ollama_chat_provider() -> std::i Ok(()) } -#[test] -fn test_load_config_rejects_legacy_ollama_chat_provider_with_helpful_error() -> std::io::Result<()> -{ +#[tokio::test] +async fn test_load_config_rejects_legacy_ollama_chat_provider_with_helpful_error() +-> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg = ConfigToml { model_provider: Some(LEGACY_OLLAMA_CHAT_PROVIDER_ID.to_string()), @@ -5249,8 +5519,9 @@ fn test_load_config_rejects_legacy_ollama_chat_provider_with_helpful_error() -> let result = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - ); + codex_home.abs(), + ) + .await; assert!(result.is_err()); let error = result.unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::NotFound); @@ -5263,8 +5534,8 @@ fn test_load_config_rejects_legacy_ollama_chat_provider_with_helpful_error() -> Ok(()) } -#[test] -fn test_untrusted_project_gets_workspace_write_sandbox() -> anyhow::Result<()> { +#[tokio::test] +async fn test_untrusted_project_gets_workspace_write_sandbox() -> anyhow::Result<()> { let config_with_untrusted = r#" [projects."/tmp/test"] trust_level = "untrusted" @@ -5272,14 +5543,19 @@ trust_level = "untrusted" let cfg = toml::from_str::(config_with_untrusted) .expect("TOML deserialization should succeed"); + let active_project = ProjectConfig { + trust_level: Some(TrustLevel::Untrusted), + }; - let resolution = cfg.derive_sandbox_policy( - /*sandbox_mode_override*/ None, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &PathBuf::from("/tmp/test"), - /*sandbox_policy_constraint*/ None, - ); + let resolution = cfg + .derive_sandbox_policy( + /*sandbox_mode_override*/ None, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + Some(&active_project), + /*sandbox_policy_constraint*/ None, + ) + .await; // Verify that untrusted projects get WorkspaceWrite (or ReadOnly on Windows due to 
downgrade) if cfg!(target_os = "windows") { @@ -5297,9 +5573,9 @@ trust_level = "untrusted" Ok(()) } -#[test] -fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults() -> anyhow::Result<()> -{ +#[tokio::test] +async fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults() +-> anyhow::Result<()> { let project_dir = TempDir::new()?; let project_path = project_dir.path().to_path_buf(); let project_key = project_path.to_string_lossy().to_string(); @@ -5312,6 +5588,9 @@ fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults() )])), ..Default::default() }; + let active_project = ProjectConfig { + trust_level: Some(TrustLevel::Trusted), + }; let constrained = Constrained::new(SandboxPolicy::DangerFullAccess, |candidate| { if matches!(candidate, SandboxPolicy::DangerFullAccess) { Ok(()) @@ -5325,21 +5604,23 @@ fn derive_sandbox_policy_falls_back_to_constraint_value_for_implicit_defaults() } })?; - let resolution = cfg.derive_sandbox_policy( - /*sandbox_mode_override*/ None, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &project_path, - Some(&constrained), - ); + let resolution = cfg + .derive_sandbox_policy( + /*sandbox_mode_override*/ None, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + Some(&active_project), + Some(&constrained), + ) + .await; assert_eq!(resolution, SandboxPolicy::DangerFullAccess); Ok(()) } -#[test] -fn derive_sandbox_policy_preserves_windows_downgrade_for_unsupported_fallback() -> anyhow::Result<()> -{ +#[tokio::test] +async fn derive_sandbox_policy_preserves_windows_downgrade_for_unsupported_fallback() +-> anyhow::Result<()> { let project_dir = TempDir::new()?; let project_path = project_dir.path().to_path_buf(); let project_key = project_path.to_string_lossy().to_string(); @@ -5352,6 +5633,9 @@ fn derive_sandbox_policy_preserves_windows_downgrade_for_unsupported_fallback() )])), ..Default::default() }; + let active_project = ProjectConfig { 
+ trust_level: Some(TrustLevel::Trusted), + }; let constrained = Constrained::new(SandboxPolicy::new_workspace_write_policy(), |candidate| { if matches!(candidate, SandboxPolicy::WorkspaceWrite { .. }) { Ok(()) @@ -5365,13 +5649,15 @@ fn derive_sandbox_policy_preserves_windows_downgrade_for_unsupported_fallback() } })?; - let resolution = cfg.derive_sandbox_policy( - /*sandbox_mode_override*/ None, - /*profile_sandbox_mode*/ None, - WindowsSandboxLevel::Disabled, - &project_path, - Some(&constrained), - ); + let resolution = cfg + .derive_sandbox_policy( + /*sandbox_mode_override*/ None, + /*profile_sandbox_mode*/ None, + WindowsSandboxLevel::Disabled, + Some(&active_project), + Some(&constrained), + ) + .await; if cfg!(target_os = "windows") { assert_eq!(resolution, SandboxPolicy::new_read_only_policy()); @@ -5499,8 +5785,8 @@ fn config_toml_deserializes_mcp_oauth_callback_url() { ); } -#[test] -fn config_loads_mcp_oauth_callback_port_from_toml() -> std::io::Result<()> { +#[tokio::test] +async fn config_loads_mcp_oauth_callback_port_from_toml() -> std::io::Result<()> { let codex_home = TempDir::new()?; let toml = r#" model = "gpt-5.1" @@ -5512,15 +5798,16 @@ mcp_oauth_callback_port = 5678 let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.mcp_oauth_callback_port, Some(5678)); Ok(()) } -#[test] -fn config_loads_allow_login_shell_from_toml() -> std::io::Result<()> { +#[tokio::test] +async fn config_loads_allow_login_shell_from_toml() -> std::io::Result<()> { let codex_home = TempDir::new()?; let cfg: ConfigToml = toml::from_str( r#" @@ -5533,15 +5820,16 @@ allow_login_shell = false let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert!(!config.permissions.allow_login_shell); Ok(()) } -#[test] -fn 
config_loads_mcp_oauth_callback_url_from_toml() -> std::io::Result<()> { +#[tokio::test] +async fn config_loads_mcp_oauth_callback_url_from_toml() -> std::io::Result<()> { let codex_home = TempDir::new()?; let toml = r#" model = "gpt-5.1" @@ -5553,8 +5841,9 @@ mcp_oauth_callback_url = "https://example.com/callback" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.mcp_oauth_callback_url.as_deref(), @@ -5563,8 +5852,8 @@ mcp_oauth_callback_url = "https://example.com/callback" Ok(()) } -#[test] -fn test_untrusted_project_gets_unless_trusted_approval_policy() -> anyhow::Result<()> { +#[tokio::test] +async fn test_untrusted_project_gets_unless_trusted_approval_policy() -> anyhow::Result<()> { let codex_home = TempDir::new()?; let test_project_dir = TempDir::new()?; let test_path = test_project_dir.path(); @@ -5583,8 +5872,9 @@ fn test_untrusted_project_gets_unless_trusted_approval_policy() -> anyhow::Resul cwd: Some(test_path.to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; // Verify that untrusted projects get UnlessTrusted approval policy assert_eq!( @@ -6279,8 +6569,8 @@ async fn feature_requirements_reject_collab_legacy_alias() { ); } -#[test] -fn tool_suggest_discoverables_load_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn tool_suggest_discoverables_load_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" [tool_suggest] @@ -6317,8 +6607,9 @@ discoverables = [ let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.tool_suggest, @@ -6338,8 +6629,8 @@ discoverables = [ Ok(()) } -#[test] -fn experimental_realtime_start_instructions_load_from_config_toml() -> std::io::Result<()> { 
+#[tokio::test] +async fn experimental_realtime_start_instructions_load_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" experimental_realtime_start_instructions = "start instructions from config" @@ -6356,8 +6647,9 @@ experimental_realtime_start_instructions = "start instructions from config" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.experimental_realtime_start_instructions.as_deref(), @@ -6366,8 +6658,8 @@ experimental_realtime_start_instructions = "start instructions from config" Ok(()) } -#[test] -fn experimental_realtime_ws_base_url_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn experimental_realtime_ws_base_url_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" experimental_realtime_ws_base_url = "http://127.0.0.1:8011" @@ -6384,8 +6676,9 @@ experimental_realtime_ws_base_url = "http://127.0.0.1:8011" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.experimental_realtime_ws_base_url.as_deref(), @@ -6394,8 +6687,8 @@ experimental_realtime_ws_base_url = "http://127.0.0.1:8011" Ok(()) } -#[test] -fn experimental_realtime_ws_backend_prompt_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn experimental_realtime_ws_backend_prompt_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" experimental_realtime_ws_backend_prompt = "prompt from config" @@ -6412,8 +6705,9 @@ experimental_realtime_ws_backend_prompt = "prompt from config" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( 
config.experimental_realtime_ws_backend_prompt.as_deref(), @@ -6422,8 +6716,8 @@ experimental_realtime_ws_backend_prompt = "prompt from config" Ok(()) } -#[test] -fn experimental_realtime_ws_startup_context_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn experimental_realtime_ws_startup_context_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" experimental_realtime_ws_startup_context = "startup context from config" @@ -6440,8 +6734,9 @@ experimental_realtime_ws_startup_context = "startup context from config" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.experimental_realtime_ws_startup_context.as_deref(), @@ -6450,8 +6745,8 @@ experimental_realtime_ws_startup_context = "startup context from config" Ok(()) } -#[test] -fn experimental_realtime_ws_model_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn experimental_realtime_ws_model_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" experimental_realtime_ws_model = "realtime-test-model" @@ -6468,8 +6763,9 @@ experimental_realtime_ws_model = "realtime-test-model" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.experimental_realtime_ws_model.as_deref(), @@ -6478,8 +6774,8 @@ experimental_realtime_ws_model = "realtime-test-model" Ok(()) } -#[test] -fn realtime_config_partial_table_uses_realtime_defaults() -> std::io::Result<()> { +#[tokio::test] +async fn realtime_config_partial_table_uses_realtime_defaults() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" [realtime] @@ -6492,8 +6788,9 @@ voice = "marin" let config = Config::load_from_base_config_with_overrides( cfg, 
ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.realtime, @@ -6505,8 +6802,8 @@ voice = "marin" Ok(()) } -#[test] -fn realtime_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn realtime_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" [realtime] @@ -6532,8 +6829,9 @@ voice = "cedar" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!( config.realtime, @@ -6547,8 +6845,8 @@ voice = "cedar" Ok(()) } -#[test] -fn realtime_audio_loads_from_config_toml() -> std::io::Result<()> { +#[tokio::test] +async fn realtime_audio_loads_from_config_toml() -> std::io::Result<()> { let cfg: ConfigToml = toml::from_str( r#" [audio] @@ -6569,8 +6867,9 @@ speaker = "Desk Speakers" let config = Config::load_from_base_config_with_overrides( cfg, ConfigOverrides::default(), - codex_home.path().to_path_buf(), - )?; + codex_home.abs(), + ) + .await?; assert_eq!(config.realtime_audio.microphone.as_deref(), Some("USB Mic")); assert_eq!( diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index 0f1189b22d..5adc0f998c 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -222,15 +222,28 @@ mod document_helpers { if !config.enabled { entry["enabled"] = value(false); } + if let Some(environment) = &config.experimental_environment { + entry["experimental_environment"] = value(environment.clone()); + } if config.required { entry["required"] = value(true); } + if config.supports_parallel_tool_calls { + entry["supports_parallel_tool_calls"] = value(true); + } if let Some(timeout) = config.startup_timeout_sec { entry["startup_timeout_sec"] = value(timeout.as_secs_f64()); } if let Some(timeout) = config.tool_timeout_sec { entry["tool_timeout_sec"] = 
value(timeout.as_secs_f64()); } + if let Some(approval_mode) = config.default_tools_approval_mode { + entry["default_tools_approval_mode"] = value(match approval_mode { + AppToolApproval::Auto => "auto", + AppToolApproval::Prompt => "prompt", + AppToolApproval::Approve => "approve", + }); + } if let Some(enabled_tools) = &config.enabled_tools && !enabled_tools.is_empty() { diff --git a/codex-rs/core/src/config/edit_tests.rs b/codex-rs/core/src/config/edit_tests.rs index af1251b34f..4f340d89b3 100644 --- a/codex-rs/core/src/config/edit_tests.rs +++ b/codex-rs/core/src/config/edit_tests.rs @@ -575,11 +575,14 @@ fn blocking_replace_mcp_servers_round_trips() { env_vars: vec!["FOO".to_string()], cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: true, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: Some(vec!["one".to_string(), "two".to_string()]), disabled_tools: None, scopes: None, @@ -601,11 +604,14 @@ fn blocking_replace_mcp_servers_round_trips() { ), env_http_headers: None, }, + experimental_environment: None, enabled: false, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: Some(std::time::Duration::from_secs(5)), tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: Some(vec!["forbidden".to_string()]), scopes: None, @@ -638,6 +644,7 @@ Z-Header = \"z\" command = \"cmd\" args = [\"--flag\"] env_vars = [\"FOO\"] +supports_parallel_tool_calls = true enabled_tools = [\"one\", \"two\"] [mcp_servers.stdio.env] @@ -663,11 +670,14 @@ fn blocking_replace_mcp_servers_serializes_tool_approval_overrides() { env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + 
default_tools_approval_mode: Some(AppToolApproval::Prompt), enabled_tools: None, disabled_tools: None, scopes: None, @@ -692,6 +702,7 @@ fn blocking_replace_mcp_servers_serializes_tool_approval_overrides() { let expected = "\ [mcp_servers.docs] command = \"docs-server\" +default_tools_approval_mode = \"prompt\" [mcp_servers.docs.tools.search] approval_mode = \"approve\" @@ -723,11 +734,14 @@ foo = { command = "cmd" } env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -774,11 +788,14 @@ foo = { command = "cmd" } # keep me env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: false, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -824,11 +841,14 @@ foo = { command = "cmd", args = ["--flag"] } # keep me env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -875,11 +895,14 @@ foo = { command = "cmd" } env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: false, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 9b2b06e0a8..47a8bdf304 100644 --- a/codex-rs/core/src/config/mod.rs +++ 
b/codex-rs/core/src/config/mod.rs @@ -1,3 +1,4 @@ +use crate::agents_md::AgentsMdManager; use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; use crate::config_loader::CloudRequirementsLoader; @@ -15,8 +16,6 @@ use crate::config_loader::load_config_layers_state; use crate::config_loader::project_trust_key; use crate::memories::memory_root; use crate::path_utils::normalize_for_native_workdir; -use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME; -use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME; use crate::unified_exec::DEFAULT_MAX_BACKGROUND_TERMINAL_TIMEOUT_MS; use crate::unified_exec::MIN_EMPTY_YIELD_TIME_MS; use crate::windows_sandbox::WindowsSandboxLevelExt; @@ -48,6 +47,8 @@ use codex_config::types::ToolSuggestDiscoverable; use codex_config::types::TuiNotificationSettings; use codex_config::types::UriBasedFileOpener; use codex_config::types::WindowsSandboxModeToml; +use codex_exec_server::ExecutorFileSystem; +use codex_exec_server::LOCAL_FS; use codex_features::Feature; use codex_features::FeatureConfigSource; use codex_features::FeatureOverrides; @@ -55,6 +56,7 @@ use codex_features::FeatureToml; use codex_features::Features; use codex_features::FeaturesToml; use codex_features::MultiAgentV2ConfigToml; +use codex_git_utils::resolve_root_git_project_for_trust; use codex_login::AuthManagerConfig; use codex_mcp::McpConfig; use codex_model_provider_info::LEGACY_OLLAMA_CHAT_PROVIDER_ID; @@ -120,10 +122,11 @@ pub use codex_git_utils::GhostSnapshotConfig; /// Maximum number of bytes of the documentation that will be embedded. Larger /// files are *silently truncated* to this size so we do not take up too much of /// the context window. 
-pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB +pub(crate) const AGENTS_MD_MAX_BYTES: usize = 32 * 1024; // 32 KiB pub(crate) const DEFAULT_AGENT_MAX_THREADS: Option = Some(6); pub(crate) const DEFAULT_AGENT_MAX_DEPTH: i32 = 1; pub(crate) const DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS: Option = None; +const LOCAL_DEV_BUILD_VERSION: &str = "0.0.0"; pub const CONFIG_TOML_FILE: &str = "config.toml"; @@ -141,14 +144,41 @@ fn resolve_sqlite_home_env(resolved_cwd: &Path) -> Option { } } +fn resolve_cli_auth_credentials_store_mode( + configured: AuthCredentialsStoreMode, + package_version: &str, +) -> AuthCredentialsStoreMode { + match (package_version, configured) { + ( + LOCAL_DEV_BUILD_VERSION, + AuthCredentialsStoreMode::Keyring | AuthCredentialsStoreMode::Auto, + ) => AuthCredentialsStoreMode::File, + (_, mode) => mode, + } +} + +fn resolve_mcp_oauth_credentials_store_mode( + configured: OAuthCredentialsStoreMode, + package_version: &str, +) -> OAuthCredentialsStoreMode { + match (package_version, configured) { + ( + LOCAL_DEV_BUILD_VERSION, + OAuthCredentialsStoreMode::Keyring | OAuthCredentialsStoreMode::Auto, + ) => OAuthCredentialsStoreMode::File, + (_, mode) => mode, + } +} + #[cfg(test)] -pub(crate) fn test_config() -> Config { +pub(crate) async fn test_config() -> Config { let codex_home = tempfile::tempdir().expect("create temp dir"); Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), - codex_home.path().to_path_buf(), + AbsolutePathBuf::from_absolute_path(codex_home.path()).expect("temp dir should resolve"), ) + .await .expect("load default test config") } @@ -244,9 +274,6 @@ pub struct Config { /// User-provided instructions from AGENTS.md. pub user_instructions: Option, - /// Path to the global AGENTS file loaded into `user_instructions`. - pub user_instructions_path: Option, - /// Base instructions override. 
pub base_instructions: Option, @@ -398,7 +425,7 @@ pub struct Config { /// Directory containing all Codex state (defaults to `~/.codex` but can be /// overridden by the `CODEX_HOME` environment variable). - pub codex_home: PathBuf, + pub codex_home: AbsolutePathBuf, /// Directory where Codex stores the SQLite state DB. pub sqlite_home: PathBuf, @@ -589,7 +616,7 @@ impl Default for MultiAgentV2Config { impl AuthManagerConfig for Config { fn codex_home(&self) -> PathBuf { - self.codex_home.clone() + self.codex_home.to_path_buf() } fn cli_auth_credentials_store_mode(&self) -> AuthCredentialsStoreMode { @@ -651,7 +678,10 @@ impl ConfigBuilder { cloud_requirements, fallback_cwd, } = self; - let codex_home = codex_home.map_or_else(find_codex_home, std::io::Result::Ok)?; + let codex_home = match codex_home { + Some(codex_home) => AbsolutePathBuf::from_absolute_path(codex_home)?, + None => find_codex_home()?, + }; let cli_overrides = cli_overrides.unwrap_or_default(); let mut harness_overrides = harness_overrides.unwrap_or_default(); let loader_overrides = loader_overrides.unwrap_or_default(); @@ -662,6 +692,7 @@ impl ConfigBuilder { }; harness_overrides.cwd = Some(cwd.to_path_buf()); let config_layer_stack = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &cli_overrides, @@ -691,11 +722,13 @@ impl ConfigBuilder { } }; Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), config_toml, harness_overrides, codex_home, config_layer_stack, ) + .await } #[cfg(test)] @@ -717,8 +750,11 @@ impl Config { } } - pub fn to_mcp_config(&self, plugins_manager: &crate::plugins::PluginsManager) -> McpConfig { - let loaded_plugins = plugins_manager.plugins_for_config(self); + pub async fn to_mcp_config( + &self, + plugins_manager: &crate::plugins::PluginsManager, + ) -> McpConfig { + let loaded_plugins = plugins_manager.plugins_for_config(self).await; let mut configured_mcp_servers = self.mcp_servers.get().clone(); for (name, plugin_server) in 
loaded_plugins.effective_mcp_servers() { configured_mcp_servers.entry(name).or_insert(plugin_server); @@ -726,7 +762,7 @@ impl Config { McpConfig { chatgpt_base_url: self.chatgpt_base_url.clone(), - codex_home: self.codex_home.clone(), + codex_home: self.codex_home.to_path_buf(), mcp_oauth_credentials_store_mode: self.mcp_oauth_credentials_store_mode, mcp_oauth_callback_port: self.mcp_oauth_callback_port, mcp_oauth_callback_url: self.mcp_oauth_callback_url.clone(), @@ -753,16 +789,20 @@ impl Config { } /// Load a default configuration when user config files are invalid. - pub fn load_default_with_cli_overrides( + pub async fn load_default_with_cli_overrides( cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result { let codex_home = find_codex_home()?; - Self::load_default_with_cli_overrides_for_codex_home(codex_home, cli_overrides) + Self::load_default_with_cli_overrides_for_codex_home( + codex_home.to_path_buf(), + cli_overrides, + ) + .await } /// Load a default configuration for a specific Codex home without reading /// user, project, or system config layers. 
- pub fn load_default_with_cli_overrides_for_codex_home( + pub async fn load_default_with_cli_overrides_for_codex_home( codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result { @@ -774,13 +814,16 @@ impl Config { })?; let cli_layer = crate::config_loader::build_cli_overrides_layer(&cli_overrides); crate::config_loader::merge_toml_values(&mut merged, &cli_layer); + let codex_home = AbsolutePathBuf::from_absolute_path_checked(codex_home)?; let config_toml = deserialize_config_toml_with_base(merged, &codex_home)?; Self::load_config_with_layer_stack( + LOCAL_FS.as_ref(), config_toml, ConfigOverrides::default(), codex_home, ConfigLayerStack::default(), ) + .await } /// This is a secondary way of creating [Config], which is appropriate when @@ -812,6 +855,7 @@ pub async fn load_config_as_toml_with_cli_overrides( cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result { let config_layer_stack = load_config_layers_state( + LOCAL_FS.as_ref(), codex_home, cwd.cloned(), &cli_overrides, @@ -982,6 +1026,7 @@ pub async fn load_global_mcp_servers( // MCP servers defined in in-repo .codex/ folders. let cwd: Option = None; let config_layer_stack = load_config_layers_state( + LOCAL_FS.as_ref(), codex_home, cwd, &cli_overrides, @@ -1376,22 +1421,32 @@ pub(crate) fn resolve_web_search_mode_for_turn( impl Config { #[cfg(test)] - fn load_from_base_config_with_overrides( + async fn load_from_base_config_with_overrides( cfg: ConfigToml, overrides: ConfigOverrides, - codex_home: PathBuf, + codex_home: AbsolutePathBuf, ) -> std::io::Result { // Note this ignores requirements.toml enforcement for tests. 
let config_layer_stack = ConfigLayerStack::default(); - Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack) + Self::load_config_with_layer_stack( + LOCAL_FS.as_ref(), + cfg, + overrides, + codex_home, + config_layer_stack, + ) + .await } - pub(crate) fn load_config_with_layer_stack( + pub(crate) async fn load_config_with_layer_stack( + fs: &dyn ExecutorFileSystem, cfg: ConfigToml, overrides: ConfigOverrides, - codex_home: PathBuf, + codex_home: AbsolutePathBuf, config_layer_stack: ConfigLayerStack, ) -> std::io::Result { + // Keep the large config-construction future off small test thread stacks. + Box::pin(async move { validate_model_providers(&cfg.model_providers) .map_err(|message| std::io::Error::new(std::io::ErrorKind::InvalidInput, message))?; // Ensure that every field of ConfigRequirements is applied to the final @@ -1408,10 +1463,8 @@ impl Config { network: network_requirements, } = config_layer_stack.requirements().clone(); - let (user_instructions, user_instructions_path) = - Self::load_instructions(Some(&codex_home)) - .map(|loaded| (Some(loaded.contents), Some(loaded.path))) - .unwrap_or((None, None)); + let user_instructions = AgentsMdManager::load_global_instructions(Some(&codex_home)) + .map(|loaded| loaded.contents); let mut startup_warnings = Vec::new(); // Destructure ConfigOverrides fully to ensure all overrides are applied. 
@@ -1508,8 +1561,12 @@ impl Config { .into_iter() .map(|path| AbsolutePathBuf::resolve_path_against_base(path, resolved_cwd.as_path())) .collect(); + let repo_root = resolve_root_git_project_for_trust(fs, &resolved_cwd).await; let active_project = cfg - .get_active_project(resolved_cwd.as_path()) + .get_active_project( + resolved_cwd.as_path(), + repo_root.as_ref().map(AbsolutePathBuf::as_path), + ) .unwrap_or(ProjectConfig { trust_level: None }); let permission_config_syntax = resolve_permission_config_syntax( &config_layer_stack, @@ -1541,7 +1598,6 @@ impl Config { }; let memories_root = memory_root(&codex_home); std::fs::create_dir_all(&memories_root)?; - let memories_root = AbsolutePathBuf::from_absolute_path(&memories_root)?; if !additional_writable_roots .iter() .any(|existing| existing == &memories_root) @@ -1579,6 +1635,7 @@ impl Config { compile_permission_profile( permissions, default_permissions, + resolved_cwd.as_path(), &mut startup_warnings, )?; let mut sandbox_policy = file_system_sandbox_policy @@ -1600,13 +1657,15 @@ impl Config { ) } else { let configured_network_proxy_config = NetworkProxyConfig::default(); - let mut sandbox_policy = cfg.derive_sandbox_policy( - sandbox_mode, - config_profile.sandbox_mode, - windows_sandbox_level, - resolved_cwd.as_path(), - Some(&constrained_sandbox_policy), - ); + let mut sandbox_policy = cfg + .derive_sandbox_policy( + sandbox_mode, + config_profile.sandbox_mode, + windows_sandbox_level, + Some(&active_project), + Some(&constrained_sandbox_policy), + ) + .await; if let SandboxPolicy::WorkspaceWrite { writable_roots, .. 
} = &mut sandbox_policy { for path in &additional_writable_roots { if !writable_roots.iter().any(|existing| existing == path) { @@ -1672,7 +1731,8 @@ impl Config { let multi_agent_v2 = resolve_multi_agent_v2_config(&cfg, &config_profile); let agent_roles = - agent_roles::load_agent_roles(&cfg, &config_layer_stack, &mut startup_warnings)?; + agent_roles::load_agent_roles(fs, &cfg, &config_layer_stack, &mut startup_warnings) + .await?; let openai_base_url = cfg .openai_base_url @@ -1821,8 +1881,12 @@ impl Config { .model_instructions_file .as_ref() .or(cfg.model_instructions_file.as_ref()); - let file_base_instructions = - Self::try_read_non_empty_file(model_instructions_path, "model instructions file")?; + let file_base_instructions = Self::try_read_non_empty_file( + fs, + model_instructions_path, + "model instructions file", + ) + .await?; let base_instructions = base_instructions.or(file_base_instructions); let developer_instructions = developer_instructions.or(cfg.developer_instructions); let include_permissions_instructions = config_profile @@ -1853,9 +1917,11 @@ impl Config { .as_ref() .or(cfg.experimental_compact_prompt_file.as_ref()); let file_compact_prompt = Self::try_read_non_empty_file( + fs, experimental_compact_prompt_path, "experimental compact prompt file", - )?; + ) + .await?; let compact_prompt = compact_prompt.or(file_compact_prompt); let js_repl_node_path = js_repl_node_path_override .or(config_profile.js_repl_node_path.map(Into::into)) @@ -1889,11 +1955,7 @@ impl Config { .log_dir .as_ref() .map(AbsolutePathBuf::to_path_buf) - .unwrap_or_else(|| { - let mut p = codex_home.clone(); - p.push("log"); - p - }); + .unwrap_or_else(|| codex_home.join("log").to_path_buf()); let sqlite_home = cfg .sqlite_home .as_ref() @@ -1965,9 +2027,10 @@ impl Config { if effective_sandbox_policy == original_sandbox_policy { file_system_sandbox_policy } else { - FileSystemSandboxPolicy::from_legacy_sandbox_policy( + 
FileSystemSandboxPolicy::from_legacy_sandbox_policy_preserving_deny_entries( &effective_sandbox_policy, resolved_cwd.as_path(), + &file_system_sandbox_policy, ) }; let effective_file_system_sandbox_policy = effective_file_system_sandbox_policy @@ -2003,7 +2066,6 @@ impl Config { enforce_residency: enforce_residency.value, notify: cfg.notify, user_instructions, - user_instructions_path, base_instructions, personality, developer_instructions, @@ -2014,15 +2076,21 @@ impl Config { include_environment_context, // The config.toml omits "_mode" because it's a config file. However, "_mode" // is important in code to differentiate the mode from the store implementation. - cli_auth_credentials_store_mode: cfg.cli_auth_credentials_store.unwrap_or_default(), + cli_auth_credentials_store_mode: resolve_cli_auth_credentials_store_mode( + cfg.cli_auth_credentials_store.unwrap_or_default(), + env!("CARGO_PKG_VERSION"), + ), mcp_servers, // The config.toml omits "_mode" because it's a config file. However, "_mode" // is important in code to differentiate the mode from the store implementation. 
- mcp_oauth_credentials_store_mode: cfg.mcp_oauth_credentials_store.unwrap_or_default(), + mcp_oauth_credentials_store_mode: resolve_mcp_oauth_credentials_store_mode( + cfg.mcp_oauth_credentials_store.unwrap_or_default(), + env!("CARGO_PKG_VERSION"), + ), mcp_oauth_callback_port: cfg.mcp_oauth_callback_port, mcp_oauth_callback_url: cfg.mcp_oauth_callback_url.clone(), model_providers, - project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES), + project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(AGENTS_MD_MAX_BYTES), project_doc_fallback_filenames: cfg .project_doc_fallback_filenames .unwrap_or_default() @@ -2169,30 +2237,15 @@ impl Config { }, }; Ok(config) - } - - fn load_instructions(codex_dir: Option<&Path>) -> Option { - let base = codex_dir?; - for candidate in [LOCAL_PROJECT_DOC_FILENAME, DEFAULT_PROJECT_DOC_FILENAME] { - let mut path = base.to_path_buf(); - path.push(candidate); - if let Ok(contents) = std::fs::read_to_string(&path) { - let trimmed = contents.trim(); - if !trimmed.is_empty() { - return Some(LoadedUserInstructions { - contents: trimmed.to_string(), - path, - }); - } - } - } - None + }) + .await } /// If `path` is `Some`, attempts to read the file at the given path and /// returns its contents as a trimmed `String`. If the file is empty, or /// is `Some` but cannot be read, returns an `Err`. 
- fn try_read_non_empty_file( + async fn try_read_non_empty_file( + fs: &dyn ExecutorFileSystem, path: Option<&AbsolutePathBuf>, context: &str, ) -> std::io::Result> { @@ -2200,12 +2253,15 @@ impl Config { return Ok(None); }; - let contents = std::fs::read_to_string(path).map_err(|e| { - std::io::Error::new( - e.kind(), - format!("failed to read {context} {}: {e}", path.display()), - ) - })?; + let contents = fs + .read_file_text(path, /*sandbox*/ None) + .await + .map_err(|e| { + std::io::Error::new( + e.kind(), + format!("failed to read {context} {}: {e}", path.display()), + ) + })?; let s = contents.trim().to_string(); if s.is_empty() { @@ -2245,7 +2301,11 @@ impl Config { } pub fn managed_network_requirements_enabled(&self) -> bool { - self.config_layer_stack + !matches!( + self.permissions.sandbox_policy.get(), + SandboxPolicy::DangerFullAccess + ) && self + .config_layer_stack .requirements_toml() .network .is_some() @@ -2256,11 +2316,6 @@ impl Config { } } -struct LoadedUserInstructions { - contents: String, - path: PathBuf, -} - pub(crate) fn uses_deprecated_instructions_file(config_layer_stack: &ConfigLayerStack) -> bool { config_layer_stack .layers_high_to_low() @@ -2305,7 +2360,7 @@ fn toml_uses_deprecated_instructions_file(value: &TomlValue) -> bool { /// value will be canonicalized and this function will Err otherwise. /// - If `CODEX_HOME` is not set, this function does not verify that the /// directory exists. 
-pub fn find_codex_home() -> std::io::Result { +pub fn find_codex_home() -> std::io::Result { codex_utils_home_dir::find_codex_home() } diff --git a/codex-rs/core/src/config/network_proxy_spec.rs b/codex-rs/core/src/config/network_proxy_spec.rs index b67c41a442..acabe24f20 100644 --- a/codex-rs/core/src/config/network_proxy_spec.rs +++ b/codex-rs/core/src/config/network_proxy_spec.rs @@ -20,8 +20,6 @@ use codex_protocol::protocol::SandboxPolicy; use std::collections::HashSet; use std::sync::Arc; -const GLOBAL_ALLOWLIST_PATTERN: &str = "*"; - #[derive(Debug, Clone, PartialEq, Eq)] pub struct NetworkProxySpec { base_config: NetworkProxyConfig, @@ -225,8 +223,6 @@ impl NetworkProxySpec { let allowlist_expansion_enabled = Self::allowlist_expansion_enabled(sandbox_policy, hard_deny_allowlist_misses); let denylist_expansion_enabled = Self::denylist_expansion_enabled(sandbox_policy); - let danger_full_access_denylist_only = - Self::danger_full_access_denylist_only_enabled(requirements, sandbox_policy); if let Some(enabled) = requirements.enabled { config.network.enabled = enabled; @@ -257,43 +253,37 @@ impl NetworkProxySpec { constraints.dangerously_allow_all_unix_sockets = Some(dangerously_allow_all_unix_sockets); } - if danger_full_access_denylist_only { - config - .network - .set_allowed_domains(vec![GLOBAL_ALLOWLIST_PATTERN.to_string()]); - } else { - let managed_allowed_domains = if hard_deny_allowlist_misses { - Some( - requirements - .domains - .as_ref() - .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) - .unwrap_or_default(), - ) - } else { + let managed_allowed_domains = if hard_deny_allowlist_misses { + Some( requirements .domains .as_ref() .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) + .unwrap_or_default(), + ) + } else { + requirements + .domains + .as_ref() + .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) + }; + if let Some(managed_allowed_domains) = managed_allowed_domains { + // 
Managed requirements seed the baseline allowlist. User additions + // can extend that baseline unless managed-only mode pins the + // effective allowlist to the managed set. + let effective_allowed_domains = if allowlist_expansion_enabled { + Self::merge_domain_lists( + managed_allowed_domains.clone(), + config.network.allowed_domains().as_deref().unwrap_or(&[]), + ) + } else { + managed_allowed_domains.clone() }; - if let Some(managed_allowed_domains) = managed_allowed_domains { - // Managed requirements seed the baseline allowlist. User additions - // can extend that baseline unless managed-only mode pins the - // effective allowlist to the managed set. - let effective_allowed_domains = if allowlist_expansion_enabled { - Self::merge_domain_lists( - managed_allowed_domains.clone(), - config.network.allowed_domains().as_deref().unwrap_or(&[]), - ) - } else { - managed_allowed_domains.clone() - }; - config - .network - .set_allowed_domains(effective_allowed_domains); - constraints.allowed_domains = Some(managed_allowed_domains); - constraints.allowlist_expansion_enabled = Some(allowlist_expansion_enabled); - } + config + .network + .set_allowed_domains(effective_allowed_domains); + constraints.allowed_domains = Some(managed_allowed_domains); + constraints.allowlist_expansion_enabled = Some(allowlist_expansion_enabled); } let managed_denied_domains = requirements .domains @@ -312,7 +302,7 @@ impl NetworkProxySpec { constraints.denied_domains = Some(managed_denied_domains); constraints.denylist_expansion_enabled = Some(denylist_expansion_enabled); } - if requirements.unix_sockets.is_some() && !danger_full_access_denylist_only { + if requirements.unix_sockets.is_some() { let allow_unix_sockets = requirements .unix_sockets .as_ref() @@ -327,14 +317,6 @@ impl NetworkProxySpec { config.network.allow_local_binding = allow_local_binding; constraints.allow_local_binding = Some(allow_local_binding); } - if danger_full_access_denylist_only { - 
config.network.allow_upstream_proxy = true; - constraints.allow_upstream_proxy = Some(true); - config.network.dangerously_allow_all_unix_sockets = true; - constraints.dangerously_allow_all_unix_sockets = Some(true); - config.network.allow_local_binding = true; - constraints.allow_local_binding = Some(true); - } (config, constraints) } @@ -353,16 +335,6 @@ impl NetworkProxySpec { requirements.managed_allowed_domains_only.unwrap_or(false) } - fn danger_full_access_denylist_only_enabled( - requirements: &NetworkConstraints, - sandbox_policy: &SandboxPolicy, - ) -> bool { - matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) - && requirements - .danger_full_access_denylist_only - .unwrap_or(false) - } - fn denylist_expansion_enabled(sandbox_policy: &SandboxPolicy) -> bool { matches!( sandbox_policy, diff --git a/codex-rs/core/src/config/network_proxy_spec_tests.rs b/codex-rs/core/src/config/network_proxy_spec_tests.rs index ff351d1e73..5ba4bd1536 100644 --- a/codex-rs/core/src/config/network_proxy_spec_tests.rs +++ b/codex-rs/core/src/config/network_proxy_spec_tests.rs @@ -1,11 +1,8 @@ use super::*; use crate::config_loader::NetworkDomainPermissionToml; use crate::config_loader::NetworkDomainPermissionsToml; -use crate::config_loader::NetworkUnixSocketPermissionToml; -use crate::config_loader::NetworkUnixSocketPermissionsToml; use codex_network_proxy::NetworkDomainPermission; use pretty_assertions::assert_eq; -use std::collections::BTreeMap; fn domain_permissions( entries: impl IntoIterator, @@ -183,196 +180,6 @@ fn danger_full_access_keeps_managed_allowlist_and_denylist_fixed() { assert_eq!(spec.constraints.denylist_expansion_enabled, Some(false)); } -#[test] -fn danger_full_access_denylist_only_allows_all_domains_and_enforces_managed_denies() { - let mut config = NetworkProxyConfig::default(); - config - .network - .set_allowed_domains(vec!["evil.com".to_string()]); - config - .network - .set_denied_domains(vec!["more-blocked.example.com".to_string()]); - let 
requirements = NetworkConstraints { - allow_upstream_proxy: Some(false), - dangerously_allow_all_unix_sockets: Some(false), - domains: Some(domain_permissions([ - ("*.example.com", NetworkDomainPermissionToml::Allow), - ("blocked.example.com", NetworkDomainPermissionToml::Deny), - ])), - danger_full_access_denylist_only: Some(true), - unix_sockets: Some(NetworkUnixSocketPermissionsToml { - entries: BTreeMap::from([( - "/tmp/managed.sock".to_string(), - NetworkUnixSocketPermissionToml::Allow, - )]), - }), - allow_local_binding: Some(false), - ..Default::default() - }; - - let spec = NetworkProxySpec::from_config_and_constraints( - config, - Some(requirements), - &SandboxPolicy::DangerFullAccess, - ) - .expect("denylist-only yolo mode should allow all domains except managed denies"); - - assert_eq!( - spec.config.network.allowed_domains(), - Some(vec!["*".to_string()]) - ); - assert_eq!( - spec.config.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - assert!(spec.config.network.allow_upstream_proxy); - assert!(spec.config.network.dangerously_allow_all_unix_sockets); - assert!(spec.config.network.allow_local_binding); - assert_eq!(spec.constraints.allow_upstream_proxy, Some(true)); - assert_eq!( - spec.constraints.dangerously_allow_all_unix_sockets, - Some(true) - ); - assert_eq!(spec.constraints.allow_unix_sockets, None); - assert_eq!(spec.constraints.allow_local_binding, Some(true)); - assert_eq!(spec.constraints.allowed_domains, None); - assert_eq!(spec.constraints.allowlist_expansion_enabled, None); - assert_eq!( - spec.constraints.denied_domains, - Some(vec!["blocked.example.com".to_string()]) - ); - assert_eq!(spec.constraints.denylist_expansion_enabled, Some(false)); -} - -#[test] -fn danger_full_access_denylist_only_does_not_change_workspace_write_behavior() { - let mut config = NetworkProxyConfig::default(); - config - .network - .set_allowed_domains(vec!["api.example.com".to_string()]); - config - .network - 
.set_denied_domains(vec!["blocked.example.com".to_string()]); - let requirements = NetworkConstraints { - allow_upstream_proxy: Some(false), - dangerously_allow_all_unix_sockets: Some(false), - domains: Some(domain_permissions([ - ("*.example.com", NetworkDomainPermissionToml::Allow), - ( - "managed-blocked.example.com", - NetworkDomainPermissionToml::Deny, - ), - ])), - danger_full_access_denylist_only: Some(true), - unix_sockets: Some(NetworkUnixSocketPermissionsToml { - entries: BTreeMap::from([( - "/tmp/managed.sock".to_string(), - NetworkUnixSocketPermissionToml::Allow, - )]), - }), - allow_local_binding: Some(false), - ..Default::default() - }; - - let spec = NetworkProxySpec::from_config_and_constraints( - config, - Some(requirements), - &SandboxPolicy::new_workspace_write_policy(), - ) - .expect("denylist-only yolo flag should not affect workspace-write mode"); - - assert_eq!( - spec.config.network.allowed_domains(), - Some(vec![ - "*.example.com".to_string(), - "api.example.com".to_string() - ]) - ); - assert_eq!( - spec.config.network.denied_domains(), - Some(vec![ - "managed-blocked.example.com".to_string(), - "blocked.example.com".to_string() - ]) - ); - assert!(!spec.config.network.allow_upstream_proxy); - assert!(!spec.config.network.dangerously_allow_all_unix_sockets); - assert_eq!( - spec.config.network.allow_unix_sockets(), - vec!["/tmp/managed.sock".to_string()] - ); - assert!(!spec.config.network.allow_local_binding); - assert_eq!(spec.constraints.allow_upstream_proxy, Some(false)); - assert_eq!( - spec.constraints.dangerously_allow_all_unix_sockets, - Some(false) - ); - assert_eq!( - spec.constraints.allow_unix_sockets, - Some(vec!["/tmp/managed.sock".to_string()]) - ); - assert_eq!(spec.constraints.allow_local_binding, Some(false)); - assert_eq!( - spec.constraints.allowed_domains, - Some(vec!["*.example.com".to_string()]) - ); - assert_eq!(spec.constraints.allowlist_expansion_enabled, Some(true)); - assert_eq!( - 
spec.constraints.denied_domains, - Some(vec!["managed-blocked.example.com".to_string()]) - ); - assert_eq!(spec.constraints.denylist_expansion_enabled, Some(true)); -} - -#[test] -fn recompute_for_sandbox_policy_rebuilds_denylist_only_full_access_policy() { - let requirements = NetworkConstraints { - domains: Some(domain_permissions([( - "blocked.example.com", - NetworkDomainPermissionToml::Deny, - )])), - danger_full_access_denylist_only: Some(true), - ..Default::default() - }; - let spec = NetworkProxySpec::from_config_and_constraints( - NetworkProxyConfig::default(), - Some(requirements), - &SandboxPolicy::new_workspace_write_policy(), - ) - .expect("workspace-write policy should load"); - - assert_eq!(spec.config.network.allowed_domains(), None); - assert_eq!( - spec.config.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - - let spec = spec - .recompute_for_sandbox_policy(&SandboxPolicy::DangerFullAccess) - .expect("full-access policy should load"); - - assert_eq!( - spec.config.network.allowed_domains(), - Some(vec!["*".to_string()]) - ); - assert_eq!( - spec.config.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - assert!(spec.config.network.allow_local_binding); - - let spec = spec - .recompute_for_sandbox_policy(&SandboxPolicy::new_workspace_write_policy()) - .expect("workspace-write policy should reload"); - - assert_eq!(spec.config.network.allowed_domains(), None); - assert_eq!( - spec.config.network.denied_domains(), - Some(vec!["blocked.example.com".to_string()]) - ); - assert!(!spec.config.network.allow_local_binding); -} - #[test] fn managed_allowed_domains_only_disables_default_mode_allowlist_expansion() { let mut config = NetworkProxyConfig::default(); diff --git a/codex-rs/core/src/config/permissions.rs b/codex-rs/core/src/config/permissions.rs index 826d0139bf..943c82c0e9 100644 --- a/codex-rs/core/src/config/permissions.rs +++ b/codex-rs/core/src/config/permissions.rs @@ -5,12 +5,14 @@ 
use std::path::Path; use std::path::PathBuf; use codex_config::permissions_toml::FilesystemPermissionToml; +use codex_config::permissions_toml::FilesystemPermissionsToml; use codex_config::permissions_toml::NetworkToml; use codex_config::permissions_toml::PermissionProfileToml; use codex_config::permissions_toml::PermissionsToml; use codex_network_proxy::NetworkProxyConfig; #[cfg(test)] use codex_network_proxy::NetworkUnixSocketPermission as ProxyNetworkUnixSocketPermission; +use codex_protocol::permissions::FileSystemAccessMode; use codex_protocol::permissions::FileSystemPath; use codex_protocol::permissions::FileSystemSandboxEntry; use codex_protocol::permissions::FileSystemSandboxPolicy; @@ -42,6 +44,7 @@ pub(crate) fn resolve_permission_profile<'a>( pub(crate) fn compile_permission_profile( permissions: &PermissionsToml, profile_name: &str, + policy_cwd: &Path, startup_warnings: &mut Vec, ) -> io::Result<(FileSystemSandboxPolicy, NetworkSandboxPolicy)> { let profile = resolve_permission_profile(permissions, profile_name)?; @@ -54,8 +57,31 @@ pub(crate) fn compile_permission_profile( missing_filesystem_entries_warning(profile_name), ); } else { + if cfg!(not(target_os = "macos")) { + for pattern in unsupported_read_write_glob_paths(filesystem) { + push_warning( + startup_warnings, + format!( + "Filesystem glob `{pattern}` uses `read` or `write` access, which is not fully supported by this platform's sandboxing. Use an exact path or trailing `/**` subtree rule instead. `none` deny-read globs are supported." + ), + ); + } + for pattern in unbounded_unreadable_globstar_paths(filesystem) { + push_warning( + startup_warnings, + format!( + "Filesystem deny-read glob `{pattern}` uses `**`. Non-macOS sandboxing does not support unbounded `**` natively; set `glob_scan_max_depth` in this filesystem profile to cap Linux glob expansion and silence this warning, or enumerate explicit depths such as `*.env`, `*/*.env`, and `*/*/*.env`." 
+ ), + ); + } + } for (path, permission) in &filesystem.entries { - compile_filesystem_permission(path, permission, &mut entries, startup_warnings)?; + entries.extend(compile_filesystem_permission( + path, + permission, + policy_cwd, + startup_warnings, + )?); } } } else { @@ -64,13 +90,17 @@ pub(crate) fn compile_permission_profile( missing_filesystem_entries_warning(profile_name), ); } + let glob_scan_max_depth = validate_glob_scan_max_depth( + profile + .filesystem + .as_ref() + .and_then(|filesystem| filesystem.glob_scan_max_depth), + )?; let network_sandbox_policy = compile_network_sandbox_policy(profile.network.as_ref()); - - Ok(( - FileSystemSandboxPolicy::restricted(entries), - network_sandbox_policy, - )) + let mut file_system_sandbox_policy = FileSystemSandboxPolicy::restricted(entries); + file_system_sandbox_policy.glob_scan_max_depth = glob_scan_max_depth; + Ok((file_system_sandbox_policy, network_sandbox_policy)) } /// Returns a list of paths that must be readable by shell tools in order @@ -118,24 +148,71 @@ fn compile_network_sandbox_policy(network: Option<&NetworkToml>) -> NetworkSandb fn compile_filesystem_permission( path: &str, permission: &FilesystemPermissionToml, - entries: &mut Vec, + policy_cwd: &Path, startup_warnings: &mut Vec, -) -> io::Result<()> { +) -> io::Result> { + let mut entries = Vec::new(); match permission { - FilesystemPermissionToml::Access(access) => entries.push(FileSystemSandboxEntry { - path: compile_filesystem_path(path, startup_warnings)?, - access: *access, - }), + FilesystemPermissionToml::Access(access) => { + entries.push(FileSystemSandboxEntry { + path: compile_filesystem_access_path(path, *access, startup_warnings)?, + access: *access, + }); + } FilesystemPermissionToml::Scoped(scoped_entries) => { for (subpath, access) in scoped_entries { - entries.push(FileSystemSandboxEntry { - path: compile_scoped_filesystem_path(path, subpath, startup_warnings)?, - access: *access, - }); + let has_glob = 
contains_glob_chars(subpath); + let can_compile_as_pattern = match parse_special_path(path) { + Some(FileSystemSpecialPath::ProjectRoots { .. }) | None => true, + Some(_) => false, + }; + if has_glob && *access == FileSystemAccessMode::None && can_compile_as_pattern { + // Scoped glob syntax is a first-class filesystem policy + // pattern entry. Literal scoped paths continue through the + // exact-path parser so existing path semantics stay intact. + let entry = FileSystemSandboxEntry { + path: FileSystemPath::GlobPattern { + pattern: compile_scoped_filesystem_pattern( + path, subpath, *access, policy_cwd, + )?, + }, + access: *access, + }; + entries.push(entry); + } else { + let subpath = compile_read_write_glob_path(subpath, *access)?; + entries.push(FileSystemSandboxEntry { + path: compile_scoped_filesystem_path(path, subpath, startup_warnings)?, + access: *access, + }); + } } } } - Ok(()) + Ok(entries) +} + +fn compile_filesystem_access_path( + path: &str, + access: FileSystemAccessMode, + startup_warnings: &mut Vec, +) -> io::Result { + if !contains_glob_chars(path) { + return compile_filesystem_path(path, startup_warnings); + } + + if access == FileSystemAccessMode::None { + // At this point `path` is an unscoped filesystem table key. Top-level + // glob deny entries still go through the absolute-path parser before + // becoming policy patterns; relative project-root glob syntax is + // handled by `compile_scoped_filesystem_pattern`. 
+ return Ok(FileSystemPath::GlobPattern { + pattern: parse_absolute_path(path)?.to_string_lossy().into_owned(), + }); + } + + let path = compile_read_write_glob_path(path, access)?; + compile_filesystem_path(path, startup_warnings) } fn compile_filesystem_path( @@ -186,6 +263,133 @@ fn compile_scoped_filesystem_path( Ok(FileSystemPath::Path { path }) } +fn compile_scoped_filesystem_pattern( + path: &str, + subpath: &str, + access: FileSystemAccessMode, + policy_cwd: &Path, +) -> io::Result { + // Pattern entries currently mean deny-read only. Supporting broader access + // modes here would imply glob-based read/write allow semantics that the + // sandbox policy does not express yet. + if access != FileSystemAccessMode::None { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("filesystem glob subpath `{subpath}` only supports `none` access"), + )); + } + let subpath = parse_relative_subpath(subpath)?; + + match parse_special_path(path) { + Some(FileSystemSpecialPath::ProjectRoots { .. }) => { + // `:project_roots` is represented as a special path, but current + // filesystem-policy resolution defines it relative to the session + // cwd. Use the same policy cwd here so glob entries and exact + // scoped entries resolve consistently. 
+ Ok( + AbsolutePathBuf::resolve_path_against_base(&subpath, policy_cwd) + .to_string_lossy() + .to_string(), + ) + } + Some(_) => Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("filesystem path `{path}` does not support nested entries"), + )), + None => { + let base = parse_absolute_path(path)?; + Ok(base.join(&subpath).to_string_lossy().to_string()) + } + } +} + +fn compile_read_write_glob_path(path: &str, access: FileSystemAccessMode) -> io::Result<&str> { + if !contains_glob_chars(path) { + return Ok(path); + } + + let path_without_trailing_glob = remove_trailing_glob_suffix(path); + if !contains_glob_chars(path_without_trailing_glob) { + return Ok(path_without_trailing_glob); + } + + Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!( + "filesystem glob path `{path}` only supports `none` access; use an exact path or trailing `/**` for `{access}` subtree access" + ), + )) +} + +fn unsupported_read_write_glob_paths(filesystem: &FilesystemPermissionsToml) -> Vec { + let mut patterns = Vec::new(); + for (path, permission) in &filesystem.entries { + match permission { + FilesystemPermissionToml::Access(access) => { + if *access != FileSystemAccessMode::None + && contains_glob_chars(remove_trailing_glob_suffix(path)) + { + patterns.push(path.clone()); + } + } + FilesystemPermissionToml::Scoped(scoped_entries) => { + for (subpath, access) in scoped_entries { + if *access != FileSystemAccessMode::None + && contains_glob_chars(remove_trailing_glob_suffix(subpath)) + { + patterns.push(format!("{path}/{subpath}")); + } + } + } + } + } + patterns +} + +fn unbounded_unreadable_globstar_paths(filesystem: &FilesystemPermissionsToml) -> Vec { + if filesystem.glob_scan_max_depth.is_some() { + return Vec::new(); + } + + let mut patterns = Vec::new(); + for (path, permission) in &filesystem.entries { + match permission { + FilesystemPermissionToml::Access(FileSystemAccessMode::None) => { + if path.contains("**") { + patterns.push(path.clone()); + } + } + 
FilesystemPermissionToml::Access(_) => {} + FilesystemPermissionToml::Scoped(scoped_entries) => { + for (subpath, access) in scoped_entries { + if *access == FileSystemAccessMode::None && subpath.contains("**") { + patterns.push(format!("{path}/{subpath}")); + } + } + } + } + } + patterns +} + +fn validate_glob_scan_max_depth(max_depth: Option) -> io::Result> { + match max_depth { + Some(0) => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "glob_scan_max_depth must be at least 1", + )), + _ => Ok(max_depth), + } +} + +fn contains_glob_chars(path: &str) -> bool { + path.chars().any(|ch| matches!(ch, '*' | '?' | '[' | ']')) +} + +fn remove_trailing_glob_suffix(path: &str) -> &str { + path.strip_suffix("/**").unwrap_or(path) +} + // WARNING: keep this parser forward-compatible. // Adding a new `:special_path` must not make older Codex versions reject the // config. Unknown values intentionally round-trip through diff --git a/codex-rs/core/src/config/permissions_tests.rs b/codex-rs/core/src/config/permissions_tests.rs index e9191dbfa7..e22376b214 100644 --- a/codex-rs/core/src/config/permissions_tests.rs +++ b/codex-rs/core/src/config/permissions_tests.rs @@ -2,6 +2,7 @@ use super::*; use crate::config::Config; use crate::config::ConfigOverrides; use codex_config::config_toml::ConfigToml; +use codex_config::permissions_toml::FilesystemPermissionToml; use codex_config::permissions_toml::FilesystemPermissionsToml; use codex_config::permissions_toml::NetworkDomainPermissionToml; use codex_config::permissions_toml::NetworkDomainPermissionsToml; @@ -10,6 +11,11 @@ use codex_config::permissions_toml::NetworkUnixSocketPermissionToml; use codex_config::permissions_toml::NetworkUnixSocketPermissionsToml; use codex_config::permissions_toml::PermissionProfileToml; use codex_config::permissions_toml::PermissionsToml; +use codex_protocol::permissions::FileSystemAccessMode; +use codex_protocol::permissions::FileSystemPath; +use 
codex_protocol::permissions::FileSystemSandboxEntry; +use codex_protocol::permissions::FileSystemSandboxPolicy; +use codex_protocol::permissions::FileSystemSpecialPath; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use std::collections::BTreeMap; @@ -24,8 +30,8 @@ fn normalize_absolute_path_for_platform_simplifies_windows_verbatim_paths() { assert_eq!(parsed, PathBuf::from(r"D:\c\x\worktrees\2508\swift-base")); } -#[test] -fn restricted_read_implicitly_allows_helper_executables() -> std::io::Result<()> { +#[tokio::test] +async fn restricted_read_implicitly_allows_helper_executables() -> std::io::Result<()> { let temp_dir = TempDir::new()?; let cwd = temp_dir.path().join("workspace"); let codex_home = temp_dir.path().join(".codex"); @@ -49,6 +55,7 @@ fn restricted_read_implicitly_allows_helper_executables() -> std::io::Result<()> "workspace".to_string(), PermissionProfileToml { filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, entries: BTreeMap::new(), }), network: None, @@ -63,8 +70,9 @@ fn restricted_read_implicitly_allows_helper_executables() -> std::io::Result<()> main_execve_wrapper_exe: Some(execve_wrapper), ..Default::default() }, - codex_home, - )?; + AbsolutePathBuf::from_absolute_path(&codex_home)?, + ) + .await?; let expected_zsh = AbsolutePathBuf::try_from(zsh_path)?; let expected_allowed_arg0_dir = AbsolutePathBuf::try_from(allowed_arg0_dir)?; @@ -215,3 +223,132 @@ fn network_toml_overlays_unix_socket_permissions_by_path() { }) ); } + +#[test] +fn read_write_glob_warnings_skip_supported_deny_read_globs_and_trailing_subpaths() { + let filesystem = FilesystemPermissionsToml { + glob_scan_max_depth: None, + entries: BTreeMap::from([ + ( + "/tmp/**/*.log".to_string(), + FilesystemPermissionToml::Access(FileSystemAccessMode::Read), + ), + ( + "/tmp/cache/**".to_string(), + FilesystemPermissionToml::Access(FileSystemAccessMode::Write), + ), + ( + ":project_roots".to_string(), + 
FilesystemPermissionToml::Scoped(BTreeMap::from([ + ("**/*.env".to_string(), FileSystemAccessMode::None), + ("docs/**".to_string(), FileSystemAccessMode::Read), + ("src/**/*.rs".to_string(), FileSystemAccessMode::Write), + ])), + ), + ]), + }; + + assert_eq!( + unsupported_read_write_glob_paths(&filesystem), + vec![ + "/tmp/**/*.log".to_string(), + ":project_roots/src/**/*.rs".to_string() + ], + "`none` glob patterns are supported as deny-read rules; only `read`/`write` globs should warn" + ); +} + +#[test] +fn unreadable_globstar_warning_is_suppressed_when_scan_depth_is_configured() { + let filesystem = FilesystemPermissionsToml { + glob_scan_max_depth: None, + entries: BTreeMap::from([( + ":project_roots".to_string(), + FilesystemPermissionToml::Scoped(BTreeMap::from([ + ("**/*.env".to_string(), FileSystemAccessMode::None), + ("*.pem".to_string(), FileSystemAccessMode::None), + ])), + )]), + }; + + assert_eq!( + unbounded_unreadable_globstar_paths(&filesystem), + vec![":project_roots/**/*.env".to_string()] + ); + + let configured_filesystem = FilesystemPermissionsToml { + glob_scan_max_depth: Some(2), + ..filesystem + }; + assert_eq!( + unbounded_unreadable_globstar_paths(&configured_filesystem), + Vec::::new() + ); +} + +#[test] +fn glob_scan_max_depth_must_be_positive() { + let err = validate_glob_scan_max_depth(Some(0)) + .expect_err("zero depth would silently skip deny-read glob expansion"); + + assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); + assert_eq!(err.to_string(), "glob_scan_max_depth must be at least 1"); + assert_eq!( + validate_glob_scan_max_depth(Some(2)).expect("depth should be valid"), + Some(2) + ); +} + +#[test] +fn read_write_trailing_glob_suffix_compiles_as_subpath() -> std::io::Result<()> { + let cwd = TempDir::new()?; + let mut startup_warnings = Vec::new(); + let (file_system_policy, _) = compile_permission_profile( + &PermissionsToml { + entries: BTreeMap::from([( + "workspace".to_string(), + PermissionProfileToml { + 
filesystem: Some(FilesystemPermissionsToml { + glob_scan_max_depth: None, + entries: BTreeMap::from([( + ":project_roots".to_string(), + FilesystemPermissionToml::Scoped(BTreeMap::from([( + "docs/**".to_string(), + FileSystemAccessMode::Read, + )])), + )]), + }), + network: None, + }, + )]), + }, + "workspace", + cwd.path(), + &mut startup_warnings, + )?; + + assert_eq!( + file_system_policy, + FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry { + path: FileSystemPath::Special { + value: FileSystemSpecialPath::project_roots(Some("docs".into())), + }, + access: FileSystemAccessMode::Read, + }]), + "trailing /** should compile as a subtree path instead of a glob pattern" + ); + Ok(()) +} + +#[test] +fn read_write_glob_patterns_still_reject_non_subpath_globs() { + let err = compile_read_write_glob_path("src/**/*.rs", FileSystemAccessMode::Read) + .expect_err("non-subpath read/write glob should be rejected"); + + assert_eq!(err.kind(), std::io::ErrorKind::InvalidInput); + assert!( + err.to_string() + .contains("filesystem glob path `src/**/*.rs` only supports `none` access"), + "{err}" + ); +} diff --git a/codex-rs/core/src/config/service.rs b/codex-rs/core/src/config/service.rs index 6c7f071f2b..51288e60b9 100644 --- a/codex-rs/core/src/config/service.rs +++ b/codex-rs/core/src/config/service.rs @@ -29,6 +29,7 @@ use codex_app_server_protocol::OverriddenMetadata; use codex_app_server_protocol::WriteStatus; use codex_config::CONFIG_TOML_FILE; use codex_config::config_toml::ConfigToml; +use codex_exec_server::LOCAL_FS; use codex_utils_absolute_path::AbsolutePathBuf; use serde_json::Value as JsonValue; use std::borrow::Cow; @@ -424,6 +425,7 @@ impl ConfigService { async fn load_thread_agnostic_config(&self) -> std::io::Result { let cwd: Option = None; load_config_layers_state( + LOCAL_FS.as_ref(), &self.codex_home, cwd, &self.cli_overrides, diff --git a/codex-rs/core/src/config/service_tests.rs b/codex-rs/core/src/config/service_tests.rs index 
ae62326605..faaffb34df 100644 --- a/codex-rs/core/src/config/service_tests.rs +++ b/codex-rs/core/src/config/service_tests.rs @@ -164,6 +164,49 @@ async fn write_value_supports_nested_app_paths() -> Result<()> { Ok(()) } +#[tokio::test] +async fn write_value_supports_custom_mcp_server_default_tool_approval_mode() -> Result<()> { + let tmp = tempdir().expect("tempdir"); + std::fs::write( + tmp.path().join(CONFIG_TOML_FILE), + "[mcp_servers.docs]\ncommand = \"docs-server\"\n", + )?; + + let service = ConfigService::without_managed_config_for_tests(tmp.path().to_path_buf()); + service + .write_value(ConfigValueWriteParams { + file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()), + key_path: "mcp_servers.docs.default_tools_approval_mode".to_string(), + value: serde_json::json!("approve"), + merge_strategy: MergeStrategy::Replace, + expected_version: None, + }) + .await + .expect("write mcp server default_tools_approval_mode succeeds"); + + let contents = std::fs::read_to_string(tmp.path().join(CONFIG_TOML_FILE))?; + assert!(contents.contains("default_tools_approval_mode = \"approve\"")); + + let read = service + .read(ConfigReadParams { + include_layers: false, + cwd: None, + }) + .await + .expect("config read succeeds"); + + assert_eq!( + read.config + .additional + .get("mcp_servers") + .and_then(|servers| servers.get("docs")) + .and_then(|docs| docs.get("default_tools_approval_mode")), + Some(&serde_json::json!("approve")) + ); + + Ok(()) +} + #[tokio::test] async fn read_includes_origins_and_layers() { let tmp = tempdir().expect("tempdir"); diff --git a/codex-rs/core/src/config_loader/README.md b/codex-rs/core/src/config_loader/README.md index 04b72e4ca1..44a514a10a 100644 --- a/codex-rs/core/src/config_loader/README.md +++ b/codex-rs/core/src/config_loader/README.md @@ -10,7 +10,7 @@ This module is the canonical place to **load and describe Codex configuration la Exported from `codex_core::config_loader`: -- 
`load_config_layers_state(codex_home, cwd_opt, cli_overrides, overrides, cloud_requirements) -> ConfigLayerStack` +- `load_config_layers_state(fs, codex_home, cwd_opt, cli_overrides, overrides, cloud_requirements) -> ConfigLayerStack` - `ConfigLayerStack` - `effective_config() -> toml::Value` - `origins() -> HashMap` @@ -38,18 +38,22 @@ computing the effective config and origins metadata. This is what Most callers want the effective config plus metadata: ```rust -use codex_core::config_loader::{load_config_layers_state, LoaderOverrides}; +use codex_core::config_loader::{ + CloudRequirementsLoader, LoaderOverrides, load_config_layers_state, +}; +use codex_exec_server::LOCAL_FS; use codex_utils_absolute_path::AbsolutePathBuf; use toml::Value as TomlValue; let cli_overrides: Vec<(String, TomlValue)> = Vec::new(); let cwd = AbsolutePathBuf::current_dir()?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &cli_overrides, LoaderOverrides::default(), - None, + CloudRequirementsLoader::default(), ).await?; let effective = layers.effective_config(); diff --git a/codex-rs/core/src/config_loader/layer_io.rs b/codex-rs/core/src/config_loader/layer_io.rs index af77bdafa5..6bd9a9130f 100644 --- a/codex-rs/core/src/config_loader/layer_io.rs +++ b/codex-rs/core/src/config_loader/layer_io.rs @@ -5,11 +5,11 @@ use super::macos::ManagedAdminConfigLayer; use super::macos::load_managed_admin_config_layer; use codex_config::config_error_from_toml; use codex_config::io_error_from_config_error; +use codex_exec_server::ExecutorFileSystem; use codex_utils_absolute_path::AbsolutePathBuf; use std::io; use std::path::Path; use std::path::PathBuf; -use tokio::fs; use toml::Value as TomlValue; #[cfg(unix)] @@ -36,6 +36,7 @@ pub(super) struct LoadedConfigLayers { } pub(super) async fn load_config_layers_internal( + fs: &dyn ExecutorFileSystem, codex_home: &Path, overrides: LoaderOverrides, ) -> io::Result { @@ -57,7 +58,7 @@ pub(super) async fn 
load_config_layers_internal( )?; let managed_config = - read_config_from_path(&managed_config_path, /*log_missing_as_info*/ false) + read_config_from_path(fs, &managed_config_path, /*log_missing_as_info*/ false) .await? .map(|managed_config| MangedConfigFromFile { managed_config, @@ -89,15 +90,16 @@ fn map_managed_admin_layer(layer: ManagedAdminConfigLayer) -> ManagedConfigFromM } pub(super) async fn read_config_from_path( - path: impl AsRef, + fs: &dyn ExecutorFileSystem, + path: &AbsolutePathBuf, log_missing_as_info: bool, ) -> io::Result> { - match fs::read_to_string(path.as_ref()).await { + match fs.read_file_text(path, /*sandbox*/ None).await { Ok(contents) => match toml::from_str::(&contents) { Ok(value) => Ok(Some(value)), Err(err) => { - tracing::error!("Failed to parse {}: {err}", path.as_ref().display()); - let config_error = config_error_from_toml(path.as_ref(), &contents, err.clone()); + tracing::error!("Failed to parse {}: {err}", path.as_path().display()); + let config_error = config_error_from_toml(path.as_path(), &contents, err.clone()); Err(io_error_from_config_error( io::ErrorKind::InvalidData, config_error, @@ -107,14 +109,14 @@ pub(super) async fn read_config_from_path( }, Err(err) if err.kind() == io::ErrorKind::NotFound => { if log_missing_as_info { - tracing::info!("{} not found, using defaults", path.as_ref().display()); + tracing::info!("{} not found, using defaults", path.as_path().display()); } else { - tracing::debug!("{} not found", path.as_ref().display()); + tracing::debug!("{} not found", path.as_path().display()); } Ok(None) } Err(err) => { - tracing::error!("Failed to read {}: {err}", path.as_ref().display()); + tracing::error!("Failed to read {}: {err}", path.as_path().display()); Err(err) } } diff --git a/codex-rs/core/src/config_loader/mod.rs b/codex-rs/core/src/config_loader/mod.rs index 36fc956bf4..fb02e670ef 100644 --- a/codex-rs/core/src/config_loader/mod.rs +++ b/codex-rs/core/src/config_loader/mod.rs @@ -11,6 +11,7 @@ use 
codex_config::CONFIG_TOML_FILE; use codex_config::ConfigRequirementsWithSources; use codex_config::config_toml::ConfigToml; use codex_config::config_toml::ProjectConfig; +use codex_exec_server::ExecutorFileSystem; use codex_git_utils::resolve_root_git_project_for_trust; use codex_protocol::config_types::ApprovalsReviewer; use codex_protocol::config_types::SandboxMode; @@ -118,6 +119,7 @@ pub(crate) async fn first_layer_config_error_from_entries( /// thread-agnostic config loading (e.g., for the app server's `/config` /// endpoint) should `cwd` be `None`. pub async fn load_config_layers_state( + fs: &dyn ExecutorFileSystem, codex_home: &Path, cwd: Option, cli_overrides: &[(String, TomlValue)], @@ -142,11 +144,12 @@ pub async fn load_config_layers_state( // Honor the system requirements.toml location. let requirements_toml_file = system_requirements_toml_file()?; - load_requirements_toml(&mut config_requirements_toml, requirements_toml_file).await?; + load_requirements_toml(fs, &mut config_requirements_toml, &requirements_toml_file).await?; // Make a best-effort to support the legacy `managed_config.toml` as a // requirements specification. - let loaded_config_layers = layer_io::load_config_layers_internal(codex_home, overrides).await?; + let loaded_config_layers = + layer_io::load_config_layers_internal(fs, codex_home, overrides).await?; load_requirements_from_legacy_scheme( &mut config_requirements_toml, loaded_config_layers.clone(), @@ -173,7 +176,7 @@ pub async fn load_config_layers_state( // if it exists. 
let system_config_toml_file = system_config_toml_file()?; let system_layer = - load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| { + load_config_toml_for_required_layer(fs, &system_config_toml_file, |config_toml| { ConfigLayerEntry::new( ConfigLayerSource::System { file: system_config_toml_file.clone(), @@ -188,7 +191,7 @@ pub async fn load_config_layers_state( // exists, but is malformed, then this error should be propagated to the // user. let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home); - let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| { + let user_layer = load_config_toml_for_required_layer(fs, &user_file, |config_toml| { ConfigLayerEntry::new( ConfigLayerSource::User { file: user_file.clone(), @@ -222,6 +225,7 @@ pub async fn load_config_layers_state( } }; let project_trust_context = match project_trust_context( + fs, &merged_so_far, &cwd, &project_root_markers, @@ -247,6 +251,7 @@ pub async fn load_config_layers_state( } }; let project_layers = load_project_layers( + fs, &cwd, &project_trust_context.project_root, &project_trust_context, @@ -320,22 +325,23 @@ pub async fn load_config_layers_state( /// - If there is an error reading the file or parsing the TOML, returns an /// error. 
async fn load_config_toml_for_required_layer( - config_toml: impl AsRef, + fs: &dyn ExecutorFileSystem, + toml_file: &AbsolutePathBuf, create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry, ) -> io::Result { - let toml_file = config_toml.as_ref(); - let toml_value = match tokio::fs::read_to_string(toml_file).await { + let toml_value = match fs.read_file_text(toml_file, /*sandbox*/ None).await { Ok(contents) => { let config: TomlValue = toml::from_str(&contents).map_err(|err| { - let config_error = config_error_from_toml(toml_file, &contents, err.clone()); + let config_error = + config_error_from_toml(toml_file.as_path(), &contents, err.clone()); io_error_from_config_error(io::ErrorKind::InvalidData, config_error, Some(err)) })?; - let config_parent = toml_file.parent().ok_or_else(|| { + let config_parent = toml_file.as_path().parent().ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!( "Config file {} has no parent directory", - toml_file.display() + toml_file.as_path().display() ), ) })?; @@ -347,7 +353,10 @@ async fn load_config_toml_for_required_layer( } else { Err(io::Error::new( e.kind(), - format!("Failed to read config file {}: {e}", toml_file.display()), + format!( + "Failed to read config file {}: {e}", + toml_file.as_path().display() + ), )) } } @@ -360,12 +369,14 @@ async fn load_config_toml_for_required_layer( /// `requirements.toml` location to `config_requirements_toml` by filling in /// any unset fields. 
async fn load_requirements_toml( + fs: &dyn ExecutorFileSystem, config_requirements_toml: &mut ConfigRequirementsWithSources, - requirements_toml_file: impl AsRef, + requirements_toml_file: &AbsolutePathBuf, ) -> io::Result<()> { - let requirements_toml_file = - AbsolutePathBuf::from_absolute_path(requirements_toml_file.as_ref())?; - match tokio::fs::read_to_string(&requirements_toml_file).await { + match fs + .read_file_text(requirements_toml_file, /*sandbox*/ None) + .await + { Ok(contents) => { let requirements_config: ConfigRequirementsToml = toml::from_str(&contents).map_err(|e| { @@ -373,7 +384,7 @@ async fn load_requirements_toml( io::ErrorKind::InvalidData, format!( "Error parsing requirements file {}: {e}", - requirements_toml_file.as_ref().display(), + requirements_toml_file.as_path().display(), ), ) })?; @@ -390,7 +401,7 @@ async fn load_requirements_toml( e.kind(), format!( "Failed to read requirements file {}: {e}", - requirements_toml_file.as_ref().display(), + requirements_toml_file.as_path().display(), ), )); } @@ -632,6 +643,7 @@ fn project_layer_entry( } async fn project_trust_context( + fs: &dyn ExecutorFileSystem, merged_config: &TomlValue, cwd: &AbsolutePathBuf, project_root_markers: &[String], @@ -646,12 +658,14 @@ async fn project_trust_context( .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))? 
}; - let project_root = find_project_root(cwd, project_root_markers).await?; + let project_root = find_project_root(fs, cwd, project_root_markers).await?; let projects = project_trust_config.projects.unwrap_or_default(); let project_root_key = project_trust_key(project_root.as_path()); - let repo_root = resolve_root_git_project_for_trust(cwd.as_path()); - let repo_root_key = repo_root.as_ref().map(|root| project_trust_key(root)); + let repo_root = resolve_root_git_project_for_trust(fs, cwd).await; + let repo_root_key = repo_root + .as_ref() + .map(|root| project_trust_key(root.as_path())); let projects_trust = projects .into_iter() @@ -742,6 +756,7 @@ fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlV } async fn find_project_root( + fs: &dyn ExecutorFileSystem, cwd: &AbsolutePathBuf, project_root_markers: &[String], ) -> io::Result { @@ -749,11 +764,15 @@ async fn find_project_root( return Ok(cwd.clone()); } - for ancestor in cwd.as_path().ancestors() { + for ancestor in cwd.ancestors() { for marker in project_root_markers { let marker_path = ancestor.join(marker); - if tokio::fs::metadata(&marker_path).await.is_ok() { - return AbsolutePathBuf::from_absolute_path(ancestor); + if fs + .get_metadata(&marker_path, /*sandbox*/ None) + .await + .is_ok() + { + return Ok(ancestor); } } } @@ -766,6 +785,7 @@ async fn find_project_root( /// starting from folders closest to `project_root` (which is the lowest /// precedence) to those closest to `cwd` (which is the highest precedence). 
async fn load_project_layers( + fs: &dyn ExecutorFileSystem, cwd: &AbsolutePathBuf, project_root: &AbsolutePathBuf, trust_context: &ProjectTrustContext, @@ -775,13 +795,12 @@ async fn load_project_layers( let codex_home_normalized = normalize_path(codex_home_abs.as_path()).unwrap_or_else(|_| codex_home_abs.to_path_buf()); let mut dirs = cwd - .as_path() .ancestors() .scan(false, |done, a| { if *done { None } else { - if a == project_root.as_path() { + if &a == project_root { *done = true; } Some(a) @@ -792,25 +811,24 @@ async fn load_project_layers( let mut layers = Vec::new(); for dir in dirs { - let dot_codex = dir.join(".codex"); - if !tokio::fs::metadata(&dot_codex) + let dot_codex_abs = dir.join(".codex"); + if !fs + .get_metadata(&dot_codex_abs, /*sandbox*/ None) .await - .map(|meta| meta.is_dir()) + .map(|metadata| metadata.is_directory) .unwrap_or(false) { continue; } - let layer_dir = AbsolutePathBuf::from_absolute_path(dir)?; - let decision = trust_context.decision_for_dir(&layer_dir); - let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?; + let decision = trust_context.decision_for_dir(&dir); let dot_codex_normalized = normalize_path(dot_codex_abs.as_path()).unwrap_or_else(|_| dot_codex_abs.to_path_buf()); if dot_codex_abs == codex_home_abs || dot_codex_normalized == codex_home_normalized { continue; } let config_file = dot_codex_abs.join(CONFIG_TOML_FILE); - match tokio::fs::read_to_string(&config_file).await { + match fs.read_file_text(&config_file, /*sandbox*/ None).await { Ok(contents) => { let config: TomlValue = match toml::from_str(&contents) { Ok(config) => config, @@ -827,7 +845,7 @@ async fn load_project_layers( layers.push(project_layer_entry( trust_context, &dot_codex_abs, - &layer_dir, + &dir, TomlValue::Table(toml::map::Map::new()), /*config_toml_exists*/ true, )); @@ -839,7 +857,7 @@ async fn load_project_layers( let entry = project_layer_entry( trust_context, &dot_codex_abs, - &layer_dir, + &dir, config, 
/*config_toml_exists*/ true, ); @@ -853,7 +871,7 @@ async fn load_project_layers( layers.push(project_layer_entry( trust_context, &dot_codex_abs, - &layer_dir, + &dir, TomlValue::Table(toml::map::Map::new()), /*config_toml_exists*/ false, )); diff --git a/codex-rs/core/src/config_loader/tests.rs b/codex-rs/core/src/config_loader/tests.rs index ed69682e86..c706e91062 100644 --- a/codex-rs/core/src/config_loader/tests.rs +++ b/codex-rs/core/src/config_loader/tests.rs @@ -16,6 +16,7 @@ use crate::config_loader::version_for_toml; use codex_config::CONFIG_TOML_FILE; use codex_config::config_toml::ConfigToml; use codex_config::config_toml::ProjectConfig; +use codex_exec_server::LOCAL_FS; use codex_protocol::config_types::TrustLevel; use codex_protocol::config_types::WebSearchMode; use codex_protocol::protocol::AskForApproval; @@ -92,6 +93,7 @@ async fn returns_config_error_for_invalid_user_config_toml() { let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd"); let err = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(cwd), &[] as &[(String, TomlValue)], @@ -119,6 +121,7 @@ async fn returns_config_error_for_invalid_managed_config_toml() { let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd"); let err = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(cwd), &[] as &[(String, TomlValue)], @@ -203,6 +206,7 @@ extra = true let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd"); let state = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(cwd), &[] as &[(String, TomlValue)], @@ -235,6 +239,7 @@ async fn returns_empty_when_all_layers_missing() { let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd"); let layers = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(cwd), &[] as &[(String, TomlValue)], @@ -327,6 +332,7 @@ flag = false let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd"); let state = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(cwd), &[] as &[(String, 
TomlValue)], @@ -428,6 +434,7 @@ allowed_sandbox_modes = ["read-only"] ); let state = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(AbsolutePathBuf::try_from(tmp.path())?), &[] as &[(String, TomlValue)], @@ -489,6 +496,7 @@ allowed_approval_policies = ["never"] ); let state = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(AbsolutePathBuf::try_from(tmp.path())?), &[] as &[(String, TomlValue)], @@ -529,8 +537,14 @@ personality = true ) .await?; + let requirements_file = AbsolutePathBuf::try_from(requirements_file)?; let mut config_requirements_toml = ConfigRequirementsWithSources::default(); - load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?; + load_requirements_toml( + LOCAL_FS.as_ref(), + &mut config_requirements_toml, + &requirements_file, + ) + .await?; assert_eq!( config_requirements_toml @@ -620,6 +634,7 @@ allowed_approval_policies = ["on-request"] ), ); let state = load_config_layers_state( + LOCAL_FS.as_ref(), tmp.path(), Some(AbsolutePathBuf::try_from(tmp.path())?), &[] as &[(String, TomlValue)], @@ -691,7 +706,12 @@ allowed_approval_policies = ["on-request"] guardian_policy_config: None, }, ); - load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?; + load_requirements_toml( + LOCAL_FS.as_ref(), + &mut config_requirements_toml, + &AbsolutePathBuf::try_from(requirements_file)?, + ) + .await?; assert_eq!( config_requirements_toml @@ -735,6 +755,7 @@ async fn load_config_layers_includes_cloud_requirements() -> anyhow::Result<()> let cloud_requirements = CloudRequirementsLoader::new(async move { Ok(Some(requirements)) }); let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &[] as &[(String, TomlValue)], @@ -771,6 +792,7 @@ async fn load_config_layers_fails_when_cloud_requirements_loader_fails() -> anyh let cwd = AbsolutePathBuf::from_absolute_path(tmp.path())?; let err = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, 
Some(cwd), &[] as &[(String, TomlValue)], @@ -823,6 +845,7 @@ async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> { .await?; let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &[] as &[(String, TomlValue)], @@ -967,6 +990,7 @@ async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> s .await?; let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &[] as &[(String, TomlValue)], @@ -1006,6 +1030,7 @@ async fn codex_home_is_not_loaded_as_project_layer_from_home_dir() -> std::io::R let cwd = AbsolutePathBuf::from_absolute_path(&home_dir)?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &[] as &[(String, TomlValue)], @@ -1062,6 +1087,7 @@ async fn codex_home_within_project_tree_is_not_double_loaded() -> std::io::Resul let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &project_dot_codex, Some(cwd), &[] as &[(String, TomlValue)], @@ -1132,6 +1158,7 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result< .await?; let layers_untrusted = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home_untrusted, Some(cwd.clone()), &[] as &[(String, TomlValue)], @@ -1170,6 +1197,7 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result< .await?; let layers_unknown = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home_unknown, Some(cwd), &[] as &[(String, TomlValue)], @@ -1328,6 +1356,7 @@ async fn invalid_project_config_ignored_when_untrusted_or_unknown() -> std::io:: } let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd.clone()), &[] as &[(String, TomlValue)], @@ -1390,6 +1419,7 @@ async fn cli_overrides_with_relative_paths_do_not_break_trust_check() -> std::io )]; 
load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &cli_overrides, @@ -1432,6 +1462,7 @@ async fn project_root_markers_supports_alternate_markers() -> std::io::Result<() let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; let layers = load_config_layers_state( + LOCAL_FS.as_ref(), &codex_home, Some(cwd), &[] as &[(String, TomlValue)], diff --git a/codex-rs/core/src/connectors.rs b/codex-rs/core/src/connectors.rs index ad3447eaf3..103ce251fc 100644 --- a/codex-rs/core/src/connectors.rs +++ b/codex-rs/core/src/connectors.rs @@ -1,8 +1,5 @@ -use std::collections::BTreeSet; use std::collections::HashMap; use std::collections::HashSet; -use std::env; -use std::path::PathBuf; use std::sync::Arc; use std::sync::LazyLock; use std::sync::Mutex as StdMutex; @@ -28,7 +25,6 @@ use crate::codex::INITIAL_SUBMIT_ID; use crate::config::Config; use crate::config_loader::AppsRequirementsToml; use crate::mcp::McpManager; -use crate::plugins::AppConnectorId; use crate::plugins::PluginsManager; use crate::plugins::list_tool_suggest_discoverable_plugins; use codex_config::types::AppToolApproval; @@ -38,18 +34,15 @@ use codex_features::Feature; use codex_login::AuthManager; use codex_login::CodexAuth; use codex_login::default_client::create_client; -use codex_login::default_client::is_first_party_chat_originator; use codex_login::default_client::originator; use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_mcp::McpConnectionManager; -use codex_mcp::SandboxState; use codex_mcp::ToolInfo; use codex_mcp::ToolPluginProvenance; use codex_mcp::codex_apps_tools_cache_key; use codex_mcp::compute_auth_statuses; use codex_mcp::with_codex_apps_mcp; -pub use codex_connectors::CONNECTORS_CACHE_TTL; const CONNECTORS_READY_TIMEOUT_ON_EMPTY_TOOLS: Duration = Duration::from_secs(30); const DIRECTORY_CONNECTORS_TIMEOUT: Duration = Duration::from_secs(60); @@ -124,15 +117,18 @@ pub(crate) async fn list_tool_suggest_discoverable_tools_with_auth( ) -> anyhow::Result> { let 
directory_connectors = list_directory_connectors_for_tool_suggest_with_auth(config, auth).await?; - let connector_ids = tool_suggest_connector_ids(config); - let discoverable_connectors = filter_tool_suggest_discoverable_connectors( - directory_connectors, - accessible_connectors, - &connector_ids, - ) - .into_iter() - .map(DiscoverableTool::from); - let discoverable_plugins = list_tool_suggest_discoverable_plugins(config)? + let connector_ids = tool_suggest_connector_ids(config).await; + let discoverable_connectors = + codex_connectors::filter::filter_tool_suggest_discoverable_connectors( + directory_connectors, + accessible_connectors, + &connector_ids, + originator().value.as_str(), + ) + .into_iter() + .map(DiscoverableTool::from); + let discoverable_plugins = list_tool_suggest_discoverable_plugins(config) + .await? .into_iter() .map(DiscoverableTool::from); Ok(discoverable_connectors @@ -153,7 +149,12 @@ pub async fn list_cached_accessible_connectors_from_mcp_tools( return Some(Vec::new()); } let cache_key = accessible_connectors_cache_key(config, auth.as_ref()); - read_cached_accessible_connectors(&cache_key).map(filter_disallowed_connectors) + read_cached_accessible_connectors(&cache_key).map(|connectors| { + codex_connectors::filter::filter_disallowed_connectors( + connectors, + originator().value.as_str(), + ) + }) } pub(crate) fn refresh_accessible_connectors_cache_from_mcp_tools( @@ -166,8 +167,10 @@ pub(crate) fn refresh_accessible_connectors_cache_from_mcp_tools( } let cache_key = accessible_connectors_cache_key(config, auth); - let accessible_connectors = - filter_disallowed_connectors(accessible_connectors_from_mcp_tools(mcp_tools)); + let accessible_connectors = codex_connectors::filter::filter_disallowed_connectors( + accessible_connectors_from_mcp_tools(mcp_tools), + originator().value.as_str(), + ); write_cached_accessible_connectors(cache_key, &accessible_connectors); } @@ -199,12 +202,15 @@ pub async fn 
list_accessible_connectors_from_mcp_tools_with_options_and_status( }); } let cache_key = accessible_connectors_cache_key(config, auth.as_ref()); - let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.clone())); + let plugins_manager = Arc::new(PluginsManager::new(config.codex_home.to_path_buf())); let mcp_manager = McpManager::new(Arc::clone(&plugins_manager)); - let tool_plugin_provenance = mcp_manager.tool_plugin_provenance(config); + let tool_plugin_provenance = mcp_manager.tool_plugin_provenance(config).await; if !force_refetch && let Some(cached_connectors) = read_cached_accessible_connectors(&cache_key) { - let cached_connectors = filter_disallowed_connectors(cached_connectors); + let cached_connectors = codex_connectors::filter::filter_disallowed_connectors( + cached_connectors, + originator().value.as_str(), + ); let cached_connectors = with_app_plugin_sources(cached_connectors, &tool_plugin_provenance); return Ok(AccessibleConnectorsStatus { connectors: cached_connectors, @@ -212,7 +218,7 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status( }); } - let mcp_config = config.to_mcp_config(plugins_manager.as_ref()); + let mcp_config = config.to_mcp_config(plugins_manager.as_ref()).await; let mcp_servers = with_codex_apps_mcp(HashMap::new(), auth.as_ref(), &mcp_config); if mcp_servers.is_empty() { return Ok(AccessibleConnectorsStatus { @@ -227,13 +233,6 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status( let (tx_event, rx_event) = unbounded(); drop(rx_event); - let sandbox_state = SandboxState { - sandbox_policy: SandboxPolicy::new_read_only_policy(), - codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), - sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")), - use_legacy_landlock: config.features.use_legacy_landlock(), - }; - let (mcp_connection_manager, cancel_token) = McpConnectionManager::new( &mcp_servers, config.mcp_oauth_credentials_store_mode, @@ 
-241,8 +240,8 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status( &config.permissions.approval_policy, INITIAL_SUBMIT_ID.to_owned(), tx_event, - sandbox_state, - config.codex_home.clone(), + SandboxPolicy::new_read_only_policy(), + config.codex_home.to_path_buf(), codex_apps_tools_cache_key(auth.as_ref()), ToolPluginProvenance::default(), ) @@ -302,8 +301,10 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status( cancel_token.cancel(); } - let accessible_connectors = - filter_disallowed_connectors(accessible_connectors_from_mcp_tools(&tools)); + let accessible_connectors = codex_connectors::filter::filter_disallowed_connectors( + accessible_connectors_from_mcp_tools(&tools), + originator().value.as_str(), + ); if codex_apps_ready || !accessible_connectors.is_empty() { write_cached_accessible_connectors(cache_key, &accessible_connectors); } @@ -366,38 +367,15 @@ fn write_cached_accessible_connectors( .unwrap_or_else(std::sync::PoisonError::into_inner); *cache_guard = Some(CachedAccessibleConnectors { key: cache_key, - expires_at: Instant::now() + CONNECTORS_CACHE_TTL, + expires_at: Instant::now() + codex_connectors::CONNECTORS_CACHE_TTL, connectors: connectors.to_vec(), }); } -fn filter_tool_suggest_discoverable_connectors( - directory_connectors: Vec, - accessible_connectors: &[AppInfo], - discoverable_connector_ids: &HashSet, -) -> Vec { - let accessible_connector_ids: HashSet<&str> = accessible_connectors - .iter() - .filter(|connector| connector.is_accessible) - .map(|connector| connector.id.as_str()) - .collect(); - - let mut connectors = filter_disallowed_connectors(directory_connectors) - .into_iter() - .filter(|connector| !accessible_connector_ids.contains(connector.id.as_str())) - .filter(|connector| discoverable_connector_ids.contains(connector.id.as_str())) - .collect::>(); - connectors.sort_by(|left, right| { - left.name - .cmp(&right.name) - .then_with(|| left.id.cmp(&right.id)) - }); - 
connectors -} - -fn tool_suggest_connector_ids(config: &Config) -> HashSet { - let mut connector_ids = PluginsManager::new(config.codex_home.clone()) +async fn tool_suggest_connector_ids(config: &Config) -> HashSet { + let mut connector_ids = PluginsManager::new(config.codex_home.to_path_buf()) .plugins_for_config(config) + .await .capability_summaries() .iter() .flat_map(|plugin| plugin.app_connector_ids.iter()) @@ -501,14 +479,6 @@ async fn chatgpt_get_request_with_token( } } -pub fn connector_display_label(connector: &AppInfo) -> String { - format_connector_label(&connector.name, &connector.id) -} - -pub fn connector_mention_slug(connector: &AppInfo) -> String { - sanitize_slug(&connector_display_label(connector)) -} - pub(crate) fn accessible_connectors_from_mcp_tools( mcp_tools: &HashMap, ) -> Vec { @@ -519,114 +489,14 @@ pub(crate) fn accessible_connectors_from_mcp_tools( return None; } let connector_id = tool.connector_id.as_deref()?; - Some(( - connector_id.to_string(), - normalize_connector_value(tool.connector_name.as_deref()), - normalize_connector_value(tool.connector_description.as_deref()), - tool.plugin_display_names.clone(), - )) - }); - collect_accessible_connectors(tools) -} - -pub fn merge_connectors( - connectors: Vec, - accessible_connectors: Vec, -) -> Vec { - let mut merged: HashMap = connectors - .into_iter() - .map(|mut connector| { - connector.is_accessible = false; - (connector.id.clone(), connector) + Some(codex_connectors::accessible::AccessibleConnectorTool { + connector_id: connector_id.to_string(), + connector_name: tool.connector_name.clone(), + connector_description: tool.connector_description.clone(), + plugin_display_names: tool.plugin_display_names.clone(), }) - .collect(); - - for mut connector in accessible_connectors { - connector.is_accessible = true; - let connector_id = connector.id.clone(); - if let Some(existing) = merged.get_mut(&connector_id) { - existing.is_accessible = true; - if existing.name == existing.id && 
connector.name != connector.id { - existing.name = connector.name; - } - if existing.description.is_none() && connector.description.is_some() { - existing.description = connector.description; - } - if existing.logo_url.is_none() && connector.logo_url.is_some() { - existing.logo_url = connector.logo_url; - } - if existing.logo_url_dark.is_none() && connector.logo_url_dark.is_some() { - existing.logo_url_dark = connector.logo_url_dark; - } - if existing.distribution_channel.is_none() && connector.distribution_channel.is_some() { - existing.distribution_channel = connector.distribution_channel; - } - existing - .plugin_display_names - .extend(connector.plugin_display_names); - } else { - merged.insert(connector_id, connector); - } - } - - let mut merged = merged.into_values().collect::>(); - for connector in &mut merged { - if connector.install_url.is_none() { - connector.install_url = Some(connector_install_url(&connector.name, &connector.id)); - } - connector.plugin_display_names.sort_unstable(); - connector.plugin_display_names.dedup(); - } - merged.sort_by(|left, right| { - right - .is_accessible - .cmp(&left.is_accessible) - .then_with(|| left.name.cmp(&right.name)) - .then_with(|| left.id.cmp(&right.id)) }); - merged -} - -pub fn merge_plugin_apps( - connectors: Vec, - plugin_apps: Vec, -) -> Vec { - let mut merged = connectors; - let mut connector_ids = merged - .iter() - .map(|connector| connector.id.clone()) - .collect::>(); - - for connector_id in plugin_apps { - if connector_ids.insert(connector_id.0.clone()) { - merged.push(plugin_app_to_app_info(connector_id)); - } - } - - merged.sort_by(|left, right| { - right - .is_accessible - .cmp(&left.is_accessible) - .then_with(|| left.name.cmp(&right.name)) - .then_with(|| left.id.cmp(&right.id)) - }); - merged -} - -pub fn merge_plugin_apps_with_accessible( - plugin_apps: Vec, - accessible_connectors: Vec, -) -> Vec { - let accessible_connector_ids: HashSet<&str> = accessible_connectors - .iter() - 
.map(|connector| connector.id.as_str()) - .collect(); - let plugin_connectors = plugin_apps - .into_iter() - .filter(|connector_id| accessible_connector_ids.contains(connector_id.0.as_str())) - .map(plugin_app_to_app_info) - .collect::>(); - merge_connectors(plugin_connectors, accessible_connectors) + codex_connectors::accessible::collect_accessible_connectors(tools) } pub fn with_app_enabled_state(mut connectors: Vec, config: &Config) -> Vec { @@ -699,45 +569,6 @@ pub(crate) fn codex_app_tool_is_enabled(config: &Config, tool_info: &ToolInfo) - .enabled } -const DISALLOWED_CONNECTOR_IDS: &[&str] = &[ - "asdk_app_6938a94a61d881918ef32cb999ff937c", - "connector_2b0a9009c9c64bf9933a3dae3f2b1254", - "connector_3f8d1a79f27c4c7ba1a897ab13bf37dc", - "connector_68de829bf7648191acd70a907364c67c", - "connector_68e004f14af881919eb50893d3d9f523", - "connector_69272cb413a081919685ec3c88d1744e", -]; -const FIRST_PARTY_CHAT_DISALLOWED_CONNECTOR_IDS: &[&str] = - &["connector_0f9c9d4592e54d0a9a12b3f44a1e2010"]; -const DISALLOWED_CONNECTOR_PREFIX: &str = "connector_openai_"; - -pub fn filter_disallowed_connectors(connectors: Vec) -> Vec { - filter_disallowed_connectors_for_originator(connectors, originator().value.as_str()) -} - -fn filter_disallowed_connectors_for_originator( - connectors: Vec, - originator_value: &str, -) -> Vec { - connectors - .into_iter() - .filter(|connector| { - is_connector_id_allowed_for_originator(connector.id.as_str(), originator_value) - }) - .collect() -} - -fn is_connector_id_allowed_for_originator(connector_id: &str, originator_value: &str) -> bool { - let disallowed_connector_ids = if is_first_party_chat_originator(originator_value) { - FIRST_PARTY_CHAT_DISALLOWED_CONNECTOR_IDS - } else { - DISALLOWED_CONNECTOR_IDS - }; - - !connector_id.starts_with(DISALLOWED_CONNECTOR_PREFIX) - && !disallowed_connector_ids.contains(&connector_id) -} - fn read_apps_config(config: &Config) -> Option { let apps_config = read_user_apps_config(config); let 
had_apps_config = apps_config.is_some(); @@ -859,125 +690,6 @@ fn app_tool_policy_from_apps_config( AppToolPolicy { enabled, approval } } -fn collect_accessible_connectors(tools: I) -> Vec -where - I: IntoIterator, Option, Vec)>, -{ - let mut connectors: HashMap)> = HashMap::new(); - for (connector_id, connector_name, connector_description, plugin_display_names) in tools { - let connector_name = connector_name.unwrap_or_else(|| connector_id.clone()); - if let Some((existing, existing_plugin_display_names)) = connectors.get_mut(&connector_id) { - if existing.name == connector_id && connector_name != connector_id { - existing.name = connector_name; - } - if existing.description.is_none() && connector_description.is_some() { - existing.description = connector_description; - } - existing_plugin_display_names.extend(plugin_display_names); - } else { - connectors.insert( - connector_id.clone(), - ( - AppInfo { - id: connector_id.clone(), - name: connector_name, - description: connector_description, - logo_url: None, - logo_url_dark: None, - distribution_channel: None, - branding: None, - app_metadata: None, - labels: None, - install_url: None, - is_accessible: true, - is_enabled: true, - plugin_display_names: Vec::new(), - }, - plugin_display_names - .into_iter() - .collect::>(), - ), - ); - } - } - let mut accessible: Vec = connectors - .into_values() - .map(|(mut connector, plugin_display_names)| { - connector.plugin_display_names = plugin_display_names.into_iter().collect(); - connector.install_url = Some(connector_install_url(&connector.name, &connector.id)); - connector - }) - .collect(); - accessible.sort_by(|left, right| { - right - .is_accessible - .cmp(&left.is_accessible) - .then_with(|| left.name.cmp(&right.name)) - .then_with(|| left.id.cmp(&right.id)) - }); - accessible -} - -fn plugin_app_to_app_info(connector_id: AppConnectorId) -> AppInfo { - // Leave the placeholder name as the connector id so merge_connectors() can - // replace it with canonical app 
metadata from directory fetches or - // connector_name values from codex_apps tool discovery. - let connector_id = connector_id.0; - let name = connector_id.clone(); - AppInfo { - id: connector_id.clone(), - name: name.clone(), - description: None, - logo_url: None, - logo_url_dark: None, - distribution_channel: None, - branding: None, - app_metadata: None, - labels: None, - install_url: Some(connector_install_url(&name, &connector_id)), - is_accessible: false, - is_enabled: true, - plugin_display_names: Vec::new(), - } -} - -fn normalize_connector_value(value: Option<&str>) -> Option { - value - .map(str::trim) - .filter(|value| !value.is_empty()) - .map(str::to_string) -} - -pub fn connector_install_url(name: &str, connector_id: &str) -> String { - let slug = sanitize_slug(name); - format!("https://chatgpt.com/apps/{slug}/{connector_id}") -} - -pub fn sanitize_name(name: &str) -> String { - sanitize_slug(name).replace("-", "_") -} - -fn sanitize_slug(name: &str) -> String { - let mut normalized = String::with_capacity(name.len()); - for character in name.chars() { - if character.is_ascii_alphanumeric() { - normalized.push(character.to_ascii_lowercase()); - } else { - normalized.push('-'); - } - } - let normalized = normalized.trim_matches('-'); - if normalized.is_empty() { - "app".to_string() - } else { - normalized.to_string() - } -} - -fn format_connector_label(name: &str, _id: &str) -> String { - name.to_string() -} - #[cfg(test)] #[path = "connectors_tests.rs"] mod tests; diff --git a/codex-rs/core/src/connectors_tests.rs b/codex-rs/core/src/connectors_tests.rs index 3c6504111a..e462c7939e 100644 --- a/codex-rs/core/src/connectors_tests.rs +++ b/codex-rs/core/src/connectors_tests.rs @@ -11,6 +11,13 @@ use codex_config::types::AppConfig; use codex_config::types::AppToolConfig; use codex_config::types::AppToolsConfig; use codex_config::types::AppsDefaultConfig; +use codex_connectors::filter::filter_disallowed_connectors; +use 
codex_connectors::filter::filter_tool_suggest_discoverable_connectors; +use codex_connectors::merge::merge_connectors; +use codex_connectors::merge::plugin_connector_to_app_info; +use codex_connectors::metadata::connector_install_url; +use codex_connectors::metadata::connector_mention_slug; +use codex_connectors::metadata::sanitize_name; use codex_features::Feature; use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_mcp::ToolInfo; @@ -138,7 +145,7 @@ fn with_accessible_connectors_cache_cleared(f: impl FnOnce() -> R) -> R { #[test] fn merge_connectors_replaces_plugin_placeholder_name_with_accessible_name() { - let plugin = plugin_app_to_app_info(AppConnectorId("calendar".to_string())); + let plugin = plugin_connector_to_app_info("calendar".to_string()); let accessible = google_calendar_accessible_connector(&[]); let merged = merge_connectors(vec![plugin], vec![accessible]); @@ -281,7 +288,7 @@ async fn refresh_accessible_connectors_cache_from_mcp_tools_writes_latest_instal #[test] fn merge_connectors_unions_and_dedupes_plugin_display_names() { - let mut plugin = plugin_app_to_app_info(AppConnectorId("calendar".to_string())); + let mut plugin = plugin_connector_to_app_info("calendar".to_string()); plugin.plugin_display_names = plugin_names(&["sample", "alpha", "sample"]); let accessible = google_calendar_accessible_connector(&["beta", "alpha"]); @@ -975,33 +982,40 @@ fn app_tool_policy_matches_prefix_stripped_tool_name_for_tool_config() { #[test] fn filter_disallowed_connectors_allows_non_disallowed_connectors() { - let filtered = filter_disallowed_connectors(vec![app("asdk_app_hidden"), app("alpha")]); + let filtered = + filter_disallowed_connectors(vec![app("asdk_app_hidden"), app("alpha")], "codex_cli"); assert_eq!(filtered, vec![app("asdk_app_hidden"), app("alpha")]); } #[test] fn filter_disallowed_connectors_filters_openai_prefix() { - let filtered = filter_disallowed_connectors(vec![ - app("connector_openai_foo"), - app("connector_openai_bar"), - 
app("gamma"), - ]); + let filtered = filter_disallowed_connectors( + vec![ + app("connector_openai_foo"), + app("connector_openai_bar"), + app("gamma"), + ], + "codex_cli", + ); assert_eq!(filtered, vec![app("gamma")]); } #[test] fn filter_disallowed_connectors_filters_disallowed_connector_ids() { - let filtered = filter_disallowed_connectors(vec![ - app("asdk_app_6938a94a61d881918ef32cb999ff937c"), - app("connector_3f8d1a79f27c4c7ba1a897ab13bf37dc"), - app("delta"), - ]); + let filtered = filter_disallowed_connectors( + vec![ + app("asdk_app_6938a94a61d881918ef32cb999ff937c"), + app("connector_3f8d1a79f27c4c7ba1a897ab13bf37dc"), + app("delta"), + ], + "codex_cli", + ); assert_eq!(filtered, vec![app("delta")]); } #[test] fn first_party_chat_originator_filters_target_and_openai_prefixed_connectors() { - let filtered = filter_disallowed_connectors_for_originator( + let filtered = filter_disallowed_connectors( vec![ app("connector_openai_foo"), app("asdk_app_6938a94a61d881918ef32cb999ff937c"), @@ -1037,7 +1051,7 @@ discoverables = [ .expect("config should load"); assert_eq!( - tool_suggest_connector_ids(&config), + tool_suggest_connector_ids(&config).await, HashSet::from(["connector_2128aebfecb84f64a069897515042a44".to_string()]) ); } @@ -1064,6 +1078,7 @@ fn filter_tool_suggest_discoverable_connectors_keeps_only_plugin_backed_uninstal "connector_2128aebfecb84f64a069897515042a44".to_string(), "connector_68df038e0ba48191908c8434991bbac2".to_string(), ]), + "codex_cli", ); assert_eq!( @@ -1103,6 +1118,7 @@ fn filter_tool_suggest_discoverable_connectors_excludes_accessible_apps_even_whe "connector_2128aebfecb84f64a069897515042a44".to_string(), "connector_68df038e0ba48191908c8434991bbac2".to_string(), ]), + "codex_cli", ); assert_eq!(filtered, Vec::::new()); diff --git a/codex-rs/core/src/context_manager/history_tests.rs b/codex-rs/core/src/context_manager/history_tests.rs index 9720492c1f..ff71b797be 100644 --- a/codex-rs/core/src/context_manager/history_tests.rs +++ 
b/codex-rs/core/src/context_manager/history_tests.rs @@ -133,6 +133,7 @@ fn reference_context_item() -> TurnContextItem { approval_policy: AskForApproval::OnRequest, sandbox_policy: SandboxPolicy::new_read_only_policy(), network: None, + file_system_sandbox_policy: None, model: "gpt-test".to_string(), personality: None, collaboration_mode: None, diff --git a/codex-rs/core/src/exec.rs b/codex-rs/core/src/exec.rs index af39657e00..396b7701be 100644 --- a/codex-rs/core/src/exec.rs +++ b/codex-rs/core/src/exec.rs @@ -221,7 +221,7 @@ pub async fn process_exec_tool_call( sandbox_policy: &SandboxPolicy, file_system_sandbox_policy: &FileSystemSandboxPolicy, network_sandbox_policy: NetworkSandboxPolicy, - sandbox_cwd: &Path, + sandbox_cwd: &AbsolutePathBuf, codex_linux_sandbox_exe: &Option, use_legacy_landlock: bool, stdout_stream: Option, @@ -247,12 +247,28 @@ pub fn build_exec_request( sandbox_policy: &SandboxPolicy, file_system_sandbox_policy: &FileSystemSandboxPolicy, network_sandbox_policy: NetworkSandboxPolicy, - sandbox_cwd: &Path, + sandbox_cwd: &AbsolutePathBuf, codex_linux_sandbox_exe: &Option, use_legacy_landlock: bool, ) -> Result { - let windows_sandbox_level = params.windows_sandbox_level; - let enforce_managed_network = params.network.is_some(); + let ExecParams { + command, + cwd, + mut env, + expiration, + capture_policy, + network, + windows_sandbox_level, + windows_sandbox_private_desktop, + + // TODO: Should arg0 be set on the ExecRequest that is returned? + arg0: _, + // These fields are related to approvals, so can be ignored here. 
+ justification: _, + sandbox_permissions: _, + } = params; + + let enforce_managed_network = network.is_some(); let sandbox_type = select_process_exec_tool_sandbox_type( file_system_sandbox_policy, network_sandbox_policy, @@ -261,19 +277,6 @@ pub fn build_exec_request( ); tracing::debug!("Sandbox type: {sandbox_type:?}"); - let ExecParams { - command, - cwd, - mut env, - expiration, - capture_policy, - network, - sandbox_permissions: _, - windows_sandbox_level, - windows_sandbox_private_desktop, - justification: _, - arg0: _, - } = params; if let Some(network) = network.as_ref() { network.apply_to_env(&mut env); } @@ -349,6 +352,7 @@ pub(crate) async fn execute_exec_request( command, cwd, env, + exec_server_env_config: _, network, expiration, capture_policy, @@ -356,7 +360,8 @@ pub(crate) async fn execute_exec_request( windows_sandbox_level, windows_sandbox_private_desktop, sandbox_policy, - file_system_sandbox_policy, + // TODO(mbolin): Use file_system_sandbox_policy instead of sandbox_policy. 
+ file_system_sandbox_policy: _, network_sandbox_policy, windows_sandbox_filesystem_overrides, arg0, @@ -377,21 +382,40 @@ pub(crate) async fn execute_exec_request( }; let start = Instant::now(); - let raw_output_result = exec( + let raw_output_result = get_raw_output_result( params, - sandbox, - &sandbox_policy, - &file_system_sandbox_policy, - windows_sandbox_filesystem_overrides.as_ref(), network_sandbox_policy, stdout_stream, after_spawn, + sandbox, + &sandbox_policy, + windows_sandbox_filesystem_overrides.as_ref(), ) .await; let duration = start.elapsed(); finalize_exec_result(raw_output_result, sandbox, duration) } +async fn get_raw_output_result( + params: ExecParams, + network_sandbox_policy: NetworkSandboxPolicy, + stdout_stream: Option, + after_spawn: Option>, + #[cfg_attr(not(windows), allow(unused_variables))] sandbox: SandboxType, + #[cfg_attr(not(windows), allow(unused_variables))] sandbox_policy: &SandboxPolicy, + #[cfg_attr(not(windows), allow(unused_variables))] windows_sandbox_filesystem_overrides: Option< + &WindowsSandboxFilesystemOverrides, + >, +) -> Result { + #[cfg(target_os = "windows")] + if sandbox == SandboxType::WindowsRestrictedToken { + return exec_windows_sandbox(params, sandbox_policy, windows_sandbox_filesystem_overrides) + .await; + } + + exec(params, network_sandbox_policy, stdout_stream, after_spawn).await +} + #[cfg(target_os = "windows")] fn extract_create_process_as_user_error_code(err: &str) -> Option { let marker = "CreateProcessAsUserW failed: "; @@ -798,26 +822,24 @@ fn aggregate_output( } } -#[allow(clippy::too_many_arguments)] +/// This is a general-purpose function for executing a command specified by +/// [ExecParams]. Events are reported via `stdout_stream`, if specified, and +/// `after_spawn` is invoked once the child process has been spawned, before +/// output consumption begins. 
+/// +/// `network_sandbox_policy` is used to determine whether +/// CODEX_SANDBOX_NETWORK_DISABLED=1 is added to the environment of the spawned +/// process. +/// +/// Note this command does not apply any sandboxing logic. The caller is +/// responsible for constructing [ExecParams::command] to include any sandboxing +/// wrapper args, as appropriate. async fn exec( params: ExecParams, - _sandbox: SandboxType, - _sandbox_policy: &SandboxPolicy, - _file_system_sandbox_policy: &FileSystemSandboxPolicy, - _windows_sandbox_filesystem_overrides: Option<&WindowsSandboxFilesystemOverrides>, network_sandbox_policy: NetworkSandboxPolicy, stdout_stream: Option, after_spawn: Option>, ) -> Result { - #[cfg(target_os = "windows")] - if _sandbox == SandboxType::WindowsRestrictedToken { - return exec_windows_sandbox( - params, - _sandbox_policy, - _windows_sandbox_filesystem_overrides, - ) - .await; - } let ExecParams { command, cwd, @@ -826,8 +848,14 @@ async fn exec( arg0, expiration, capture_policy, + + // If applicable, these fields should have been honored upstream of + // this exec call. windows_sandbox_level: _, - .. + windows_sandbox_private_desktop: _, + // These fields are related to approvals, so can be ignored here. + sandbox_permissions: _, + justification: _, } = params; if let Some(network) = network.as_ref() { network.apply_to_env(&mut env); @@ -844,7 +872,7 @@ async fn exec( program: PathBuf::from(program), args: args.into(), arg0: arg0_ref, - cwd: cwd.to_path_buf(), + cwd, network_sandbox_policy, // The environment already has attempt-scoped proxy settings from // apply_to_env_for_attempt above. 
Passing network here would reapply @@ -880,7 +908,7 @@ pub(crate) fn unsupported_windows_restricted_token_sandbox_reason( sandbox_policy: &SandboxPolicy, file_system_sandbox_policy: &FileSystemSandboxPolicy, network_sandbox_policy: NetworkSandboxPolicy, - sandbox_policy_cwd: &Path, + sandbox_policy_cwd: &AbsolutePathBuf, windows_sandbox_level: WindowsSandboxLevel, ) -> Option { if windows_sandbox_level == WindowsSandboxLevel::Elevated { @@ -911,7 +939,7 @@ pub(crate) fn resolve_windows_restricted_token_filesystem_overrides( sandbox_policy: &SandboxPolicy, file_system_sandbox_policy: &FileSystemSandboxPolicy, network_sandbox_policy: NetworkSandboxPolicy, - sandbox_policy_cwd: &Path, + sandbox_policy_cwd: &AbsolutePathBuf, windows_sandbox_level: WindowsSandboxLevel, ) -> std::result::Result, String> { if sandbox != SandboxType::WindowsRestrictedToken @@ -953,6 +981,9 @@ pub(crate) fn resolve_windows_restricted_token_filesystem_overrides( if !file_system_sandbox_policy .get_unreadable_roots_with_cwd(sandbox_policy_cwd) .is_empty() + || !file_system_sandbox_policy + .get_unreadable_globs_with_cwd(sandbox_policy_cwd) + .is_empty() { return Err( "windows unelevated restricted-token sandbox cannot enforce unreadable split filesystem carveouts directly; refusing to run unsandboxed" @@ -1047,7 +1078,7 @@ pub(crate) fn resolve_windows_elevated_filesystem_overrides( sandbox_policy: &SandboxPolicy, file_system_sandbox_policy: &FileSystemSandboxPolicy, network_sandbox_policy: NetworkSandboxPolicy, - sandbox_policy_cwd: &Path, + sandbox_policy_cwd: &AbsolutePathBuf, use_windows_elevated_backend: bool, ) -> std::result::Result, String> { if sandbox != SandboxType::WindowsRestrictedToken || !use_windows_elevated_backend { @@ -1068,6 +1099,9 @@ pub(crate) fn resolve_windows_elevated_filesystem_overrides( if !file_system_sandbox_policy .get_unreadable_roots_with_cwd(sandbox_policy_cwd) .is_empty() + || !file_system_sandbox_policy + .get_unreadable_globs_with_cwd(sandbox_policy_cwd) 
+ .is_empty() { return Err( "windows elevated sandbox cannot enforce unreadable split filesystem carveouts directly; refusing to run unsandboxed" diff --git a/codex-rs/core/src/exec_env.rs b/codex-rs/core/src/exec_env.rs index a50fcc2538..ad94bc51a0 100644 --- a/codex-rs/core/src/exec_env.rs +++ b/codex-rs/core/src/exec_env.rs @@ -1,11 +1,10 @@ +#[cfg(test)] use codex_config::types::EnvironmentVariablePattern; use codex_config::types::ShellEnvironmentPolicy; -use codex_config::types::ShellEnvironmentPolicyInherit; use codex_protocol::ThreadId; use std::collections::HashMap; -use std::collections::HashSet; -pub const CODEX_THREAD_ID_ENV_VAR: &str = "CODEX_THREAD_ID"; +pub use codex_config::shell_environment::CODEX_THREAD_ID_ENV_VAR; /// Construct an environment map based on the rules in the specified policy. The /// resulting map can be passed directly to `Command::envs()` after calling @@ -21,9 +20,11 @@ pub fn create_env( policy: &ShellEnvironmentPolicy, thread_id: Option, ) -> HashMap { - create_env_from_vars(std::env::vars(), policy, thread_id) + let thread_id = thread_id.map(|thread_id| thread_id.to_string()); + codex_config::shell_environment::create_env(policy, thread_id.as_deref()) } +#[cfg(all(test, target_os = "windows"))] fn create_env_from_vars( vars: I, policy: &ShellEnvironmentPolicy, @@ -32,35 +33,11 @@ fn create_env_from_vars( where I: IntoIterator, { - let mut env_map = populate_env(vars, policy, thread_id); - - if cfg!(target_os = "windows") { - // This is a workaround to address the failures we are seeing in the - // following tests when run via Bazel on Windows: - // - // ``` - // suite::shell_command::unicode_output::with_login - // suite::shell_command::unicode_output::without_login - // ``` - // - // Currently, we can only reproduce these failures in CI, which makes - // iteration times long, so we include this quick fix for now to unblock - // getting the Windows Bazel build running. 
- if !env_map.keys().any(|k| k.eq_ignore_ascii_case("PATHEXT")) { - env_map.insert("PATHEXT".to_string(), ".COM;.EXE;.BAT;.CMD".to_string()); - } - } - env_map + let thread_id = thread_id.map(|thread_id| thread_id.to_string()); + codex_config::shell_environment::create_env_from_vars(vars, policy, thread_id.as_deref()) } -const COMMON_CORE_VARS: &[&str] = &["PATH", "SHELL", "TMPDIR", "TEMP", "TMP"]; - -#[cfg(target_os = "windows")] -const PLATFORM_CORE_VARS: &[&str] = &["PATHEXT", "USERNAME", "USERPROFILE"]; - -#[cfg(unix)] -const PLATFORM_CORE_VARS: &[&str] = &["HOME", "LANG", "LC_ALL", "LC_CTYPE", "LOGNAME", "USER"]; - +#[cfg(test)] fn populate_env( vars: I, policy: &ShellEnvironmentPolicy, @@ -69,66 +46,8 @@ fn populate_env( where I: IntoIterator, { - // Step 1 – determine the starting set of variables based on the - // `inherit` strategy. - let mut env_map: HashMap = match policy.inherit { - ShellEnvironmentPolicyInherit::All => vars.into_iter().collect(), - ShellEnvironmentPolicyInherit::None => HashMap::new(), - ShellEnvironmentPolicyInherit::Core => { - let core_vars: HashSet<&str> = COMMON_CORE_VARS - .iter() - .copied() - .chain(PLATFORM_CORE_VARS.iter().copied()) - .collect(); - let is_core_var = |name: &str| { - if cfg!(target_os = "windows") { - core_vars - .iter() - .any(|allowed| allowed.eq_ignore_ascii_case(name)) - } else { - core_vars.contains(name) - } - }; - vars.into_iter().filter(|(k, _)| is_core_var(k)).collect() - } - }; - - // Internal helper – does `name` match **any** pattern in `patterns`? - let matches_any = |name: &str, patterns: &[EnvironmentVariablePattern]| -> bool { - patterns.iter().any(|pattern| pattern.matches(name)) - }; - - // Step 2 – Apply the default exclude if not disabled. 
- if !policy.ignore_default_excludes { - let default_excludes = vec![ - EnvironmentVariablePattern::new_case_insensitive("*KEY*"), - EnvironmentVariablePattern::new_case_insensitive("*SECRET*"), - EnvironmentVariablePattern::new_case_insensitive("*TOKEN*"), - ]; - env_map.retain(|k, _| !matches_any(k, &default_excludes)); - } - - // Step 3 – Apply custom excludes. - if !policy.exclude.is_empty() { - env_map.retain(|k, _| !matches_any(k, &policy.exclude)); - } - - // Step 4 – Apply user-provided overrides. - for (key, val) in &policy.r#set { - env_map.insert(key.clone(), val.clone()); - } - - // Step 5 – If include_only is non-empty, keep *only* the matching vars. - if !policy.include_only.is_empty() { - env_map.retain(|k, _| matches_any(k, &policy.include_only)); - } - - // Step 6 – Populate the thread ID environment variable when provided. - if let Some(thread_id) = thread_id { - env_map.insert(CODEX_THREAD_ID_ENV_VAR.to_string(), thread_id.to_string()); - } - - env_map + let thread_id = thread_id.map(|thread_id| thread_id.to_string()); + codex_config::shell_environment::populate_env(vars, policy, thread_id.as_deref()) } #[cfg(test)] diff --git a/codex-rs/core/src/exec_tests.rs b/codex-rs/core/src/exec_tests.rs index 937a7d6f80..1cfa87ff3f 100644 --- a/codex-rs/core/src/exec_tests.rs +++ b/codex-rs/core/src/exec_tests.rs @@ -1,6 +1,8 @@ use super::*; use codex_protocol::config_types::WindowsSandboxLevel; use codex_sandboxing::SandboxType; +use core_test_support::PathBufExt; +use core_test_support::PathExt; use pretty_assertions::assert_eq; use std::collections::HashMap; use std::time::Duration; @@ -275,10 +277,6 @@ async fn exec_full_buffer_capture_ignores_expiration() -> Result<()> { justification: None, arg0: None, }, - SandboxType::None, - &SandboxPolicy::DangerFullAccess, - &FileSystemSandboxPolicy::unrestricted(), - /*windows_sandbox_filesystem_overrides*/ None, NetworkSandboxPolicy::Enabled, /*stdout_stream*/ None, /*after_spawn*/ None, @@ -315,10 +313,6 @@ 
async fn exec_full_buffer_capture_keeps_io_drain_timeout_when_descendant_holds_p justification: None, arg0: None, }, - SandboxType::None, - &SandboxPolicy::DangerFullAccess, - &FileSystemSandboxPolicy::unrestricted(), - /*windows_sandbox_filesystem_overrides*/ None, NetworkSandboxPolicy::Enabled, /*stdout_stream*/ None, /*after_spawn*/ None, @@ -369,7 +363,7 @@ async fn process_exec_tool_call_preserves_full_buffer_capture_policy() -> Result &sandbox_policy, &FileSystemSandboxPolicy::from(&sandbox_policy), NetworkSandboxPolicy::Enabled, - cwd.as_path(), + &cwd, &None, /*use_legacy_landlock*/ false, /*stdout_stream*/ None, @@ -436,7 +430,7 @@ fn windows_restricted_token_rejects_network_only_restrictions() { network_access: codex_protocol::protocol::NetworkAccess::Restricted, }; let file_system_policy = FileSystemSandboxPolicy::unrestricted(); - let sandbox_policy_cwd = std::env::current_dir().expect("cwd"); + let sandbox_policy_cwd = AbsolutePathBuf::current_dir().expect("cwd"); assert_eq!( unsupported_windows_restricted_token_sandbox_reason( @@ -457,7 +451,7 @@ fn windows_restricted_token_rejects_network_only_restrictions() { fn windows_restricted_token_allows_legacy_restricted_policies() { let policy = SandboxPolicy::new_read_only_policy(); let file_system_policy = FileSystemSandboxPolicy::from(&policy); - let sandbox_policy_cwd = std::env::current_dir().expect("cwd"); + let sandbox_policy_cwd = AbsolutePathBuf::current_dir().expect("cwd"); assert_eq!( unsupported_windows_restricted_token_sandbox_reason( @@ -482,7 +476,7 @@ fn windows_restricted_token_allows_legacy_workspace_write_policies() { exclude_slash_tmp: true, }; let file_system_policy = FileSystemSandboxPolicy::from(&policy); - let sandbox_policy_cwd = std::env::current_dir().expect("cwd"); + let sandbox_policy_cwd = AbsolutePathBuf::current_dir().expect("cwd"); assert_eq!( unsupported_windows_restricted_token_sandbox_reason( @@ -520,7 +514,7 @@ fn windows_elevated_allows_legacy_restricted_read_policies() 
{ &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), WindowsSandboxLevel::Elevated, ), None @@ -561,7 +555,7 @@ fn windows_restricted_token_rejects_split_only_filesystem_policies() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), WindowsSandboxLevel::RestrictedToken, ), Some( @@ -605,7 +599,7 @@ fn windows_restricted_token_rejects_root_write_read_only_carveouts() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), WindowsSandboxLevel::RestrictedToken, ), Some( @@ -618,9 +612,11 @@ fn windows_restricted_token_rejects_root_write_read_only_carveouts() { #[test] fn windows_restricted_token_supports_full_read_split_write_read_carveouts() { let temp_dir = tempfile::TempDir::new().expect("tempdir"); - let cwd = dunce::canonicalize(temp_dir.path()).expect("canonicalize temp dir"); + let cwd = dunce::canonicalize(temp_dir.path()) + .expect("canonicalize temp dir") + .abs(); let docs = cwd.join("docs"); - std::fs::create_dir_all(&docs).expect("create docs"); + std::fs::create_dir_all(docs.as_path()).expect("create docs"); let policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], read_only_access: codex_protocol::protocol::ReadOnlyAccess::FullAccess, @@ -642,20 +638,14 @@ fn windows_restricted_token_supports_full_read_split_write_read_carveouts() { access: codex_protocol::permissions::FileSystemAccessMode::Write, }, codex_protocol::permissions::FileSystemSandboxEntry { - path: codex_protocol::permissions::FileSystemPath::Path { - path: codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(&docs) - .expect("absolute docs"), - }, + path: codex_protocol::permissions::FileSystemPath::Path { path: docs.clone() }, access: codex_protocol::permissions::FileSystemAccessMode::Read, }, ]); // The legacy workspace-write root already protects top-level `.codex`, so // the restricted-token 
overlay only needs the extra read-only docs carveout. - let expected_deny_write_paths = vec![ - codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(&docs) - .expect("absolute docs"), - ]; + let expected_deny_write_paths = vec![docs]; assert_eq!( resolve_windows_restricted_token_filesystem_overrides( @@ -700,7 +690,7 @@ fn windows_elevated_supports_split_restricted_read_roots() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), /*use_windows_elevated_backend*/ true, ), Ok(Some(WindowsSandboxFilesystemOverrides { @@ -752,7 +742,7 @@ fn windows_elevated_supports_split_write_read_carveouts() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), /*use_windows_elevated_backend*/ true, ), Ok(Some(WindowsSandboxFilesystemOverrides { @@ -806,7 +796,54 @@ fn windows_elevated_rejects_unreadable_split_carveouts() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), + WindowsSandboxLevel::Elevated, + ), + Some( + "windows elevated sandbox cannot enforce unreadable split filesystem carveouts directly; refusing to run unsandboxed" + .to_string() + ) + ); +} + +#[test] +fn windows_elevated_rejects_unreadable_globs() { + let temp_dir = tempfile::TempDir::new().expect("tempdir"); + let policy = SandboxPolicy::WorkspaceWrite { + writable_roots: vec![], + read_only_access: codex_protocol::protocol::ReadOnlyAccess::FullAccess, + network_access: false, + exclude_tmpdir_env_var: true, + exclude_slash_tmp: true, + }; + let file_system_policy = FileSystemSandboxPolicy::restricted(vec![ + codex_protocol::permissions::FileSystemSandboxEntry { + path: codex_protocol::permissions::FileSystemPath::Special { + value: codex_protocol::permissions::FileSystemSpecialPath::Root, + }, + access: codex_protocol::permissions::FileSystemAccessMode::Read, + }, + codex_protocol::permissions::FileSystemSandboxEntry { + path: 
codex_protocol::permissions::FileSystemPath::Special { + value: codex_protocol::permissions::FileSystemSpecialPath::CurrentWorkingDirectory, + }, + access: codex_protocol::permissions::FileSystemAccessMode::Write, + }, + codex_protocol::permissions::FileSystemSandboxEntry { + path: codex_protocol::permissions::FileSystemPath::GlobPattern { + pattern: "**/*.env".to_string(), + }, + access: codex_protocol::permissions::FileSystemAccessMode::None, + }, + ]); + + assert_eq!( + unsupported_windows_restricted_token_sandbox_reason( + SandboxType::WindowsRestrictedToken, + &policy, + &file_system_policy, + NetworkSandboxPolicy::Restricted, + &temp_dir.path().abs(), WindowsSandboxLevel::Elevated, ), Some( @@ -864,7 +901,7 @@ fn windows_elevated_rejects_reopened_writable_descendants() { &policy, &file_system_policy, NetworkSandboxPolicy::Restricted, - temp_dir.path(), + &temp_dir.path().abs(), WindowsSandboxLevel::Elevated, ), Some( @@ -933,10 +970,6 @@ async fn kill_child_process_group_kills_grandchildren_on_timeout() -> Result<()> let output = exec( params, - SandboxType::None, - &SandboxPolicy::new_read_only_policy(), - &FileSystemSandboxPolicy::from(&SandboxPolicy::new_read_only_policy()), - /*windows_sandbox_filesystem_overrides*/ None, NetworkSandboxPolicy::Restricted, /*stdout_stream*/ None, /*after_spawn*/ None, @@ -998,7 +1031,7 @@ async fn process_exec_tool_call_respects_cancellation_token() -> Result<()> { &SandboxPolicy::DangerFullAccess, &FileSystemSandboxPolicy::from(&SandboxPolicy::DangerFullAccess), NetworkSandboxPolicy::Enabled, - cwd.as_path(), + &cwd, &None, /*use_legacy_landlock*/ false, /*stdout_stream*/ None, diff --git a/codex-rs/core/src/external_agent_config.rs b/codex-rs/core/src/external_agent_config.rs index efa90c8494..f0f4443a75 100644 --- a/codex-rs/core/src/external_agent_config.rs +++ b/codex-rs/core/src/external_agent_config.rs @@ -1,4 +1,17 @@ +use crate::config::Config; +use crate::config::ConfigBuilder; +use 
crate::plugins::MarketplaceAddRequest; +use crate::plugins::PluginId; +use crate::plugins::PluginInstallRequest; +use crate::plugins::PluginsManager; +use crate::plugins::add_marketplace; +use crate::plugins::configured_plugins_from_stack; +use crate::plugins::find_marketplace_manifest_path; +use crate::plugins::parse_marketplace_source; +use codex_core_plugins::marketplace::MarketplacePluginInstallPolicy; +use codex_protocol::protocol::Product; use serde_json::Value as JsonValue; +use std::collections::BTreeMap; use std::collections::HashSet; use std::ffi::OsString; use std::fs; @@ -9,6 +22,8 @@ use toml::Value as TomlValue; const EXTERNAL_AGENT_CONFIG_DETECT_METRIC: &str = "codex.external_agent_config.detect"; const EXTERNAL_AGENT_CONFIG_IMPORT_METRIC: &str = "codex.external_agent_config.import"; +const EXTERNAL_AGENT_DIR: &str = ".claude"; +const EXTERNAL_AGENT_CONFIG_MD: &str = "CLAUDE.md"; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ExternalAgentConfigDetectOptions { @@ -21,59 +36,84 @@ pub enum ExternalAgentConfigMigrationItemType { Config, Skills, AgentsMd, + Plugins, McpServerConfig, } +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PluginsMigration { + pub marketplace_name: String, + pub plugin_names: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MigrationDetails { + pub plugins: Vec, +} + +#[derive(Debug, Clone, Default, PartialEq, Eq)] +struct PluginImportOutcome { + succeeded_marketplaces: Vec, + succeeded_plugin_ids: Vec, + failed_marketplaces: Vec, + failed_plugin_ids: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct ExternalAgentConfigMigrationItem { pub item_type: ExternalAgentConfigMigrationItemType, pub description: String, pub cwd: Option, + pub details: Option, } #[derive(Clone)] pub struct ExternalAgentConfigService { codex_home: PathBuf, - claude_home: PathBuf, + external_agent_home: PathBuf, } impl ExternalAgentConfigService { pub fn new(codex_home: PathBuf) -> Self { - let claude_home = 
default_claude_home(); + let external_agent_home = default_external_agent_home(); Self { codex_home, - claude_home, + external_agent_home, } } #[cfg(test)] - fn new_for_test(codex_home: PathBuf, claude_home: PathBuf) -> Self { + fn new_for_test(codex_home: PathBuf, external_agent_home: PathBuf) -> Self { Self { codex_home, - claude_home, + external_agent_home, } } - pub fn detect( + pub async fn detect( &self, params: ExternalAgentConfigDetectOptions, ) -> io::Result> { let mut items = Vec::new(); if params.include_home { - self.detect_migrations(/*repo_root*/ None, &mut items)?; + self.detect_migrations(/*repo_root*/ None, &mut items) + .await?; } for cwd in params.cwds.as_deref().unwrap_or(&[]) { let Some(repo_root) = find_repo_root(Some(cwd))? else { continue; }; - self.detect_migrations(Some(&repo_root), &mut items)?; + self.detect_migrations(Some(&repo_root), &mut items).await?; } Ok(items) } - pub fn import(&self, migration_items: Vec) -> io::Result<()> { + pub async fn import( + &self, + migration_items: Vec, + ) -> io::Result<()> { for migration_item in migration_items { match migration_item.item_type { ExternalAgentConfigMigrationItemType::Config => { @@ -100,6 +140,24 @@ impl ExternalAgentConfigService { /*skills_count*/ None, ); } + ExternalAgentConfigMigrationItemType::Plugins => { + let service = self.clone(); + let cwd = migration_item.cwd; + let details = migration_item.details; + tokio::spawn(async move { + if let Err(err) = service.import_plugins(cwd.as_deref(), details).await { + tracing::warn!( + error = %err, + "external agent config plugin import failed" + ); + } + }); + emit_migration_metric( + EXTERNAL_AGENT_CONFIG_IMPORT_METRIC, + ExternalAgentConfigMigrationItemType::Plugins, + /*skills_count*/ None, + ); + } ExternalAgentConfigMigrationItemType::McpServerConfig => {} } } @@ -107,25 +165,23 @@ impl ExternalAgentConfigService { Ok(()) } - fn detect_migrations( + async fn detect_migrations( &self, repo_root: Option<&Path>, items: &mut Vec, ) 
-> io::Result<()> { let cwd = repo_root.map(Path::to_path_buf); let source_settings = repo_root.map_or_else( - || self.claude_home.join("settings.json"), - |repo_root| repo_root.join(".claude").join("settings.json"), + || self.external_agent_home.join("settings.json"), + |repo_root| repo_root.join(EXTERNAL_AGENT_DIR).join("settings.json"), ); + let settings = read_external_settings(&source_settings)?; let target_config = repo_root.map_or_else( || self.codex_home.join("config.toml"), |repo_root| repo_root.join(".codex").join("config.toml"), ); - if source_settings.is_file() { - let raw_settings = fs::read_to_string(&source_settings)?; - let settings: JsonValue = serde_json::from_str(&raw_settings) - .map_err(|err| invalid_data_error(err.to_string()))?; - let migrated = build_config_from_external(&settings)?; + if let Some(settings) = settings.as_ref() { + let migrated = build_config_from_external(settings)?; if !is_empty_toml_table(&migrated) { let mut should_include = true; if target_config.exists() { @@ -149,6 +205,7 @@ impl ExternalAgentConfigService { target_config.display() ), cwd: cwd.clone(), + details: None, }); emit_migration_metric( EXTERNAL_AGENT_CONFIG_DETECT_METRIC, @@ -160,8 +217,8 @@ impl ExternalAgentConfigService { } let source_skills = repo_root.map_or_else( - || self.claude_home.join("skills"), - |repo_root| repo_root.join(".claude").join("skills"), + || self.external_agent_home.join("skills"), + |repo_root| repo_root.join(EXTERNAL_AGENT_DIR).join("skills"), ); let target_skills = repo_root.map_or_else( || self.home_target_skills_dir(), @@ -172,11 +229,12 @@ impl ExternalAgentConfigService { items.push(ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::Skills, description: format!( - "Copy skill folders from {} to {}", + "Migrate skills from {} to {}", source_skills.display(), target_skills.display() ), cwd: cwd.clone(), + details: None, }); emit_migration_metric( EXTERNAL_AGENT_CONFIG_DETECT_METRIC, @@ -188,7 
+246,7 @@ impl ExternalAgentConfigService { let source_agents_md = if let Some(repo_root) = repo_root { find_repo_agents_md_source(repo_root)? } else { - let path = self.claude_home.join("CLAUDE.md"); + let path = self.external_agent_home.join(EXTERNAL_AGENT_CONFIG_MD); is_non_empty_text_file(&path)?.then_some(path) }; let target_agents_md = repo_root.map_or_else( @@ -205,7 +263,8 @@ impl ExternalAgentConfigService { source_agents_md.display(), target_agents_md.display() ), - cwd, + cwd: cwd.clone(), + details: None, }); emit_migration_metric( EXTERNAL_AGENT_CONFIG_DETECT_METRIC, @@ -214,6 +273,43 @@ impl ExternalAgentConfigService { ); } + if let Some(settings) = settings.as_ref() { + match ConfigBuilder::default() + .codex_home(self.codex_home.clone()) + .fallback_cwd(Some(self.codex_home.clone())) + .build() + .await + { + Ok(config) => { + let configured_plugin_ids = + configured_plugins_from_stack(&config.config_layer_stack) + .into_keys() + .collect::>(); + let configured_marketplace_plugins = configured_marketplace_plugins( + &config, + &PluginsManager::new(self.codex_home.clone()), + )?; + if let Some(item) = self.detect_plugin_migration( + source_settings.as_path(), + repo_root.unwrap_or(self.external_agent_home.as_path()), + cwd.clone(), + settings, + &configured_plugin_ids, + &configured_marketplace_plugins, + ) { + items.push(item); + } + } + Err(err) => { + tracing::warn!( + error = %err, + settings_path = %source_settings.display(), + "skipping external agent plugin migration detection because config load failed" + ); + } + } + } + Ok(()) } @@ -224,17 +320,125 @@ impl ExternalAgentConfigService { .unwrap_or_else(|| PathBuf::from(".agents").join("skills")) } + fn detect_plugin_migration( + &self, + source_settings: &Path, + source_root: &Path, + cwd: Option, + settings: &JsonValue, + configured_plugin_ids: &HashSet, + configured_marketplace_plugins: &BTreeMap>, + ) -> Option { + let plugin_details = extract_plugin_migration_details( + settings, + 
source_root, + configured_plugin_ids, + configured_marketplace_plugins, + )?; + emit_migration_metric( + EXTERNAL_AGENT_CONFIG_DETECT_METRIC, + ExternalAgentConfigMigrationItemType::Plugins, + /*skills_count*/ None, + ); + + Some(ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!("Import enabled plugins from {}", source_settings.display()), + cwd, + details: Some(plugin_details), + }) + } + + async fn import_plugins( + &self, + cwd: Option<&Path>, + details: Option, + ) -> io::Result { + let Some(MigrationDetails { plugins }) = details else { + return Err(invalid_data_error( + "plugins migration item is missing details".to_string(), + )); + }; + let mut outcome = PluginImportOutcome::default(); + let plugins_manager = PluginsManager::new(self.codex_home.clone()); + for plugin_group in plugins { + let marketplace_name = plugin_group.marketplace_name.clone(); + let plugin_names = plugin_group.plugin_names; + let plugin_ids = plugin_names + .iter() + .map(|plugin_name| format!("{plugin_name}@{marketplace_name}")) + .collect::>(); + let source_settings = cwd.map_or_else( + || self.external_agent_home.join("settings.json"), + |cwd| cwd.join(EXTERNAL_AGENT_DIR).join("settings.json"), + ); + let source_root = cwd.unwrap_or(self.external_agent_home.as_path()); + let import_source = read_external_settings(&source_settings)?.and_then(|settings| { + collect_marketplace_import_sources(&settings, source_root).remove(&marketplace_name) + }); + let Some(import_source) = import_source else { + outcome.failed_marketplaces.push(marketplace_name); + outcome.failed_plugin_ids.extend(plugin_ids); + continue; + }; + let request = MarketplaceAddRequest { + source: import_source.source, + ref_name: import_source.ref_name, + sparse_paths: Vec::new(), + }; + let add_marketplace_outcome = add_marketplace(self.codex_home.clone(), request).await; + let marketplace_path = match add_marketplace_outcome { + 
Ok(add_marketplace_outcome) => { + let Some(marketplace_path) = find_marketplace_manifest_path( + add_marketplace_outcome.installed_root.as_path(), + ) else { + outcome.failed_marketplaces.push(marketplace_name); + outcome.failed_plugin_ids.extend(plugin_ids); + continue; + }; + outcome + .succeeded_marketplaces + .push(marketplace_name.clone()); + marketplace_path + } + Err(_) => { + outcome.failed_marketplaces.push(marketplace_name); + outcome.failed_plugin_ids.extend(plugin_ids); + continue; + } + }; + for plugin_name in plugin_names { + match plugins_manager + .install_plugin(PluginInstallRequest { + plugin_name: plugin_name.clone(), + marketplace_path: marketplace_path.clone(), + }) + .await + { + Ok(_) => outcome + .succeeded_plugin_ids + .push(format!("{plugin_name}@{marketplace_name}")), + Err(_) => outcome + .failed_plugin_ids + .push(format!("{plugin_name}@{marketplace_name}")), + } + } + } + + Ok(outcome) + } + fn import_config(&self, cwd: Option<&Path>) -> io::Result<()> { let (source_settings, target_config) = if let Some(repo_root) = find_repo_root(cwd)? { ( - repo_root.join(".claude").join("settings.json"), + repo_root.join(EXTERNAL_AGENT_DIR).join("settings.json"), repo_root.join(".codex").join("config.toml"), ) } else if cwd.is_some_and(|cwd| !cwd.as_os_str().is_empty()) { return Ok(()); } else { ( - self.claude_home.join("settings.json"), + self.external_agent_home.join("settings.json"), self.codex_home.join("config.toml"), ) }; @@ -279,14 +483,14 @@ impl ExternalAgentConfigService { fn import_skills(&self, cwd: Option<&Path>) -> io::Result { let (source_skills, target_skills) = if let Some(repo_root) = find_repo_root(cwd)? 
{ ( - repo_root.join(".claude").join("skills"), + repo_root.join(EXTERNAL_AGENT_DIR).join("skills"), repo_root.join(".agents").join("skills"), ) } else if cwd.is_some_and(|cwd| !cwd.as_os_str().is_empty()) { return Ok(0); } else { ( - self.claude_home.join("skills"), + self.external_agent_home.join("skills"), self.home_target_skills_dir(), ) }; @@ -326,7 +530,7 @@ impl ExternalAgentConfigService { return Ok(()); } else { ( - self.claude_home.join("CLAUDE.md"), + self.external_agent_home.join(EXTERNAL_AGENT_CONFIG_MD), self.codex_home.join("AGENTS.md"), ) }; @@ -345,12 +549,199 @@ impl ExternalAgentConfigService { } } -fn default_claude_home() -> PathBuf { +fn default_external_agent_home() -> PathBuf { if let Some(home) = std::env::var_os("HOME").or_else(|| std::env::var_os("USERPROFILE")) { - return PathBuf::from(home).join(".claude"); + return PathBuf::from(home).join(EXTERNAL_AGENT_DIR); } - PathBuf::from(".claude") + PathBuf::from(EXTERNAL_AGENT_DIR) +} + +fn read_external_settings(path: &Path) -> io::Result> { + if !path.is_file() { + return Ok(None); + } + + let raw_settings = fs::read_to_string(path)?; + let settings = + serde_json::from_str(&raw_settings).map_err(|err| invalid_data_error(err.to_string()))?; + Ok(Some(settings)) +} + +fn extract_plugin_migration_details( + settings: &JsonValue, + source_root: &Path, + configured_plugin_ids: &HashSet, + configured_marketplace_plugins: &BTreeMap>, +) -> Option { + let loadable_marketplaces = collect_marketplace_import_sources(settings, source_root) + .into_iter() + .filter_map(|(marketplace_name, source)| { + parse_marketplace_source(&source.source, source.ref_name) + .ok() + .map(|_| marketplace_name) + }) + .collect::>(); + let mut plugins = BTreeMap::new(); + for plugin_id in collect_enabled_plugins(settings) + .into_iter() + .filter(|plugin_id| !configured_plugin_ids.contains(plugin_id)) + { + let Ok(plugin_id) = PluginId::parse(&plugin_id) else { + continue; + }; + if let Some(installable_plugins) = + 
configured_marketplace_plugins.get(&plugin_id.marketplace_name) + { + if !installable_plugins.contains(&plugin_id.plugin_name) { + continue; + } + } else if !loadable_marketplaces.contains(&plugin_id.marketplace_name) { + continue; + } + let plugin_group = plugins + .entry(plugin_id.marketplace_name.clone()) + .or_insert_with(|| PluginsMigration { + marketplace_name: plugin_id.marketplace_name.clone(), + plugin_names: Vec::new(), + }); + plugin_group.plugin_names.push(plugin_id.plugin_name); + } + + let plugins = plugins + .into_values() + .filter_map(|mut plugin_group| { + if plugin_group.plugin_names.is_empty() { + return None; + } + plugin_group.plugin_names.sort(); + Some(plugin_group) + }) + .collect::>(); + if plugins.is_empty() { + return None; + } + + Some(MigrationDetails { plugins }) +} + +fn collect_enabled_plugins(settings: &JsonValue) -> Vec { + let Some(enabled_plugins) = settings + .as_object() + .and_then(|settings| settings.get("enabledPlugins")) + .and_then(JsonValue::as_object) + else { + return Vec::new(); + }; + + enabled_plugins + .iter() + .filter_map(|(plugin_key, enabled)| { + if !enabled.as_bool().unwrap_or(false) { + return None; + } + PluginId::parse(plugin_key) + .ok() + .map(|plugin_id| plugin_id.as_key()) + }) + .collect() +} + +fn configured_marketplace_plugins( + config: &Config, + plugins_manager: &PluginsManager, +) -> io::Result>> { + let marketplaces = plugins_manager + .list_marketplaces_for_config(config, &[]) + .map_err(|err| { + invalid_data_error(format!("failed to list configured marketplaces: {err}")) + })?; + let mut marketplace_plugins = BTreeMap::new(); + for marketplace in marketplaces.marketplaces { + let plugins = marketplace + .plugins + .into_iter() + .filter(|plugin| { + plugin.policy.installation != MarketplacePluginInstallPolicy::NotAvailable + }) + .filter(|plugin| { + plugin + .policy + .products + .as_deref() + .is_none_or(|products| Product::Codex.matches_product_restriction(products)) + }) + .map(|plugin| 
plugin.name) + .collect::>(); + marketplace_plugins.insert(marketplace.name, plugins); + } + Ok(marketplace_plugins) +} + +fn collect_marketplace_import_sources( + settings: &JsonValue, + source_root: &Path, +) -> BTreeMap { + let Some(extra_known_marketplaces) = settings + .as_object() + .and_then(|settings| settings.get("extraKnownMarketplaces")) + .and_then(JsonValue::as_object) + else { + return BTreeMap::new(); + }; + + extra_known_marketplaces + .iter() + .filter_map(|(name, value)| { + let source_fields = if let Some(source) = value.get("source") + && source.is_object() + { + source.as_object()? + } else { + value.as_object()? + }; + let source = source_fields + .get("repo") + .or_else(|| source_fields.get("url")) + .or_else(|| source_fields.get("path")) + .or_else(|| value.get("source"))? + .as_str()? + .trim() + .to_string(); + if source.is_empty() { + return None; + } + let source = resolve_external_marketplace_source(&source, source_root); + + let ref_name = source_fields + .get("ref") + .or_else(|| value.get("ref")) + .and_then(JsonValue::as_str) + .map(str::trim) + .filter(|value| !value.is_empty()) + .map(ToOwned::to_owned); + + Some((name.clone(), MarketplaceImportSource { source, ref_name })) + }) + .collect() +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct MarketplaceImportSource { + source: String, + ref_name: Option, +} + +fn resolve_external_marketplace_source(source: &str, source_root: &Path) -> String { + if !looks_like_relative_local_path(source) { + return source.to_string(); + } + + source_root.join(source).display().to_string() +} + +fn looks_like_relative_local_path(source: &str) -> bool { + source.starts_with("./") || source.starts_with("../") || source == "." || source == ".." 
} fn find_repo_root(cwd: Option<&Path>) -> io::Result> { @@ -435,8 +826,10 @@ fn is_non_empty_text_file(path: &Path) -> io::Result { fn find_repo_agents_md_source(repo_root: &Path) -> io::Result> { for candidate in [ - repo_root.join("CLAUDE.md"), - repo_root.join(".claude").join("CLAUDE.md"), + repo_root.join(EXTERNAL_AGENT_CONFIG_MD), + repo_root + .join(EXTERNAL_AGENT_DIR) + .join(EXTERNAL_AGENT_CONFIG_MD), ] { if is_non_empty_text_file(&candidate)? { return Ok(Some(candidate)); @@ -480,12 +873,16 @@ fn is_skill_md(path: &Path) -> bool { fn rewrite_and_copy_text_file(source: &Path, target: &Path) -> io::Result<()> { let source_contents = fs::read_to_string(source)?; - let rewritten = rewrite_claude_terms(&source_contents); + let rewritten = rewrite_external_agent_terms(&source_contents); fs::write(target, rewritten) } -fn rewrite_claude_terms(content: &str) -> String { - let mut rewritten = replace_case_insensitive_with_boundaries(content, "claude.md", "AGENTS.md"); +fn rewrite_external_agent_terms(content: &str) -> String { + let mut rewritten = replace_case_insensitive_with_boundaries( + content, + &EXTERNAL_AGENT_CONFIG_MD.to_ascii_lowercase(), + "AGENTS.md", + ); for from in [ "claude code", "claude-code", @@ -662,6 +1059,7 @@ fn migration_metric_tags( ExternalAgentConfigMigrationItemType::Config => "config", ExternalAgentConfigMigrationItemType::Skills => "skills", ExternalAgentConfigMigrationItemType::AgentsMd => "agents_md", + ExternalAgentConfigMigrationItemType::Plugins => "plugins", ExternalAgentConfigMigrationItemType::McpServerConfig => "mcp_server_config", }; let mut tags = vec![("migration_type", migration_type.to_string())]; diff --git a/codex-rs/core/src/external_agent_config_tests.rs b/codex-rs/core/src/external_agent_config_tests.rs index 7baadbec01..f01a40e3d2 100644 --- a/codex-rs/core/src/external_agent_config_tests.rs +++ b/codex-rs/core/src/external_agent_config_tests.rs @@ -1,38 +1,53 @@ use super::*; use pretty_assertions::assert_eq; 
+use std::io; use tempfile::TempDir; fn fixture_paths() -> (TempDir, PathBuf, PathBuf) { let root = TempDir::new().expect("create tempdir"); - let claude_home = root.path().join(".claude"); + let external_agent_home = root.path().join(".claude"); let codex_home = root.path().join(".codex"); - (root, claude_home, codex_home) + (root, external_agent_home, codex_home) } -fn service_for_paths(claude_home: PathBuf, codex_home: PathBuf) -> ExternalAgentConfigService { - ExternalAgentConfigService::new_for_test(codex_home, claude_home) +fn service_for_paths( + external_agent_home: PathBuf, + codex_home: PathBuf, +) -> ExternalAgentConfigService { + ExternalAgentConfigService::new_for_test(codex_home, external_agent_home) } -#[test] -fn detect_home_lists_config_skills_and_agents_md() { - let (_root, claude_home, codex_home) = fixture_paths(); +fn github_plugin_details() -> MigrationDetails { + MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "acme-tools".to_string(), + plugin_names: vec!["formatter".to_string()], + }], + } +} + +#[tokio::test] +async fn detect_home_lists_config_skills_and_agents_md() { + let (_root, external_agent_home, codex_home) = fixture_paths(); let agents_skills = codex_home .parent() .map(|parent| parent.join(".agents").join("skills")) .unwrap_or_else(|| PathBuf::from(".agents").join("skills")); - fs::create_dir_all(claude_home.join("skills").join("skill-a")).expect("create skills"); - fs::write(claude_home.join("CLAUDE.md"), "claude rules").expect("write claude md"); + fs::create_dir_all(external_agent_home.join("skills").join("skill-a")).expect("create skills"); + fs::write(external_agent_home.join("CLAUDE.md"), "claude rules") + .expect("write external agent md"); fs::write( - claude_home.join("settings.json"), + external_agent_home.join("settings.json"), r#"{"model":"claude","env":{"FOO":"bar"}}"#, ) .expect("write settings"); - let items = service_for_paths(claude_home.clone(), codex_home.clone()) + let items = 
service_for_paths(external_agent_home.clone(), codex_home.clone()) .detect(ExternalAgentConfigDetectOptions { include_home: true, cwds: None, }) + .await .expect("detect"); let expected = vec![ @@ -40,36 +55,39 @@ fn detect_home_lists_config_skills_and_agents_md() { item_type: ExternalAgentConfigMigrationItemType::Config, description: format!( "Migrate {} into {}", - claude_home.join("settings.json").display(), + external_agent_home.join("settings.json").display(), codex_home.join("config.toml").display() ), cwd: None, + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::Skills, description: format!( - "Copy skill folders from {} to {}", - claude_home.join("skills").display(), + "Migrate skills from {} to {}", + external_agent_home.join("skills").display(), agents_skills.display() ), cwd: None, + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, description: format!( "Import {} to {}", - claude_home.join("CLAUDE.md").display(), + external_agent_home.join("CLAUDE.md").display(), codex_home.join("AGENTS.md").display() ), cwd: None, + details: None, }, ]; assert_eq!(items, expected); } -#[test] -fn detect_repo_lists_agents_md_for_each_cwd() { +#[tokio::test] +async fn detect_repo_lists_agents_md_for_each_cwd() { let root = TempDir::new().expect("create tempdir"); let repo_root = root.path().join("repo"); let nested = repo_root.join("nested").join("child"); @@ -82,6 +100,7 @@ fn detect_repo_lists_agents_md_for_each_cwd() { include_home: false, cwds: Some(vec![nested, repo_root.clone()]), }) + .await .expect("detect"); let expected = vec![ @@ -93,6 +112,7 @@ fn detect_repo_lists_agents_md_for_each_cwd() { repo_root.join("AGENTS.md").display(), ), cwd: Some(repo_root.clone()), + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, @@ -102,50 +122,138 @@ fn detect_repo_lists_agents_md_for_each_cwd() { 
repo_root.join("AGENTS.md").display(), ), cwd: Some(repo_root), + details: None, }, ]; assert_eq!(items, expected); } -#[test] -fn import_home_migrates_supported_config_fields_skills_and_agents_md() { - let (_root, claude_home, codex_home) = fixture_paths(); +#[tokio::test] +async fn detect_repo_still_reports_non_plugin_items_when_home_config_is_invalid() { + let root = TempDir::new().expect("create tempdir"); + let repo_root = root.path().join("repo"); + let codex_home = root.path().join(".codex"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude").join("skills").join("skill-a")) + .expect("create repo skills"); + fs::create_dir_all(&codex_home).expect("create codex home"); + fs::write(codex_home.join("config.toml"), "this is not valid = [toml") + .expect("write invalid codex config"); + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{"env":{"FOO":"bar"}}"#, + ) + .expect("write settings"); + fs::write( + repo_root + .join(".claude") + .join("skills") + .join("skill-a") + .join("SKILL.md"), + "Use Claude Code and CLAUDE utilities.", + ) + .expect("write skill"); + fs::write( + repo_root.join(".claude").join("CLAUDE.md"), + "Claude code guidance", + ) + .expect("write agents"); + + let items = service_for_paths(root.path().join(".claude"), codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root.clone()]), + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ + ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Config, + description: format!( + "Migrate {} into {}", + repo_root.join(".claude").join("settings.json").display(), + repo_root.join(".codex").join("config.toml").display() + ), + cwd: Some(repo_root.clone()), + details: None, + }, + ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Skills, + description: format!( + "Migrate skills from 
{} to {}", + repo_root.join(".claude").join("skills").display(), + repo_root.join(".agents").join("skills").display() + ), + cwd: Some(repo_root.clone()), + details: None, + }, + ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::AgentsMd, + description: format!( + "Import {} to {}", + repo_root.join(".claude").join("CLAUDE.md").display(), + repo_root.join("AGENTS.md").display(), + ), + cwd: Some(repo_root), + details: None, + }, + ] + ); +} + +#[tokio::test] +async fn import_home_migrates_supported_config_fields_skills_and_agents_md() { + let (_root, external_agent_home, codex_home) = fixture_paths(); let agents_skills = codex_home .parent() .map(|parent| parent.join(".agents").join("skills")) .unwrap_or_else(|| PathBuf::from(".agents").join("skills")); - fs::create_dir_all(claude_home.join("skills").join("skill-a")).expect("create skills"); + fs::create_dir_all(external_agent_home.join("skills").join("skill-a")).expect("create skills"); fs::write( - claude_home.join("settings.json"), + external_agent_home.join("settings.json"), r#"{"model":"claude","permissions":{"ask":["git push"]},"env":{"FOO":"bar","CI":false,"MAX_RETRIES":3,"MY_TEAM":"codex","IGNORED":null,"LIST":["a","b"],"MAP":{"x":1}},"sandbox":{"enabled":true,"network":{"allowLocalBinding":true}}}"#, ) .expect("write settings"); fs::write( - claude_home.join("skills").join("skill-a").join("SKILL.md"), + external_agent_home + .join("skills") + .join("skill-a") + .join("SKILL.md"), "Use Claude Code and CLAUDE utilities.", ) .expect("write skill"); - fs::write(claude_home.join("CLAUDE.md"), "Claude code guidance").expect("write agents"); + fs::write( + external_agent_home.join("CLAUDE.md"), + "Claude code guidance", + ) + .expect("write agents"); - service_for_paths(claude_home, codex_home.clone()) + service_for_paths(external_agent_home, codex_home.clone()) .import(vec![ ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, 
description: String::new(), cwd: None, + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::Config, description: String::new(), cwd: None, + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::Skills, description: String::new(), cwd: None, + details: None, }, ]) + .await .expect("import"); assert_eq!( @@ -164,34 +272,36 @@ fn import_home_migrates_supported_config_fields_skills_and_agents_md() { ); } -#[test] -fn import_home_skips_empty_config_migration() { - let (_root, claude_home, codex_home) = fixture_paths(); - fs::create_dir_all(&claude_home).expect("create claude home"); +#[tokio::test] +async fn import_home_skips_empty_config_migration() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); fs::write( - claude_home.join("settings.json"), + external_agent_home.join("settings.json"), r#"{"model":"claude","sandbox":{"enabled":false}}"#, ) .expect("write settings"); - service_for_paths(claude_home, codex_home.clone()) + service_for_paths(external_agent_home, codex_home.clone()) .import(vec![ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::Config, description: String::new(), cwd: None, + details: None, }]) + .await .expect("import"); assert!(!codex_home.join("config.toml").exists()); } -#[test] -fn detect_home_skips_config_when_target_already_has_supported_fields() { - let (_root, claude_home, codex_home) = fixture_paths(); - fs::create_dir_all(&claude_home).expect("create claude home"); +#[tokio::test] +async fn detect_home_skips_config_when_target_already_has_supported_fields() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); fs::create_dir_all(&codex_home).expect("create codex home"); fs::write( - claude_home.join("settings.json"), + 
external_agent_home.join("settings.json"), r#"{"env":{"FOO":"bar"},"sandbox":{"enabled":true}}"#, ) .expect("write settings"); @@ -209,38 +319,40 @@ fn detect_home_skips_config_when_target_already_has_supported_fields() { ) .expect("write config"); - let items = service_for_paths(claude_home, codex_home) + let items = service_for_paths(external_agent_home, codex_home) .detect(ExternalAgentConfigDetectOptions { include_home: true, cwds: None, }) + .await .expect("detect"); assert_eq!(items, Vec::::new()); } -#[test] -fn detect_home_skips_skills_when_all_skill_directories_exist() { - let (_root, claude_home, codex_home) = fixture_paths(); +#[tokio::test] +async fn detect_home_skips_skills_when_all_skill_directories_exist() { + let (_root, external_agent_home, codex_home) = fixture_paths(); let agents_skills = codex_home .parent() .map(|parent| parent.join(".agents").join("skills")) .unwrap_or_else(|| PathBuf::from(".agents").join("skills")); - fs::create_dir_all(claude_home.join("skills").join("skill-a")).expect("create source"); + fs::create_dir_all(external_agent_home.join("skills").join("skill-a")).expect("create source"); fs::create_dir_all(agents_skills.join("skill-a")).expect("create target"); - let items = service_for_paths(claude_home, codex_home) + let items = service_for_paths(external_agent_home, codex_home) .detect(ExternalAgentConfigDetectOptions { include_home: true, cwds: None, }) + .await .expect("detect"); assert_eq!(items, Vec::::new()); } -#[test] -fn import_repo_agents_md_rewrites_terms_and_skips_non_empty_targets() { +#[tokio::test] +async fn import_repo_agents_md_rewrites_terms_and_skips_non_empty_targets() { let root = TempDir::new().expect("create tempdir"); let repo_root = root.path().join("repo-a"); let repo_with_existing_target = root.path().join("repo-b"); @@ -264,13 +376,16 @@ fn import_repo_agents_md_rewrites_terms_and_skips_non_empty_targets() { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, description: String::new(), cwd: 
Some(repo_root.clone()), + details: None, }, ExternalAgentConfigMigrationItem { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, description: String::new(), cwd: Some(repo_with_existing_target.clone()), + details: None, }, ]) + .await .expect("import"); assert_eq!( @@ -284,8 +399,8 @@ fn import_repo_agents_md_rewrites_terms_and_skips_non_empty_targets() { ); } -#[test] -fn import_repo_agents_md_overwrites_empty_targets() { +#[tokio::test] +async fn import_repo_agents_md_overwrites_empty_targets() { let root = TempDir::new().expect("create tempdir"); let repo_root = root.path().join("repo"); fs::create_dir_all(repo_root.join(".git")).expect("create git"); @@ -297,7 +412,9 @@ fn import_repo_agents_md_overwrites_empty_targets() { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, description: String::new(), cwd: Some(repo_root.clone()), + details: None, }]) + .await .expect("import"); assert_eq!( @@ -306,8 +423,8 @@ fn import_repo_agents_md_overwrites_empty_targets() { ); } -#[test] -fn detect_repo_prefers_non_empty_dot_claude_agents_source() { +#[tokio::test] +async fn detect_repo_prefers_non_empty_external_agent_agents_source() { let root = TempDir::new().expect("create tempdir"); let repo_root = root.path().join("repo"); fs::create_dir_all(repo_root.join(".git")).expect("create git"); @@ -324,6 +441,7 @@ fn detect_repo_prefers_non_empty_dot_claude_agents_source() { include_home: false, cwds: Some(vec![repo_root.clone()]), }) + .await .expect("detect"); assert_eq!( @@ -336,12 +454,13 @@ fn detect_repo_prefers_non_empty_dot_claude_agents_source() { repo_root.join("AGENTS.md").display(), ), cwd: Some(repo_root), + details: None, }] ); } -#[test] -fn import_repo_uses_non_empty_dot_claude_agents_source() { +#[tokio::test] +async fn import_repo_uses_non_empty_external_agent_agents_source() { let root = TempDir::new().expect("create tempdir"); let repo_root = root.path().join("repo"); fs::create_dir_all(repo_root.join(".git")).expect("create git"); @@ 
-358,7 +477,9 @@ fn import_repo_uses_non_empty_dot_claude_agents_source() { item_type: ExternalAgentConfigMigrationItemType::AgentsMd, description: String::new(), cwd: Some(repo_root.clone()), + details: None, }]) + .await .expect("import"); assert_eq!( @@ -378,18 +499,922 @@ fn migration_metric_tags_for_skills_include_skills_count() { ); } +#[tokio::test] +async fn detect_home_lists_enabled_plugins_from_settings() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true, + "deployer@acme-tools": true, + "analyzer@security-plugins": false + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write settings"); + + let items = service_for_paths(external_agent_home.clone(), codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: true, + cwds: None, + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + external_agent_home.join("settings.json").display() + ), + cwd: None, + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "acme-tools".to_string(), + plugin_names: vec!["deployer".to_string(), "formatter".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn detect_repo_skips_plugins_that_are_already_configured_in_codex() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo 
external agent dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true, + "deployer@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write repo settings"); + fs::write( + codex_home.join("config.toml"), + r#" +[plugins."formatter@acme-tools"] +enabled = true +"#, + ) + .expect("write codex config"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root.clone()]), + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + repo_root.join(".claude").join("settings.json").display() + ), + cwd: Some(repo_root), + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "acme-tools".to_string(), + plugin_names: vec!["deployer".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn detect_repo_skips_plugins_that_are_disabled_in_codex() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write repo settings"); + fs::write( + 
codex_home.join("config.toml"), + r#" +[plugins."formatter@acme-tools"] +enabled = false +"#, + ) + .expect("write codex config"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root]), + }) + .await + .expect("detect"); + + assert_eq!(items, Vec::::new()); +} + +#[tokio::test] +async fn detect_repo_skips_plugins_without_explicit_enabled_in_codex() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write repo settings"); + fs::write( + codex_home.join("config.toml"), + r#" +[plugins."formatter@acme-tools"] +"#, + ) + .expect("write codex config"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root]), + }) + .await + .expect("detect"); + + assert_eq!(items, Vec::::new()); +} + +#[tokio::test] +async fn import_plugins_requires_details() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + + let err = service_for_paths(external_agent_home, codex_home) + .import_plugins(/*cwd*/ None, /*details*/ None) + .await + .expect_err("expected missing details error"); + + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + assert_eq!(err.to_string(), "plugins migration item is missing details"); +} + +#[tokio::test] +async fn 
detect_repo_does_not_skip_plugins_only_configured_in_project_codex() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(repo_root.join(".codex")).expect("create repo codex dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write repo settings"); + fs::write( + repo_root.join(".codex").join("config.toml"), + r#" +[plugins."formatter@acme-tools"] +enabled = true +"#, + ) + .expect("write project codex config"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root.clone()]), + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + repo_root.join(".claude").join("settings.json").display() + ), + cwd: Some(repo_root), + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "acme-tools".to_string(), + plugin_names: vec!["formatter".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn detect_home_skips_plugins_without_marketplace_source() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); + fs::write( + external_agent_home.join("settings.json"), + r#"{ + 
"enabledPlugins": { + "formatter@acme-tools": true + } + }"#, + ) + .expect("write settings"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: true, + cwds: None, + }) + .await + .expect("detect"); + + assert_eq!(items, Vec::::new()); +} + +#[tokio::test] +async fn detect_home_skips_plugins_with_invalid_marketplace_source() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "github" + } + } + }"#, + ) + .expect("write settings"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: true, + cwds: None, + }) + .await + .expect("detect"); + + assert_eq!(items, Vec::::new()); +} + +#[tokio::test] +async fn detect_repo_filters_plugins_against_installed_marketplace() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + let marketplace_root = codex_home.join(".tmp").join("marketplaces").join("debug"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(marketplace_root.join(".agents").join("plugins")) + .expect("create marketplace manifest dir"); + fs::create_dir_all( + marketplace_root + .join("plugins") + .join("sample") + .join(".codex-plugin"), + ) + .expect("create sample plugin"); + fs::create_dir_all( + marketplace_root + .join("plugins") + .join("available") + .join(".codex-plugin"), + ) + .expect("create available plugin"); + fs::write( + 
repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "sample@debug": true, + "available@debug": true, + "missing@debug": true + }, + "extraKnownMarketplaces": { + "debug": { + "source": "owner/debug-marketplace" + } + } + }"#, + ) + .expect("write repo settings"); + fs::write( + codex_home.join("config.toml"), + r#" +[marketplaces.debug] +source_type = "git" +source = "owner/debug-marketplace" +"#, + ) + .expect("write codex config"); + fs::write( + marketplace_root + .join(".agents") + .join("plugins") + .join("marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample", + "source": { + "source": "local", + "path": "./plugins/sample" + }, + "policy": { + "installation": "NOT_AVAILABLE" + } + }, + { + "name": "available", + "source": { + "source": "local", + "path": "./plugins/available" + } + } + ] +}"#, + ) + .expect("write marketplace manifest"); + fs::write( + marketplace_root + .join("plugins") + .join("sample") + .join(".codex-plugin") + .join("plugin.json"), + r#"{"name":"sample"}"#, + ) + .expect("write sample plugin manifest"); + fs::write( + marketplace_root + .join("plugins") + .join("available") + .join(".codex-plugin") + .join("plugin.json"), + r#"{"name":"available"}"#, + ) + .expect("write available plugin manifest"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root.clone()]), + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + repo_root.join(".claude").join("settings.json").display() + ), + cwd: Some(repo_root), + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "debug".to_string(), + plugin_names: vec!["available".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn 
import_plugins_requires_source_marketplace_details() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "github", + "repo": "acme-corp/claude-plugins" + } + } + }"#, + ) + .expect("write settings"); + + let outcome = service_for_paths(external_agent_home, codex_home) + .import_plugins( + /*cwd*/ None, + Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "other-tools".to_string(), + plugin_names: github_plugin_details().plugins[0].plugin_names.clone(), + }], + }), + ) + .await + .expect("import plugins"); + + assert_eq!( + outcome, + PluginImportOutcome { + succeeded_marketplaces: Vec::new(), + succeeded_plugin_ids: Vec::new(), + failed_marketplaces: vec!["other-tools".to_string()], + failed_plugin_ids: vec!["formatter@other-tools".to_string()], + } + ); +} + +#[tokio::test] +async fn import_plugins_defers_marketplace_source_validation_to_add_marketplace() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + fs::create_dir_all(&external_agent_home).expect("create external agent home"); + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "formatter@acme-tools": true + }, + "extraKnownMarketplaces": { + "acme-tools": { + "source": "local", + "path": "./external_plugins/acme-tools" + } + } + }"#, + ) + .expect("write settings"); + + let outcome = service_for_paths(external_agent_home, codex_home) + .import_plugins(/*cwd*/ None, Some(github_plugin_details())) + .await + .expect("import plugins"); + + assert_eq!( + outcome, + PluginImportOutcome { + succeeded_marketplaces: Vec::new(), + succeeded_plugin_ids: Vec::new(), + failed_marketplaces: vec!["acme-tools".to_string()], + failed_plugin_ids: 
vec!["formatter@acme-tools".to_string()], + } + ); +} + +#[tokio::test] +async fn import_plugins_supports_external_agent_plugin_marketplace_layout() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + let marketplace_root = external_agent_home.join("my-marketplace"); + let plugin_root = marketplace_root.join("plugins").join("cloudflare"); + fs::create_dir_all(marketplace_root.join(".claude-plugin")) + .expect("create marketplace manifest dir"); + fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create plugin manifest dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + + fs::write( + external_agent_home.join("settings.json"), + serde_json::to_string_pretty(&serde_json::json!({ + "enabledPlugins": { + "cloudflare@my-plugins": true + }, + "extraKnownMarketplaces": { + "my-plugins": { + "source": "local", + "path": marketplace_root + } + } + })) + .expect("serialize settings"), + ) + .expect("write settings"); + fs::write( + marketplace_root + .join(".claude-plugin") + .join("marketplace.json"), + r#"{ + "name": "my-plugins", + "plugins": [ + { + "name": "cloudflare", + "source": "./plugins/cloudflare" + } + ] + }"#, + ) + .expect("write marketplace manifest"); + fs::write( + plugin_root.join(".codex-plugin").join("plugin.json"), + r#"{"name":"cloudflare","version":"0.1.0"}"#, + ) + .expect("write plugin manifest"); + + let outcome = service_for_paths(external_agent_home, codex_home.clone()) + .import_plugins( + /*cwd*/ None, + Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "my-plugins".to_string(), + plugin_names: vec!["cloudflare".to_string()], + }], + }), + ) + .await + .expect("import plugins"); + + assert_eq!( + outcome, + PluginImportOutcome { + succeeded_marketplaces: vec!["my-plugins".to_string()], + succeeded_plugin_ids: vec!["cloudflare@my-plugins".to_string()], + failed_marketplaces: Vec::new(), + failed_plugin_ids: Vec::new(), + } + ); + let config = 
fs::read_to_string(codex_home.join("config.toml")).expect("read config"); + assert!(config.contains(r#"[plugins."cloudflare@my-plugins"]"#)); + assert!(config.contains("enabled = true")); +} + +#[tokio::test] +async fn detect_home_supports_relative_external_agent_plugin_marketplace_path() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + let marketplace_root = external_agent_home.join("my-marketplace"); + let plugin_root = marketplace_root.join("plugins").join("cloudflare"); + fs::create_dir_all(marketplace_root.join(".claude-plugin")) + .expect("create marketplace manifest dir"); + fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create plugin manifest dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "cloudflare@my-plugins": true + }, + "extraKnownMarketplaces": { + "my-plugins": { + "source": "directory", + "path": "./my-marketplace" + } + } + }"#, + ) + .expect("write settings"); + fs::write( + marketplace_root + .join(".claude-plugin") + .join("marketplace.json"), + r#"{ + "name": "my-plugins", + "plugins": [ + { + "name": "cloudflare", + "source": "./plugins/cloudflare" + } + ] + }"#, + ) + .expect("write marketplace manifest"); + fs::write( + plugin_root.join(".codex-plugin").join("plugin.json"), + r#"{"name":"cloudflare","version":"0.1.0"}"#, + ) + .expect("write plugin manifest"); + + let items = service_for_paths(external_agent_home.clone(), codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: true, + cwds: None, + }) + .await + .expect("detect"); + + assert_eq!( + items, + vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + external_agent_home.join("settings.json").display() + ), + cwd: None, + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: 
"my-plugins".to_string(), + plugin_names: vec!["cloudflare".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn import_plugins_supports_relative_external_agent_plugin_marketplace_path() { + let (_root, external_agent_home, codex_home) = fixture_paths(); + let marketplace_root = external_agent_home.join("my-marketplace"); + let plugin_root = marketplace_root.join("plugins").join("cloudflare"); + fs::create_dir_all(marketplace_root.join(".claude-plugin")) + .expect("create marketplace manifest dir"); + fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create plugin manifest dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + + fs::write( + external_agent_home.join("settings.json"), + r#"{ + "enabledPlugins": { + "cloudflare@my-plugins": true + }, + "extraKnownMarketplaces": { + "my-plugins": { + "source": "directory", + "path": "./my-marketplace" + } + } + }"#, + ) + .expect("write settings"); + fs::write( + marketplace_root + .join(".claude-plugin") + .join("marketplace.json"), + r#"{ + "name": "my-plugins", + "plugins": [ + { + "name": "cloudflare", + "source": "./plugins/cloudflare" + } + ] + }"#, + ) + .expect("write marketplace manifest"); + fs::write( + plugin_root.join(".codex-plugin").join("plugin.json"), + r#"{"name":"cloudflare","version":"0.1.0"}"#, + ) + .expect("write plugin manifest"); + + let outcome = service_for_paths(external_agent_home, codex_home.clone()) + .import_plugins( + /*cwd*/ None, + Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "my-plugins".to_string(), + plugin_names: vec!["cloudflare".to_string()], + }], + }), + ) + .await + .expect("import plugins"); + + assert_eq!( + outcome, + PluginImportOutcome { + succeeded_marketplaces: vec!["my-plugins".to_string()], + succeeded_plugin_ids: vec!["cloudflare@my-plugins".to_string()], + failed_marketplaces: Vec::new(), + failed_plugin_ids: Vec::new(), + } + ); + let config = 
fs::read_to_string(codex_home.join("config.toml")).expect("read config"); + assert!(config.contains(r#"[plugins."cloudflare@my-plugins"]"#)); + assert!(config.contains("enabled = true")); +} + +#[tokio::test] +async fn detect_repo_supports_project_relative_external_agent_plugin_marketplace_path() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + let marketplace_root = repo_root.join("my-marketplace"); + let plugin_root = marketplace_root.join("plugins").join("cloudflare"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(marketplace_root.join(".claude-plugin")) + .expect("create marketplace manifest dir"); + fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create plugin manifest dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "cloudflare@my-plugins": true + }, + "extraKnownMarketplaces": { + "my-plugins": { + "source": "directory", + "path": "./my-marketplace" + } + } + }"#, + ) + .expect("write settings"); + fs::write( + marketplace_root + .join(".claude-plugin") + .join("marketplace.json"), + r#"{ + "name": "my-plugins", + "plugins": [ + { + "name": "cloudflare", + "source": "./plugins/cloudflare" + } + ] + }"#, + ) + .expect("write marketplace manifest"); + fs::write( + plugin_root.join(".codex-plugin").join("plugin.json"), + r#"{"name":"cloudflare","version":"0.1.0"}"#, + ) + .expect("write plugin manifest"); + + let items = service_for_paths(external_agent_home, codex_home) + .detect(ExternalAgentConfigDetectOptions { + include_home: false, + cwds: Some(vec![repo_root.clone()]), + }) + .await + .expect("detect"); + + assert_eq!( + items, + 
vec![ExternalAgentConfigMigrationItem { + item_type: ExternalAgentConfigMigrationItemType::Plugins, + description: format!( + "Import enabled plugins from {}", + repo_root.join(".claude").join("settings.json").display() + ), + cwd: Some(repo_root), + details: Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "my-plugins".to_string(), + plugin_names: vec!["cloudflare".to_string()], + }], + }), + }] + ); +} + +#[tokio::test] +async fn import_plugins_supports_project_relative_external_agent_plugin_marketplace_path() { + let root = TempDir::new().expect("create tempdir"); + let external_agent_home = root.path().join(".claude"); + let codex_home = root.path().join(".codex"); + let repo_root = root.path().join("repo"); + let marketplace_root = repo_root.join("my-marketplace"); + let plugin_root = marketplace_root.join("plugins").join("cloudflare"); + fs::create_dir_all(repo_root.join(".git")).expect("create git dir"); + fs::create_dir_all(repo_root.join(".claude")).expect("create repo external agent dir"); + fs::create_dir_all(marketplace_root.join(".claude-plugin")) + .expect("create marketplace manifest dir"); + fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create plugin manifest dir"); + fs::create_dir_all(&codex_home).expect("create codex home"); + + fs::write( + repo_root.join(".claude").join("settings.json"), + r#"{ + "enabledPlugins": { + "cloudflare@my-plugins": true + }, + "extraKnownMarketplaces": { + "my-plugins": { + "source": "directory", + "path": "./my-marketplace" + } + } + }"#, + ) + .expect("write settings"); + fs::write( + marketplace_root + .join(".claude-plugin") + .join("marketplace.json"), + r#"{ + "name": "my-plugins", + "plugins": [ + { + "name": "cloudflare", + "source": "./plugins/cloudflare" + } + ] + }"#, + ) + .expect("write marketplace manifest"); + fs::write( + plugin_root.join(".codex-plugin").join("plugin.json"), + r#"{"name":"cloudflare","version":"0.1.0"}"#, + ) + .expect("write plugin 
manifest"); + + let outcome = service_for_paths(external_agent_home, codex_home.clone()) + .import_plugins( + Some(repo_root.as_path()), + Some(MigrationDetails { + plugins: vec![PluginsMigration { + marketplace_name: "my-plugins".to_string(), + plugin_names: vec!["cloudflare".to_string()], + }], + }), + ) + .await + .expect("import plugins"); + + assert_eq!( + outcome, + PluginImportOutcome { + succeeded_marketplaces: vec!["my-plugins".to_string()], + succeeded_plugin_ids: vec!["cloudflare@my-plugins".to_string()], + failed_marketplaces: Vec::new(), + failed_plugin_ids: Vec::new(), + } + ); + let config = fs::read_to_string(codex_home.join("config.toml")).expect("read config"); + assert!(config.contains(r#"[plugins."cloudflare@my-plugins"]"#)); + assert!(config.contains("enabled = true")); +} + #[test] fn import_skills_returns_only_new_skill_directory_count() { - let (_root, claude_home, codex_home) = fixture_paths(); + let (_root, external_agent_home, codex_home) = fixture_paths(); let agents_skills = codex_home .parent() .map(|parent| parent.join(".agents").join("skills")) .unwrap_or_else(|| PathBuf::from(".agents").join("skills")); - fs::create_dir_all(claude_home.join("skills").join("skill-a")).expect("create source a"); - fs::create_dir_all(claude_home.join("skills").join("skill-b")).expect("create source b"); + fs::create_dir_all(external_agent_home.join("skills").join("skill-a")) + .expect("create source a"); + fs::create_dir_all(external_agent_home.join("skills").join("skill-b")) + .expect("create source b"); fs::create_dir_all(agents_skills.join("skill-a")).expect("create existing target"); - let copied_count = service_for_paths(claude_home, codex_home) + let copied_count = service_for_paths(external_agent_home, codex_home) .import_skills(/*cwd*/ None) .expect("import skills"); diff --git a/codex-rs/core/src/file_watcher.rs b/codex-rs/core/src/file_watcher.rs index f8f0e4b11f..bfa01f8c9e 100644 --- a/codex-rs/core/src/file_watcher.rs +++ 
b/codex-rs/core/src/file_watcher.rs @@ -252,6 +252,16 @@ pub struct WatchRegistration { watched_paths: Vec, } +impl Default for WatchRegistration { + fn default() -> Self { + Self { + file_watcher: std::sync::Weak::new(), + subscriber_id: 0, + watched_paths: Vec::new(), + } + } +} + impl Drop for WatchRegistration { fn drop(&mut self) { if let Some(file_watcher) = self.file_watcher.upgrade() { diff --git a/codex-rs/core/src/git_info_tests.rs b/codex-rs/core/src/git_info_tests.rs index 4cb50f02fd..0ffdd1c4df 100644 --- a/codex-rs/core/src/git_info_tests.rs +++ b/codex-rs/core/src/git_info_tests.rs @@ -1,3 +1,4 @@ +use codex_exec_server::LOCAL_FS; use codex_git_utils::GitInfo; use codex_git_utils::GitSha; use codex_git_utils::collect_git_info; @@ -5,6 +6,9 @@ use codex_git_utils::get_has_changes; use codex_git_utils::git_diff_to_remote; use codex_git_utils::recent_commits; use codex_git_utils::resolve_root_git_project_for_trust; +use codex_utils_path::normalize_for_path_comparison; +use core_test_support::PathBufExt; +use core_test_support::PathExt; use core_test_support::skip_if_sandbox; use std::fs; use std::path::PathBuf; @@ -426,25 +430,31 @@ async fn test_get_git_working_tree_state_branch_fallback() { assert_eq!(state.sha, GitSha::new(&remote_sha)); } -#[test] -fn resolve_root_git_project_for_trust_returns_none_outside_repo() { +#[tokio::test] +async fn resolve_root_git_project_for_trust_returns_none_outside_repo() { let tmp = TempDir::new().expect("tempdir"); - assert!(resolve_root_git_project_for_trust(tmp.path()).is_none()); + assert!( + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &tmp.path().abs()) + .await + .is_none() + ); } #[tokio::test] async fn resolve_root_git_project_for_trust_regular_repo_returns_repo_root() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let repo_path = create_test_git_repo(&temp_dir).await; - let expected = std::fs::canonicalize(&repo_path).unwrap(); + let repo_path = 
create_test_git_repo(&temp_dir).await.abs(); assert_eq!( - resolve_root_git_project_for_trust(&repo_path), - Some(expected.clone()) + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &repo_path).await, + Some(repo_path.clone()) ); let nested = repo_path.join("sub/dir"); - std::fs::create_dir_all(&nested).unwrap(); - assert_eq!(resolve_root_git_project_for_trust(&nested), Some(expected)); + std::fs::create_dir_all(nested.as_path()).unwrap(); + assert_eq!( + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &nested).await, + Some(repo_path) + ); } #[tokio::test] @@ -466,19 +476,31 @@ async fn resolve_root_git_project_for_trust_detects_worktree_and_returns_main_ro .output() .expect("git worktree add"); - let expected = std::fs::canonicalize(&repo_path).ok(); - let got = - resolve_root_git_project_for_trust(&wt_root).and_then(|p| std::fs::canonicalize(p).ok()); - assert_eq!(got, expected); + let expected = normalize_for_path_comparison(&repo_path).unwrap(); + let wt_root = wt_root.abs(); + let got = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &wt_root).await; + assert_eq!( + got.as_ref() + .map(normalize_for_path_comparison) + .transpose() + .unwrap(), + Some(expected.clone()) + ); let nested = wt_root.join("nested/sub"); - std::fs::create_dir_all(&nested).unwrap(); - let got_nested = - resolve_root_git_project_for_trust(&nested).and_then(|p| std::fs::canonicalize(p).ok()); - assert_eq!(got_nested, expected); + std::fs::create_dir_all(nested.as_path()).unwrap(); + let got_nested = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &nested).await; + assert_eq!( + got_nested + .as_ref() + .map(normalize_for_path_comparison) + .transpose() + .unwrap(), + Some(expected) + ); } -#[test] -fn resolve_root_git_project_for_trust_detects_worktree_pointer_without_git_command() { +#[tokio::test] +async fn resolve_root_git_project_for_trust_detects_worktree_pointer_without_git_command() { let tmp = TempDir::new().expect("tempdir"); let repo_root = 
tmp.path().join("repo"); let common_dir = repo_root.join(".git"); @@ -493,19 +515,21 @@ fn resolve_root_git_project_for_trust_detects_worktree_pointer_without_git_comma ) .unwrap(); - let expected = std::fs::canonicalize(&repo_root).unwrap(); + let expected = repo_root.abs(); + let worktree_root = worktree_root.abs(); assert_eq!( - resolve_root_git_project_for_trust(&worktree_root), + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &worktree_root).await, Some(expected.clone()) ); + let nested = worktree_root.join("nested"); assert_eq!( - resolve_root_git_project_for_trust(&worktree_root.join("nested")), + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &nested).await, Some(expected) ); } -#[test] -fn resolve_root_git_project_for_trust_non_worktrees_gitdir_returns_none() { +#[tokio::test] +async fn resolve_root_git_project_for_trust_non_worktrees_gitdir_returns_none() { let tmp = TempDir::new().expect("tempdir"); let proj = tmp.path().join("proj"); std::fs::create_dir_all(proj.join("nested")).unwrap(); @@ -520,8 +544,18 @@ fn resolve_root_git_project_for_trust_non_worktrees_gitdir_returns_none() { ) .unwrap(); - assert!(resolve_root_git_project_for_trust(&proj).is_none()); - assert!(resolve_root_git_project_for_trust(&proj.join("nested")).is_none()); + let proj = proj.abs(); + assert!( + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &proj) + .await + .is_none() + ); + let nested = proj.join("nested"); + assert!( + resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &nested) + .await + .is_none() + ); } #[tokio::test] diff --git a/codex-rs/core/src/guardian/approval_request.rs b/codex-rs/core/src/guardian/approval_request.rs index c9cc9d9fa4..6d1d3f76af 100644 --- a/codex-rs/core/src/guardian/approval_request.rs +++ b/codex-rs/core/src/guardian/approval_request.rs @@ -1,5 +1,4 @@ use std::path::Path; -use std::path::PathBuf; use codex_protocol::approvals::GuardianAssessmentAction; use codex_protocol::approvals::GuardianCommandSource; @@ -17,7 
+16,7 @@ pub(crate) enum GuardianApprovalRequest { Shell { id: String, command: Vec, - cwd: PathBuf, + cwd: AbsolutePathBuf, sandbox_permissions: crate::sandboxing::SandboxPermissions, additional_permissions: Option, justification: Option, @@ -25,7 +24,7 @@ pub(crate) enum GuardianApprovalRequest { ExecCommand { id: String, command: Vec, - cwd: PathBuf, + cwd: AbsolutePathBuf, sandbox_permissions: crate::sandboxing::SandboxPermissions, additional_permissions: Option, justification: Option, @@ -37,12 +36,12 @@ pub(crate) enum GuardianApprovalRequest { source: GuardianCommandSource, program: String, argv: Vec, - cwd: PathBuf, + cwd: AbsolutePathBuf, additional_permissions: Option, }, ApplyPatch { id: String, - cwd: PathBuf, + cwd: AbsolutePathBuf, files: Vec, patch: String, }, @@ -151,12 +150,12 @@ fn serialize_command_guardian_action( fn command_assessment_action( source: GuardianCommandSource, command: &[String], - cwd: &Path, + cwd: &AbsolutePathBuf, ) -> GuardianAssessmentAction { GuardianAssessmentAction::Command { source, command: codex_shell_command::parse_command::shlex_join(command), - cwd: cwd.to_path_buf(), + cwd: cwd.clone(), } } @@ -323,10 +322,7 @@ pub(crate) fn guardian_assessment_action( GuardianApprovalRequest::ApplyPatch { cwd, files, .. 
} => { GuardianAssessmentAction::ApplyPatch { cwd: cwd.clone(), - files: files - .iter() - .map(codex_utils_absolute_path::AbsolutePathBuf::to_path_buf) - .collect(), + files: files.clone(), } } GuardianApprovalRequest::NetworkAccess { diff --git a/codex-rs/core/src/guardian/mod.rs b/codex-rs/core/src/guardian/mod.rs index 67e9a828ee..4fa150a232 100644 --- a/codex-rs/core/src/guardian/mod.rs +++ b/codex-rs/core/src/guardian/mod.rs @@ -34,7 +34,7 @@ pub(crate) use review::review_approval_request_with_cancel; pub(crate) use review::routes_approval_to_guardian; pub(crate) use review_session::GuardianReviewSessionManager; -const GUARDIAN_PREFERRED_MODEL: &str = "gpt-5.4"; +const GUARDIAN_PREFERRED_MODEL: &str = "codex-auto-review"; pub(crate) const GUARDIAN_REVIEW_TIMEOUT: Duration = Duration::from_secs(90); pub(crate) const GUARDIAN_REVIEWER_NAME: &str = "guardian"; const GUARDIAN_MAX_MESSAGE_TRANSCRIPT_TOKENS: usize = 10_000; diff --git a/codex-rs/core/src/guardian/review.rs b/codex-rs/core/src/guardian/review.rs index 861d3576cd..18107f7377 100644 --- a/codex-rs/core/src/guardian/review.rs +++ b/codex-rs/core/src/guardian/review.rs @@ -163,14 +163,14 @@ async fn run_guardian_review( let schema = guardian_output_schema(); let terminal_action = action_summary.clone(); - let outcome = run_guardian_review_session( + let outcome = Box::pin(run_guardian_review_session( session.clone(), turn.clone(), request, retry_reason, schema, external_cancel, - ) + )) .await; let assessment = match outcome { @@ -303,14 +303,16 @@ pub(crate) async fn review_approval_request( request: GuardianApprovalRequest, retry_reason: Option, ) -> ReviewDecision { - run_guardian_review( + // Box the delegated review future so callers do not inline the entire + // guardian session state machine into their own async stack. 
+ Box::pin(run_guardian_review( Arc::clone(session), Arc::clone(turn), review_id, request, retry_reason, /*external_cancel*/ None, - ) + )) .await } @@ -322,14 +324,14 @@ pub(crate) async fn review_approval_request_with_cancel( retry_reason: Option, cancel_token: CancellationToken, ) -> ReviewDecision { - run_guardian_review( + Box::pin(run_guardian_review( Arc::clone(session), Arc::clone(turn), review_id, request, retry_reason, Some(cancel_token), - ) + )) .await } @@ -411,22 +413,24 @@ pub(super) async fn run_guardian_review_session( Err(err) => return GuardianReviewOutcome::Completed(Err(err)), }; - match session - .guardian_review_session - .run_review(GuardianReviewSessionParams { - parent_session: Arc::clone(&session), - parent_turn: turn.clone(), - spawn_config: guardian_config, - request, - retry_reason, - schema, - model: guardian_model, - reasoning_effort: guardian_reasoning_effort, - reasoning_summary: turn.reasoning_summary, - personality: turn.personality, - external_cancel, - }) - .await + match Box::pin( + session + .guardian_review_session + .run_review(GuardianReviewSessionParams { + parent_session: Arc::clone(&session), + parent_turn: turn.clone(), + spawn_config: guardian_config, + request, + retry_reason, + schema, + model: guardian_model, + reasoning_effort: guardian_reasoning_effort, + reasoning_summary: turn.reasoning_summary, + personality: turn.personality, + external_cancel, + }), + ) + .await { GuardianReviewSessionOutcome::Completed(Ok(last_agent_message)) => { GuardianReviewOutcome::Completed(parse_guardian_assessment( diff --git a/codex-rs/core/src/guardian/review_session.rs b/codex-rs/core/src/guardian/review_session.rs index f6cd4d02b2..f1c6e1e054 100644 --- a/codex-rs/core/src/guardian/review_session.rs +++ b/codex-rs/core/src/guardian/review_session.rs @@ -35,6 +35,7 @@ use crate::rollout::recorder::RolloutRecorder; use codex_config::types::McpServerConfig; use codex_features::Feature; use 
codex_model_provider_info::ModelProviderInfo; +use codex_utils_absolute_path::AbsolutePathBuf; use super::GUARDIAN_REVIEW_TIMEOUT; use super::GUARDIAN_REVIEWER_NAME; @@ -129,7 +130,7 @@ struct GuardianReviewSessionReuseKey { base_instructions: Option, user_instructions: Option, compact_prompt: Option, - cwd: PathBuf, + cwd: AbsolutePathBuf, mcp_servers: Constrained>, codex_linux_sandbox_exe: Option, main_execve_wrapper_exe: Option, @@ -156,7 +157,7 @@ impl GuardianReviewSessionReuseKey { base_instructions: spawn_config.base_instructions.clone(), user_instructions: spawn_config.user_instructions.clone(), compact_prompt: spawn_config.compact_prompt.clone(), - cwd: spawn_config.cwd.to_path_buf(), + cwd: spawn_config.cwd.clone(), mcp_servers: spawn_config.mcp_servers.clone(), codex_linux_sandbox_exe: spawn_config.codex_linux_sandbox_exe.clone(), main_execve_wrapper_exe: spawn_config.main_execve_wrapper_exe.clone(), @@ -326,32 +327,30 @@ impl GuardianReviewSessionManager { }; if trunk.reuse_key != next_reuse_key { - return self - .run_ephemeral_review( - params, - next_reuse_key, - deadline, - /*fork_snapshot*/ None, - ) - .await; + return Box::pin(self.run_ephemeral_review( + params, + next_reuse_key, + deadline, + /*fork_snapshot*/ None, + )) + .await; } let trunk_guard = match trunk.review_lock.try_lock() { Ok(trunk_guard) => trunk_guard, Err(_) => { - return self - .run_ephemeral_review( - params, - next_reuse_key, - deadline, - trunk.fork_snapshot().await, - ) - .await; + return Box::pin(self.run_ephemeral_review( + params, + next_reuse_key, + deadline, + trunk.fork_snapshot().await, + )) + .await; } }; let (outcome, keep_review_session) = - run_review_on_session(trunk.as_ref(), ¶ms, deadline).await; + Box::pin(run_review_on_session(trunk.as_ref(), ¶ms, deadline)).await; if keep_review_session && matches!(outcome, GuardianReviewSessionOutcome::Completed(_)) { trunk.refresh_last_committed_fork_snapshot().await; } @@ -487,7 +486,12 @@ impl 
GuardianReviewSessionManager { let mut cleanup = EphemeralReviewCleanup::new(Arc::clone(&self.state), Arc::clone(&review_session)); - let (outcome, _) = run_review_on_session(review_session.as_ref(), ¶ms, deadline).await; + let (outcome, _) = Box::pin(run_review_on_session( + review_session.as_ref(), + ¶ms, + deadline, + )) + .await; if let Some(review_session) = self.take_active_ephemeral(&review_session).await { cleanup.disarm(); review_session.shutdown_in_background(); @@ -511,7 +515,7 @@ async fn spawn_guardian_review_session( ), None => (None, 0, None), }; - let codex = run_codex_thread_interactive( + let codex = Box::pin(run_codex_thread_interactive( spawn_config, params.parent_session.services.auth_manager.clone(), params.parent_session.services.models_manager.clone(), @@ -520,7 +524,7 @@ async fn spawn_guardian_review_session( cancel_token.clone(), SubAgentSource::Other(GUARDIAN_REVIEWER_NAME.to_string()), initial_history, - ) + )) .await?; Ok(GuardianReviewSession { @@ -744,6 +748,7 @@ pub(crate) fn build_guardian_review_session_config( for feature in [ Feature::SpawnCsv, Feature::Collab, + Feature::CodexHooks, Feature::WebSearchRequest, Feature::WebSearchCached, ] { @@ -818,9 +823,9 @@ async fn interrupt_and_drain_turn(codex: &Codex) -> anyhow::Result<()> { mod tests { use super::*; - #[test] - fn guardian_review_session_config_change_invalidates_cached_session() { - let parent_config = crate::config::test_config(); + #[tokio::test] + async fn guardian_review_session_config_change_invalidates_cached_session() { + let parent_config = crate::config::test_config().await; let cached_spawn_config = build_guardian_review_session_config( &parent_config, /*live_network_config*/ None, @@ -850,6 +855,25 @@ mod tests { ); } + #[tokio::test] + async fn guardian_review_session_config_disables_hooks() { + let mut parent_config = crate::config::test_config().await; + parent_config + .features + .enable(Feature::CodexHooks) + .expect("enable hooks on parent config"); + + 
let guardian_config = build_guardian_review_session_config( + &parent_config, + /*live_network_config*/ None, + "active-model", + /*reasoning_effort*/ None, + ) + .expect("guardian config"); + + assert!(!guardian_config.features.enabled(Feature::CodexHooks)); + } + #[tokio::test(flavor = "current_thread")] async fn run_before_review_deadline_times_out_before_future_completes() { let outcome = run_before_review_deadline( diff --git a/codex-rs/core/src/guardian/tests.rs b/codex-rs/core/src/guardian/tests.rs index 518cdd8565..2712a94039 100644 --- a/codex-rs/core/src/guardian/tests.rs +++ b/codex-rs/core/src/guardian/tests.rs @@ -16,6 +16,7 @@ use crate::config_loader::RequirementSource; use crate::config_loader::Sourced; use crate::test_support; use codex_config::config_toml::ConfigToml; +use codex_exec_server::LOCAL_FS; use codex_network_proxy::NetworkProxyConfig; use codex_protocol::ThreadId; use codex_protocol::approvals::NetworkApprovalProtocol; @@ -45,11 +46,11 @@ use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::streaming_sse::StreamingSseChunk; use core_test_support::streaming_sse::start_streaming_sse_server; +use core_test_support::test_path_buf; use insta::Settings; use insta::assert_snapshot; use pretty_assertions::assert_eq; use std::collections::BTreeMap; -use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use tempfile::TempDir; @@ -76,7 +77,7 @@ async fn guardian_test_session_and_turn_with_base_url( config.user_instructions = None; let config = Arc::new(config); let models_manager = Arc::new(test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -154,6 +155,20 @@ fn guardian_snapshot_options() -> ContextSnapshotOptions { .strip_agents_md_user_context() } +fn normalize_guardian_snapshot_paths(text: String) -> String { + let platform_path 
= test_path_buf("/repo/codex-rs/core").display().to_string(); + if platform_path == "/repo/codex-rs/core" { + return text; + } + + let escaped_platform_path = serde_json::to_string(&platform_path) + .expect("test path should serialize") + .trim_matches('"') + .to_string(); + text.replace(&escaped_platform_path, "/repo/codex-rs/core") + .replace(&platform_path, "/repo/codex-rs/core") +} + fn guardian_prompt_text(items: &[codex_protocol::user_input::UserInput]) -> String { items .iter() @@ -219,7 +234,7 @@ async fn build_guardian_prompt_full_mode_preserves_initial_review_format() -> an GuardianApprovalRequest::Shell { id: "shell-1".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the reviewed docs fix.".to_string()), @@ -275,7 +290,7 @@ async fn build_guardian_prompt_delta_mode_preserves_original_numbering() -> anyh GuardianApprovalRequest::Shell { id: "shell-2".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the second docs fix.".to_string()), @@ -313,7 +328,7 @@ async fn build_guardian_prompt_delta_mode_handles_empty_delta() -> anyhow::Resul GuardianApprovalRequest::Shell { id: "shell-2".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the second docs fix.".to_string()), @@ -348,7 +363,7 @@ async fn 
build_guardian_prompt_stale_delta_cursor_falls_back_to_full_prompt() -> GuardianApprovalRequest::Shell { id: "shell-3".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the docs fix.".to_string()), @@ -433,7 +448,7 @@ async fn build_guardian_prompt_stale_delta_version_falls_back_to_full_prompt() - GuardianApprovalRequest::Shell { id: "shell-4".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push after the compaction.".to_string()), @@ -565,7 +580,7 @@ fn format_guardian_action_pretty_truncates_large_string_fields() -> serde_json:: let patch = "line\n".repeat(100_000); let action = GuardianApprovalRequest::ApplyPatch { id: "patch-1".to_string(), - cwd: PathBuf::from("/tmp"), + cwd: test_path_buf("/tmp").abs(), files: Vec::new(), patch: patch.clone(), }; @@ -621,13 +636,8 @@ fn guardian_approval_request_to_json_renders_mcp_tool_call_shape() -> serde_json #[test] fn guardian_assessment_action_redacts_apply_patch_patch_text() { - let (cwd, file) = if cfg!(windows) { - (r"C:\tmp", r"C:\tmp\guardian.txt") - } else { - ("/tmp", "/tmp/guardian.txt") - }; - let cwd = PathBuf::from(cwd); - let file = PathBuf::from(file).abs(); + let cwd = test_path_buf("/tmp").abs(); + let file = test_path_buf("/tmp/guardian.txt").abs(); let action = GuardianApprovalRequest::ApplyPatch { id: "patch-1".to_string(), cwd: cwd.clone(), @@ -658,8 +668,8 @@ fn guardian_request_turn_id_prefers_network_access_owner_turn() { }; let apply_patch = GuardianApprovalRequest::ApplyPatch { id: "patch-1".to_string(), - 
cwd: PathBuf::from("/tmp"), - files: vec![PathBuf::from("/tmp/guardian.txt").abs()], + cwd: test_path_buf("/tmp").abs(), + files: vec![test_path_buf("/tmp/guardian.txt").abs()], patch: "*** Begin Patch\n*** Update File: guardian.txt\n@@\n+hello\n*** End Patch" .to_string(), }; @@ -686,8 +696,8 @@ async fn cancelled_guardian_review_emits_terminal_abort_without_warning() { "review-cancelled-guardian".to_string(), GuardianApprovalRequest::ApplyPatch { id: "patch-1".to_string(), - cwd: PathBuf::from("/tmp"), - files: vec![PathBuf::from("/tmp/guardian.txt").abs()], + cwd: test_path_buf("/tmp").abs(), + files: vec![test_path_buf("/tmp/guardian.txt").abs()], patch: "*** Begin Patch\n*** Update File: guardian.txt\n@@\n+hello\n*** End Patch" .to_string(), }, @@ -873,7 +883,7 @@ async fn guardian_review_request_layout_matches_model_visible_request_snapshot() config.model_provider.base_url = Some(format!("{}/v1", server.uri())); let config = Arc::new(config); let models_manager = Arc::new(test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -892,7 +902,7 @@ async fn guardian_review_request_layout_matches_model_visible_request_snapshot() "origin".to_string(), "guardian-approval-mvp".to_string(), ], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the reviewed docs fix to the repo remote.".to_string()), @@ -919,11 +929,11 @@ async fn guardian_review_request_layout_matches_model_visible_request_snapshot() settings.bind(|| { assert_snapshot!( "codex_core__guardian__tests__guardian_review_request_layout", - context_snapshot::format_labeled_requests_snapshot( + normalize_guardian_snapshot_paths(context_snapshot::format_labeled_requests_snapshot( "Guardian review 
request layout", &[("Guardian Review Request", &request)], &guardian_snapshot_options(), - ) + )) ); }); @@ -939,7 +949,7 @@ async fn build_guardian_prompt_items_includes_parent_session_id() -> anyhow::Res GuardianApprovalRequest::Shell { id: "shell-1".to_string(), command: vec!["git".to_string(), "status".to_string()], - cwd: PathBuf::from("/repo"), + cwd: test_path_buf("/repo").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: None, @@ -1013,7 +1023,7 @@ async fn guardian_reuses_prompt_cache_key_and_appends_prior_reviews() -> anyhow: let first_request = GuardianApprovalRequest::Shell { id: "shell-1".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the first docs fix.".to_string()), @@ -1059,7 +1069,7 @@ async fn guardian_reuses_prompt_cache_key_and_appends_prior_reviews() -> anyhow: "push".to_string(), "--force-with-lease".to_string(), ], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the second docs fix.".to_string()), @@ -1101,7 +1111,7 @@ async fn guardian_reuses_prompt_cache_key_and_appends_prior_reviews() -> anyhow: let third_request = GuardianApprovalRequest::Shell { id: "shell-3".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the third docs fix.".to_string()), @@ -1197,13 +1207,15 @@ async fn 
guardian_reuses_prompt_cache_key_and_appends_prior_reviews() -> anyhow: "codex_core__guardian__tests__guardian_followup_review_request_layout", format!( "{}\n\nshared_prompt_cache_key: {}\nfollowup_contains_first_rationale: {}", - context_snapshot::format_labeled_requests_snapshot( - "Guardian follow-up review request layout", - &[ - ("Initial Guardian Review Request", &requests[0]), - ("Follow-up Guardian Review Request", &requests[1]), - ], - &guardian_snapshot_options(), + normalize_guardian_snapshot_paths( + context_snapshot::format_labeled_requests_snapshot( + "Guardian follow-up review request layout", + &[ + ("Initial Guardian Review Request", &requests[0]), + ("Follow-up Guardian Review Request", &requests[1]), + ], + &guardian_snapshot_options(), + ) ), first_body["prompt_cache_key"] == second_body["prompt_cache_key"], second_body.to_string().contains(first_rationale), @@ -1239,7 +1251,7 @@ async fn guardian_review_surfaces_responses_api_errors_in_rejection_reason() -> config.user_instructions = None; let config = Arc::new(config); let models_manager = Arc::new(test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -1261,7 +1273,7 @@ async fn guardian_review_surfaces_responses_api_errors_in_rejection_reason() -> GuardianApprovalRequest::Shell { id: "shell-guardian-error".to_string(), command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), + cwd: test_path_buf("/repo/codex-rs/core").abs(), sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, additional_permissions: None, justification: Some("Need to push the reviewed docs fix.".to_string()), @@ -1321,226 +1333,246 @@ async fn guardian_review_surfaces_responses_api_errors_in_rejection_reason() -> Ok(()) } -#[tokio::test(flavor = "current_thread")] +#[tokio::test] async fn 
guardian_parallel_reviews_fork_from_last_committed_trunk_history() -> anyhow::Result<()> { - let first_assessment = serde_json::json!({ - "risk_level": "low", - "user_authorization": "high", - "outcome": "allow", - "rationale": "first guardian rationale", - }) - .to_string(); - let second_assessment = serde_json::json!({ - "risk_level": "low", - "user_authorization": "high", - "outcome": "allow", - "rationale": "second guardian rationale", - }) - .to_string(); - let third_assessment = serde_json::json!({ - "risk_level": "low", - "user_authorization": "high", - "outcome": "allow", - "rationale": "third guardian rationale", - }) - .to_string(); - let (gate_tx, gate_rx) = tokio::sync::oneshot::channel(); - let (server, _) = start_streaming_sse_server(vec![ - vec![StreamingSseChunk { - gate: None, - body: sse(vec![ - ev_response_created("resp-guardian-1"), - ev_assistant_message("msg-guardian-1", &first_assessment), - ev_completed("resp-guardian-1"), - ]), - }], - vec![ - StreamingSseChunk { + const TEST_STACK_SIZE_BYTES: usize = 2 * 1024 * 1024; + + let handle = + std::thread::Builder::new() + .name("guardian_parallel_reviews_fork_from_last_committed_trunk_history".to_string()) + .stack_size(TEST_STACK_SIZE_BYTES) + .spawn(|| -> anyhow::Result<()> { + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + runtime.block_on(Box::pin(async { + let first_assessment = serde_json::json!({ + "risk_level": "low", + "user_authorization": "high", + "outcome": "allow", + "rationale": "first guardian rationale", + }) + .to_string(); + let second_assessment = serde_json::json!({ + "risk_level": "low", + "user_authorization": "high", + "outcome": "allow", + "rationale": "second guardian rationale", + }) + .to_string(); + let third_assessment = serde_json::json!({ + "risk_level": "low", + "user_authorization": "high", + "outcome": "allow", + "rationale": "third guardian rationale", + }) + .to_string(); + let (gate_tx, gate_rx) = 
tokio::sync::oneshot::channel(); + let (server, _) = start_streaming_sse_server(vec![ + vec![StreamingSseChunk { gate: None, - body: sse(vec![ev_response_created("resp-guardian-2")]), - }, - StreamingSseChunk { - gate: Some(gate_rx), body: sse(vec![ - ev_assistant_message("msg-guardian-2", &second_assessment), - ev_completed("resp-guardian-2"), + ev_response_created("resp-guardian-1"), + ev_assistant_message("msg-guardian-1", &first_assessment), + ev_completed("resp-guardian-1"), ]), - }, - ], - vec![StreamingSseChunk { - gate: None, - body: sse(vec![ - ev_response_created("resp-guardian-3"), - ev_assistant_message("msg-guardian-3", &third_assessment), - ev_completed("resp-guardian-3"), - ]), - }], - ]) - .await; + }], + vec![ + StreamingSseChunk { + gate: None, + body: sse(vec![ev_response_created("resp-guardian-2")]), + }, + StreamingSseChunk { + gate: Some(gate_rx), + body: sse(vec![ + ev_assistant_message("msg-guardian-2", &second_assessment), + ev_completed("resp-guardian-2"), + ]), + }, + ], + vec![StreamingSseChunk { + gate: None, + body: sse(vec![ + ev_response_created("resp-guardian-3"), + ev_assistant_message("msg-guardian-3", &third_assessment), + ev_completed("resp-guardian-3"), + ]), + }], + ]) + .await; - let (session, turn) = guardian_test_session_and_turn_with_base_url(server.uri()).await; - seed_guardian_parent_history(&session, &turn).await; + let (session, turn) = guardian_test_session_and_turn_with_base_url(server.uri()).await; + seed_guardian_parent_history(&session, &turn).await; - let initial_request = GuardianApprovalRequest::Shell { - id: "shell-guardian-1".to_string(), - command: vec!["git".to_string(), "status".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), - sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, - additional_permissions: None, - justification: Some("Inspect repo state before proceeding.".to_string()), - }; - assert_eq!( - review_approval_request( + let initial_request = 
GuardianApprovalRequest::Shell { + id: "shell-guardian-1".to_string(), + command: vec!["git".to_string(), "status".to_string()], + cwd: test_path_buf("/repo/codex-rs/core").abs(), + sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, + additional_permissions: None, + justification: Some("Inspect repo state before proceeding.".to_string()), + }; + assert_eq!( + review_approval_request( + &session, + &turn, + "review-shell-guardian-1".to_string(), + initial_request, + /*retry_reason*/ None + ) + .await, + ReviewDecision::Approved + ); + session + .record_into_history( + &[ + ResponseItem::Message { + id: None, + role: "user".to_string(), + content: vec![ContentItem::InputText { + text: "Please inspect pending changes before pushing.".to_string(), + }], + end_turn: None, + phase: None, + }, + ResponseItem::Message { + id: None, + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: "I need approval to run git diff.".to_string(), + }], + end_turn: None, + phase: None, + }, + ], + turn.as_ref(), + ) + .await; + + let second_request = GuardianApprovalRequest::Shell { + id: "shell-guardian-2".to_string(), + command: vec!["git".to_string(), "diff".to_string()], + cwd: test_path_buf("/repo/codex-rs/core").abs(), + sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, + additional_permissions: None, + justification: Some("Inspect pending changes before proceeding.".to_string()), + }; + let third_request = GuardianApprovalRequest::Shell { + id: "shell-guardian-3".to_string(), + command: vec!["git".to_string(), "push".to_string()], + cwd: test_path_buf("/repo/codex-rs/core").abs(), + sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, + additional_permissions: None, + justification: Some("Inspect whether pushing is safe before proceeding.".to_string()), + }; + + let session_for_second = Arc::clone(&session); + let turn_for_second = Arc::clone(&turn); + let mut second_review = 
tokio::spawn(async move { + review_approval_request( + &session_for_second, + &turn_for_second, + "review-shell-guardian-2".to_string(), + second_request, + Some("trunk follow-up".to_string()), + ) + .await + }); + + let second_request_observed = tokio::time::timeout(Duration::from_secs(5), async { + loop { + if server.requests().await.len() >= 2 { + break; + } + tokio::task::yield_now().await; + } + }) + .await; + assert!( + second_request_observed.is_ok(), + "second guardian request was not observed" + ); + session + .record_into_history( + &[ + ResponseItem::Message { + id: None, + role: "user".to_string(), + content: vec![ContentItem::InputText { + text: "Now inspect whether pushing is safe.".to_string(), + }], + end_turn: None, + phase: None, + }, + ResponseItem::Message { + id: None, + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: "I need approval to push after the diff check.".to_string(), + }], + end_turn: None, + phase: None, + }, + ], + turn.as_ref(), + ) + .await; + + let third_decision = review_approval_request( &session, &turn, - "review-shell-guardian-1".to_string(), - initial_request, - /*retry_reason*/ None - ) - .await, - ReviewDecision::Approved - ); - session - .record_into_history( - &[ - ResponseItem::Message { - id: None, - role: "user".to_string(), - content: vec![ContentItem::InputText { - text: "Please inspect pending changes before pushing.".to_string(), - }], - end_turn: None, - phase: None, - }, - ResponseItem::Message { - id: None, - role: "assistant".to_string(), - content: vec![ContentItem::OutputText { - text: "I need approval to run git diff.".to_string(), - }], - end_turn: None, - phase: None, - }, - ], - turn.as_ref(), + "review-shell-guardian-3".to_string(), + third_request, + Some("parallel follow-up".to_string()), ) .await; + assert_eq!(third_decision, ReviewDecision::Approved); + let requests = server.requests().await; + assert_eq!(requests.len(), 3); + let third_request_body = 
serde_json::from_slice::(&requests[2])?; + let third_request_body_text = third_request_body.to_string(); + assert!( + third_request_body_text.contains("first guardian rationale"), + "forked guardian review should include the last committed trunk assessment" + ); + let third_user_message = last_user_message_text_from_body(&third_request_body); + assert!(third_user_message.contains(">>> TRANSCRIPT DELTA START\n")); + assert!( + third_user_message.contains("[5] user: Please inspect pending changes before pushing.") + ); + assert!(third_user_message.contains("[7] user: Now inspect whether pushing is safe.")); + assert!(!third_user_message.contains("[1] user: Please check the repo visibility")); + assert!( + !third_request_body_text.contains("second guardian rationale"), + "forked guardian review should not include the still in-flight trunk assessment" + ); + assert!( + tokio::time::timeout(Duration::from_millis(100), &mut second_review) + .await + .is_err(), + "the trunk guardian review should still be blocked on its gated response" + ); - let second_request = GuardianApprovalRequest::Shell { - id: "shell-guardian-2".to_string(), - command: vec!["git".to_string(), "diff".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), - sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, - additional_permissions: None, - justification: Some("Inspect pending changes before proceeding.".to_string()), - }; - let third_request = GuardianApprovalRequest::Shell { - id: "shell-guardian-3".to_string(), - command: vec!["git".to_string(), "push".to_string()], - cwd: PathBuf::from("/repo/codex-rs/core"), - sandbox_permissions: crate::sandboxing::SandboxPermissions::UseDefault, - additional_permissions: None, - justification: Some("Inspect whether pushing is safe before proceeding.".to_string()), - }; + gate_tx + .send(()) + .expect("second guardian review gate should still be open"); + assert_eq!(second_review.await?, ReviewDecision::Approved); + 
server.shutdown().await; - let session_for_second = Arc::clone(&session); - let turn_for_second = Arc::clone(&turn); - let mut second_review = tokio::spawn(async move { - review_approval_request( - &session_for_second, - &turn_for_second, - "review-shell-guardian-2".to_string(), - second_request, - Some("trunk follow-up".to_string()), - ) - .await - }); + Ok(()) + })) + })?; - let second_request_observed = tokio::time::timeout(Duration::from_secs(5), async { - loop { - if server.requests().await.len() >= 2 { - break; - } - tokio::task::yield_now().await; - } - }) - .await; - assert!( - second_request_observed.is_ok(), - "second guardian request was not observed" - ); - session - .record_into_history( - &[ - ResponseItem::Message { - id: None, - role: "user".to_string(), - content: vec![ContentItem::InputText { - text: "Now inspect whether pushing is safe.".to_string(), - }], - end_turn: None, - phase: None, - }, - ResponseItem::Message { - id: None, - role: "assistant".to_string(), - content: vec![ContentItem::OutputText { - text: "I need approval to push after the diff check.".to_string(), - }], - end_turn: None, - phase: None, - }, - ], - turn.as_ref(), - ) - .await; - - let third_decision = review_approval_request( - &session, - &turn, - "review-shell-guardian-3".to_string(), - third_request, - Some("parallel follow-up".to_string()), - ) - .await; - assert_eq!(third_decision, ReviewDecision::Approved); - let requests = server.requests().await; - assert_eq!(requests.len(), 3); - let third_request_body = serde_json::from_slice::(&requests[2])?; - let third_request_body_text = third_request_body.to_string(); - assert!( - third_request_body_text.contains("first guardian rationale"), - "forked guardian review should include the last committed trunk assessment" - ); - let third_user_message = last_user_message_text_from_body(&third_request_body); - assert!(third_user_message.contains(">>> TRANSCRIPT DELTA START\n")); - assert!( - third_user_message.contains("[5] user: 
Please inspect pending changes before pushing.") - ); - assert!(third_user_message.contains("[7] user: Now inspect whether pushing is safe.")); - assert!(!third_user_message.contains("[1] user: Please check the repo visibility")); - assert!( - !third_request_body_text.contains("second guardian rationale"), - "forked guardian review should not include the still in-flight trunk assessment" - ); - assert!( - tokio::time::timeout(Duration::from_millis(100), &mut second_review) - .await - .is_err(), - "the trunk guardian review should still be blocked on its gated response" - ); - - gate_tx - .send(()) - .expect("second guardian review gate should still be open"); - assert_eq!(second_review.await?, ReviewDecision::Approved); - server.shutdown().await; - - Ok(()) + match handle.join() { + Ok(result) => result, + Err(_) => Err(anyhow::anyhow!( + "guardian_parallel_reviews_fork_from_last_committed_trunk_history thread panicked" + )), + } } -#[test] -fn guardian_review_session_config_preserves_parent_network_proxy() { - let mut parent_config = test_config(); +#[tokio::test] +async fn guardian_review_session_config_preserves_parent_network_proxy() { + let mut parent_config = test_config().await; let network = NetworkProxySpec::from_config_and_constraints( NetworkProxyConfig::default(), Some(NetworkConstraints { @@ -1585,9 +1617,9 @@ fn guardian_review_session_config_preserves_parent_network_proxy() { ); } -#[test] -fn guardian_review_session_config_overrides_parent_developer_instructions() { - let mut parent_config = test_config(); +#[tokio::test] +async fn guardian_review_session_config_overrides_parent_developer_instructions() { + let mut parent_config = test_config().await; parent_config.developer_instructions = Some("parent or managed config should not replace guardian policy".to_string()); @@ -1605,9 +1637,9 @@ fn guardian_review_session_config_overrides_parent_developer_instructions() { ); } -#[test] -fn guardian_review_session_config_uses_live_network_proxy_state() { 
- let mut parent_config = test_config(); +#[tokio::test] +async fn guardian_review_session_config_uses_live_network_proxy_state() { + let mut parent_config = test_config().await; let mut parent_network = NetworkProxyConfig::default(); parent_network.network.enabled = true; parent_network @@ -1649,9 +1681,9 @@ fn guardian_review_session_config_uses_live_network_proxy_state() { ); } -#[test] -fn guardian_review_session_config_rejects_pinned_collab_feature() { - let mut parent_config = test_config(); +#[tokio::test] +async fn guardian_review_session_config_rejects_pinned_collab_feature() { + let mut parent_config = test_config().await; parent_config.features = ManagedFeatures::from_configured( parent_config.features.get().clone(), Some(Sourced { @@ -1677,9 +1709,9 @@ fn guardian_review_session_config_rejects_pinned_collab_feature() { ); } -#[test] -fn guardian_review_session_config_uses_parent_active_model_instead_of_hardcoded_slug() { - let mut parent_config = test_config(); +#[tokio::test] +async fn guardian_review_session_config_uses_parent_active_model_instead_of_hardcoded_slug() { + let mut parent_config = test_config().await; parent_config.model = Some("configured-model".to_string()); let guardian_config = build_guardian_review_session_config_for_test( @@ -1693,8 +1725,8 @@ fn guardian_review_session_config_uses_parent_active_model_instead_of_hardcoded_ assert_eq!(guardian_config.model, Some("active-model".to_string())); } -#[test] -fn guardian_review_session_config_uses_requirements_guardian_policy_config() { +#[tokio::test] +async fn guardian_review_session_config_uses_requirements_guardian_policy_config() { let codex_home = tempfile::tempdir().expect("create temp dir"); let workspace = tempfile::tempdir().expect("create temp dir"); let config_layer_stack = ConfigLayerStack::new( @@ -1709,14 +1741,16 @@ fn guardian_review_session_config_uses_requirements_guardian_policy_config() { ) .expect("config layer stack"); let parent_config = 
Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), ConfigToml::default(), ConfigOverrides { cwd: Some(workspace.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), config_layer_stack, ) + .await .expect("load config"); let guardian_config = build_guardian_review_session_config_for_test( @@ -1735,22 +1769,25 @@ fn guardian_review_session_config_uses_requirements_guardian_policy_config() { ); } -#[test] -fn guardian_review_session_config_uses_default_guardian_policy_without_requirements_override() { +#[tokio::test] +async fn guardian_review_session_config_uses_default_guardian_policy_without_requirements_override() +{ let codex_home = tempfile::tempdir().expect("create temp dir"); let workspace = tempfile::tempdir().expect("create temp dir"); let config_layer_stack = ConfigLayerStack::new(Vec::new(), Default::default(), Default::default()) .expect("config layer stack"); let parent_config = Config::load_config_with_layer_stack( + LOCAL_FS.as_ref(), ConfigToml::default(), ConfigOverrides { cwd: Some(workspace.path().to_path_buf()), ..Default::default() }, - codex_home.path().to_path_buf(), + codex_home.abs(), config_layer_stack, ) + .await .expect("load config"); let guardian_config = build_guardian_review_session_config_for_test( diff --git a/codex-rs/core/src/hook_runtime.rs b/codex-rs/core/src/hook_runtime.rs index 0d620119f7..53fa99a888 100644 --- a/codex-rs/core/src/hook_runtime.rs +++ b/codex-rs/core/src/hook_runtime.rs @@ -1,6 +1,9 @@ use std::future::Future; use std::sync::Arc; +use std::time::Duration; +use codex_analytics::HookRunFact; +use codex_analytics::build_track_events_context; use codex_hooks::PostToolUseOutcome; use codex_hooks::PostToolUseRequest; use codex_hooks::PreToolUseOutcome; @@ -8,6 +11,8 @@ use codex_hooks::PreToolUseRequest; use codex_hooks::SessionStartOutcome; use codex_hooks::UserPromptSubmitOutcome; use codex_hooks::UserPromptSubmitRequest; +use 
codex_otel::HOOK_RUN_DURATION_METRIC; +use codex_otel::HOOK_RUN_METRIC; use codex_protocol::items::TurnItem; use codex_protocol::models::DeveloperInstructions; use codex_protocol::models::ResponseInputItem; @@ -15,7 +20,10 @@ use codex_protocol::models::ResponseItem; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::HookCompletedEvent; +use codex_protocol::protocol::HookEventName; +use codex_protocol::protocol::HookRunStatus; use codex_protocol::protocol::HookRunSummary; +use codex_protocol::protocol::HookSource; use codex_protocol::protocol::HookStartedEvent; use codex_protocol::user_input::UserInput; use serde_json::Value; @@ -96,7 +104,7 @@ pub(crate) async fn run_pending_session_start_hooks( let request = codex_hooks::SessionStartRequest { session_id: sess.conversation_id, - cwd: turn_context.cwd.to_path_buf(), + cwd: turn_context.cwd.clone(), transcript_path: sess.hook_transcript_path().await, model: turn_context.model_info.slug.clone(), permission_mode: hook_permission_mode(turn_context), @@ -124,7 +132,7 @@ pub(crate) async fn run_pre_tool_use_hooks( let request = PreToolUseRequest { session_id: sess.conversation_id, turn_id: turn_context.sub_id.clone(), - cwd: turn_context.cwd.to_path_buf(), + cwd: turn_context.cwd.clone(), transcript_path: sess.hook_transcript_path().await, model: turn_context.model_info.slug.clone(), permission_mode: hook_permission_mode(turn_context), @@ -155,7 +163,7 @@ pub(crate) async fn run_post_tool_use_hooks( let request = PostToolUseRequest { session_id: sess.conversation_id, turn_id: turn_context.sub_id.clone(), - cwd: turn_context.cwd.to_path_buf(), + cwd: turn_context.cwd.clone(), transcript_path: sess.hook_transcript_path().await, model: turn_context.model_info.slug.clone(), permission_mode: hook_permission_mode(turn_context), @@ -180,7 +188,7 @@ pub(crate) async fn run_user_prompt_submit_hooks( let request = UserPromptSubmitRequest { session_id: 
sess.conversation_id, turn_id: turn_context.sub_id.clone(), - cwd: turn_context.cwd.to_path_buf(), + cwd: turn_context.cwd.clone(), transcript_path: sess.hook_transcript_path().await, model: turn_context.model_info.slug.clone(), permission_mode: hook_permission_mode(turn_context), @@ -316,17 +324,102 @@ async fn emit_hook_started_events( } } -async fn emit_hook_completed_events( +pub(crate) async fn emit_hook_completed_events( sess: &Arc, turn_context: &Arc, completed_events: Vec, ) { for completed in completed_events { + emit_hook_completed_metrics(turn_context, &completed); + track_hook_completed_analytics(sess, turn_context, &completed); sess.send_event(turn_context, EventMsg::HookCompleted(completed)) .await; } } +fn emit_hook_completed_metrics(turn_context: &TurnContext, completed: &HookCompletedEvent) { + let tags = hook_run_metric_tags(&completed.run); + turn_context + .session_telemetry + .counter(HOOK_RUN_METRIC, /*inc*/ 1, &tags); + if let Some(duration_ms) = completed.run.duration_ms + && let Ok(duration_ms) = u64::try_from(duration_ms) + { + turn_context.session_telemetry.record_duration( + HOOK_RUN_DURATION_METRIC, + Duration::from_millis(duration_ms), + &tags, + ); + } +} + +fn track_hook_completed_analytics( + sess: &Arc, + turn_context: &Arc, + completed: &HookCompletedEvent, +) { + let (tracking, hook) = + hook_run_analytics_payload(sess.conversation_id.to_string(), turn_context, completed); + sess.services + .analytics_events_client + .track_hook_run(tracking, hook); +} + +fn hook_run_analytics_payload( + thread_id: String, + turn_context: &TurnContext, + completed: &HookCompletedEvent, +) -> (codex_analytics::TrackEventsContext, HookRunFact) { + ( + build_track_events_context( + turn_context.model_info.slug.clone(), + thread_id, + completed + .turn_id + .clone() + .unwrap_or_else(|| turn_context.sub_id.clone()), + ), + HookRunFact { + event_name: completed.run.event_name, + hook_source: completed.run.source, + status: completed.run.status, + }, + 
) +} + +fn hook_run_metric_tags(run: &HookRunSummary) -> [(&'static str, &'static str); 3] { + let hook_name = match run.event_name { + HookEventName::PreToolUse => "PreToolUse", + HookEventName::PostToolUse => "PostToolUse", + HookEventName::SessionStart => "SessionStart", + HookEventName::UserPromptSubmit => "UserPromptSubmit", + HookEventName::Stop => "Stop", + }; + let hook_source = match run.source { + HookSource::System => "system", + HookSource::User => "user", + HookSource::Project => "project", + HookSource::Mdm => "mdm", + HookSource::SessionFlags => "session_flags", + HookSource::LegacyManagedConfigFile => "legacy_managed_config_file", + HookSource::LegacyManagedConfigMdm => "legacy_managed_config_mdm", + HookSource::Unknown => "unknown", + }; + let status = match run.status { + HookRunStatus::Running => "running", + HookRunStatus::Completed => "completed", + HookRunStatus::Failed => "failed", + HookRunStatus::Blocked => "blocked", + HookRunStatus::Stopped => "stopped", + }; + + [ + ("hook_name", hook_name), + ("source", hook_source), + ("status", status), + ] +} + fn hook_permission_mode(turn_context: &TurnContext) -> String { match turn_context.approval_policy.value() { AskForApproval::Never => "bypassPermissions", @@ -341,9 +434,22 @@ fn hook_permission_mode(turn_context: &TurnContext) -> String { #[cfg(test)] mod tests { use codex_protocol::models::ContentItem; + use codex_protocol::protocol::HookEventName; + use codex_protocol::protocol::HookExecutionMode; + use codex_protocol::protocol::HookHandlerType; + use codex_protocol::protocol::HookRunStatus; + use codex_protocol::protocol::HookScope; + use codex_protocol::protocol::HookSource; use pretty_assertions::assert_eq; use super::additional_context_messages; + use super::hook_run_analytics_payload; + use super::hook_run_metric_tags; + use crate::codex::make_session_and_context; + use codex_protocol::protocol::HookCompletedEvent; + use codex_protocol::protocol::HookRunSummary; + use 
codex_utils_absolute_path::test_support::PathBufExt; + use codex_utils_absolute_path::test_support::test_path_buf; #[test] fn additional_context_messages_stay_separate_and_ordered() { @@ -378,4 +484,86 @@ mod tests { ], ); } + + #[tokio::test] + async fn hook_run_analytics_payload_uses_completed_turn_id() { + let (_session, turn_context) = make_session_and_context().await; + let completed = HookCompletedEvent { + turn_id: Some("turn-from-hook".to_string()), + run: sample_hook_run(HookRunStatus::Blocked, HookSource::Project), + }; + + let (tracking, hook) = + hook_run_analytics_payload("thread-123".to_string(), &turn_context, &completed); + + assert_eq!(tracking.thread_id, "thread-123"); + assert_eq!(tracking.turn_id, "turn-from-hook"); + assert_eq!(tracking.model_slug, turn_context.model_info.slug); + assert_eq!(hook.event_name, HookEventName::Stop); + assert_eq!(hook.hook_source, HookSource::Project); + assert_eq!(hook.status, HookRunStatus::Blocked); + } + + #[tokio::test] + async fn hook_run_analytics_payload_falls_back_to_turn_context_id() { + let (_session, turn_context) = make_session_and_context().await; + let completed = HookCompletedEvent { + turn_id: None, + run: sample_hook_run(HookRunStatus::Failed, HookSource::Unknown), + }; + + let (tracking, hook) = + hook_run_analytics_payload("thread-123".to_string(), &turn_context, &completed); + + assert_eq!(tracking.turn_id, turn_context.sub_id); + assert_eq!(hook.hook_source, HookSource::Unknown); + assert_eq!(hook.status, HookRunStatus::Failed); + } + + #[test] + fn hook_run_metric_tags_match_analytics_shape() { + let run = sample_hook_run(HookRunStatus::Blocked, HookSource::Project); + + assert_eq!( + hook_run_metric_tags(&run), + [ + ("hook_name", "Stop"), + ("source", "project"), + ("status", "blocked"), + ] + ); + } + + #[test] + fn hook_run_metric_tags_include_expanded_hook_sources() { + let run = sample_hook_run(HookRunStatus::Completed, HookSource::LegacyManagedConfigMdm); + + assert_eq!( + 
hook_run_metric_tags(&run), + [ + ("hook_name", "Stop"), + ("source", "legacy_managed_config_mdm"), + ("status", "completed"), + ] + ); + } + + fn sample_hook_run(status: HookRunStatus, source: HookSource) -> HookRunSummary { + HookRunSummary { + id: "stop:0:/tmp/hooks.json".to_string(), + event_name: HookEventName::Stop, + handler_type: HookHandlerType::Command, + execution_mode: HookExecutionMode::Sync, + scope: HookScope::Turn, + source_path: test_path_buf("/tmp/hooks.json").abs(), + source, + display_order: 0, + status, + status_message: None, + started_at: 10, + completed_at: Some(37), + duration_ms: Some(27), + entries: Vec::new(), + } + } } diff --git a/codex-rs/core/src/installation_id.rs b/codex-rs/core/src/installation_id.rs index 940e17eb01..e9e5445c8c 100644 --- a/codex-rs/core/src/installation_id.rs +++ b/codex-rs/core/src/installation_id.rs @@ -4,19 +4,19 @@ use std::io::Result; use std::io::Seek; use std::io::SeekFrom; use std::io::Write; -use std::path::Path; #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; #[cfg(unix)] use std::os::unix::fs::PermissionsExt; +use codex_utils_absolute_path::AbsolutePathBuf; use tokio::fs; use uuid::Uuid; pub(crate) const INSTALLATION_ID_FILENAME: &str = "installation_id"; -pub(crate) async fn resolve_installation_id(codex_home: &Path) -> Result { +pub(crate) async fn resolve_installation_id(codex_home: &AbsolutePathBuf) -> Result { let path = codex_home.join(INSTALLATION_ID_FILENAME); fs::create_dir_all(codex_home).await?; tokio::task::spawn_blocking(move || { @@ -67,6 +67,7 @@ pub(crate) async fn resolve_installation_id(codex_home: &Path) -> Result mod tests { use super::INSTALLATION_ID_FILENAME; use super::resolve_installation_id; + use core_test_support::PathExt; use pretty_assertions::assert_eq; use tempfile::TempDir; use uuid::Uuid; @@ -77,9 +78,10 @@ mod tests { #[tokio::test] async fn resolve_installation_id_generates_and_persists_uuid() { let codex_home = TempDir::new().expect("create temp dir"); + let 
codex_home_abs = codex_home.path().abs(); let persisted_path = codex_home.path().join(INSTALLATION_ID_FILENAME); - let installation_id = resolve_installation_id(codex_home.path()) + let installation_id = resolve_installation_id(&codex_home_abs) .await .expect("resolve installation id"); @@ -103,6 +105,7 @@ mod tests { #[tokio::test] async fn resolve_installation_id_reuses_existing_uuid() { let codex_home = TempDir::new().expect("create temp dir"); + let codex_home_abs = codex_home.path().abs(); let existing = Uuid::new_v4().to_string().to_uppercase(); std::fs::write( codex_home.path().join(INSTALLATION_ID_FILENAME), @@ -110,7 +113,7 @@ mod tests { ) .expect("write installation id"); - let resolved = resolve_installation_id(codex_home.path()) + let resolved = resolve_installation_id(&codex_home_abs) .await .expect("resolve installation id"); @@ -125,13 +128,14 @@ mod tests { #[tokio::test] async fn resolve_installation_id_rewrites_invalid_file_contents() { let codex_home = TempDir::new().expect("create temp dir"); + let codex_home_abs = codex_home.path().abs(); std::fs::write( codex_home.path().join(INSTALLATION_ID_FILENAME), "not-a-uuid", ) .expect("write invalid installation id"); - let resolved = resolve_installation_id(codex_home.path()) + let resolved = resolve_installation_id(&codex_home_abs) .await .expect("resolve installation id"); diff --git a/codex-rs/core/src/landlock.rs b/codex-rs/core/src/landlock.rs index 95f107be9c..0884642008 100644 --- a/codex-rs/core/src/landlock.rs +++ b/codex-rs/core/src/landlock.rs @@ -8,9 +8,9 @@ use codex_protocol::protocol::SandboxPolicy; use codex_sandboxing::landlock::CODEX_LINUX_SANDBOX_ARG0; use codex_sandboxing::landlock::allow_network_for_proxy; use codex_sandboxing::landlock::create_linux_sandbox_command_args_for_policies; +use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashMap; use std::path::Path; -use std::path::PathBuf; use tokio::process::Child; /// Spawn a shell tool command under the 
Linux sandbox helper @@ -25,9 +25,9 @@ use tokio::process::Child; pub async fn spawn_command_under_linux_sandbox

( codex_linux_sandbox_exe: P, command: Vec, - command_cwd: PathBuf, + command_cwd: AbsolutePathBuf, sandbox_policy: &SandboxPolicy, - sandbox_policy_cwd: &Path, + sandbox_policy_cwd: &AbsolutePathBuf, use_legacy_landlock: bool, stdio_policy: StdioPolicy, network: Option<&NetworkProxy>, diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index 5e62199d2d..50777ad157 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -5,6 +5,7 @@ // the TUI or the tracing stack). #![deny(clippy::print_stdout, clippy::print_stderr)] +mod agent_identity; mod apply_patch; mod apps; mod arc_monitor; @@ -54,12 +55,11 @@ pub use network_proxy_loader::MtimeConfigReloader; pub use network_proxy_loader::build_network_proxy_state; pub use network_proxy_loader::build_network_proxy_state_and_reloader; mod original_image_detail; -pub use codex_mcp::MCP_SANDBOX_STATE_CAPABILITY; -pub use codex_mcp::MCP_SANDBOX_STATE_METHOD; pub use codex_mcp::SandboxState; mod mcp_openai_file; mod mcp_tool_call; mod memories; +pub use memories::clear_memory_roots_contents; pub(crate) mod mention_syntax; pub(crate) mod message_history; pub(crate) mod utils; @@ -99,9 +99,7 @@ pub(crate) use skills::build_skill_injections; pub(crate) use skills::build_skill_name_counts; pub(crate) use skills::collect_env_var_dependencies; pub(crate) use skills::collect_explicit_skill_mentions; -pub(crate) use skills::config_rules; pub(crate) use skills::injection; -pub(crate) use skills::loader; pub(crate) use skills::manager; pub(crate) use skills::maybe_emit_implicit_skill_invocation; pub(crate) use skills::render_skills_section; @@ -132,14 +130,12 @@ pub type ConversationManager = ThreadManager; pub type NewConversation = NewThread; #[deprecated(note = "use CodexThread")] pub type CodexConversation = CodexThread; -pub(crate) mod project_doc; -pub use project_doc::DEFAULT_PROJECT_DOC_FILENAME; -pub use project_doc::LOCAL_PROJECT_DOC_FILENAME; -pub use project_doc::discover_project_doc_paths; -pub 
use project_doc::read_project_docs; +pub(crate) mod agents_md; +pub use agents_md::AgentsMdManager; +pub use agents_md::DEFAULT_AGENTS_MD_FILENAME; +pub use agents_md::LOCAL_AGENTS_MD_FILENAME; mod rollout; pub(crate) mod safety; -pub mod seatbelt; mod session_rollout_init_error; pub mod shell; pub(crate) mod shell_snapshot; @@ -152,6 +148,7 @@ mod tools; pub(crate) mod turn_diff_tracker; mod turn_metadata; mod turn_timing; +mod unavailable_tool; pub use rollout::ARCHIVED_SESSIONS_SUBDIR; pub use rollout::Cursor; pub use rollout::EventPersistenceMode; diff --git a/codex-rs/core/src/mcp.rs b/codex-rs/core/src/mcp.rs index 83becdc07e..0d4c26991d 100644 --- a/codex-rs/core/src/mcp.rs +++ b/codex-rs/core/src/mcp.rs @@ -20,22 +20,22 @@ impl McpManager { Self { plugins_manager } } - pub fn configured_servers(&self, config: &Config) -> HashMap { - let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()); + pub async fn configured_servers(&self, config: &Config) -> HashMap { + let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()).await; configured_mcp_servers(&mcp_config) } - pub fn effective_servers( + pub async fn effective_servers( &self, config: &Config, auth: Option<&CodexAuth>, ) -> HashMap { - let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()); + let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()).await; effective_mcp_servers(&mcp_config, auth) } - pub fn tool_plugin_provenance(&self, config: &Config) -> ToolPluginProvenance { - let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()); + pub async fn tool_plugin_provenance(&self, config: &Config) -> ToolPluginProvenance { + let mcp_config = config.to_mcp_config(self.plugins_manager.as_ref()).await; collect_tool_plugin_provenance(&mcp_config) } } diff --git a/codex-rs/core/src/mcp_openai_file.rs b/codex-rs/core/src/mcp_openai_file.rs index 587dd4b770..33d0a3f1f0 100644 --- a/codex-rs/core/src/mcp_openai_file.rs +++ 
b/codex-rs/core/src/mcp_openai_file.rs @@ -115,6 +115,7 @@ async fn build_uploaded_local_argument_value( let upload_auth = CoreAuthProvider { token: Some(token_data.access_token), account_id: token_data.account_id, + is_fedramp_account: auth.is_fedramp_account(), }; let uploaded = upload_local_file( turn_context.config.chatgpt_base_url.trim_end_matches('/'), diff --git a/codex-rs/core/src/mcp_skill_dependencies.rs b/codex-rs/core/src/mcp_skill_dependencies.rs index c4e302307f..c12fe28cb2 100644 --- a/codex-rs/core/src/mcp_skill_dependencies.rs +++ b/codex-rs/core/src/mcp_skill_dependencies.rs @@ -53,7 +53,8 @@ pub(crate) async fn maybe_prompt_and_install_mcp_dependencies( let installed = sess .services .mcp_manager - .configured_servers(config.as_ref()); + .configured_servers(config.as_ref()) + .await; let missing = collect_missing_mcp_dependencies(mentioned_skills, &installed); if missing.is_empty() { return; @@ -86,7 +87,7 @@ pub(crate) async fn maybe_install_mcp_dependencies( } let codex_home = config.codex_home.clone(); - let installed = sess.services.mcp_manager.configured_servers(config); + let installed = sess.services.mcp_manager.configured_servers(config).await; let missing = collect_missing_mcp_dependencies(mentioned_skills, &installed); if missing.is_empty() { return; @@ -197,7 +198,8 @@ pub(crate) async fn maybe_install_mcp_dependencies( let mut refresh_servers = sess .services .mcp_manager - .effective_servers(config, auth.as_ref()); + .effective_servers(config, auth.as_ref()) + .await; for (name, server_config) in &servers { refresh_servers .entry(name.clone()) @@ -362,11 +364,14 @@ fn mcp_dependency_to_server_config( http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -388,11 +393,14 
@@ fn mcp_dependency_to_server_config( env_vars: Vec::new(), cwd: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, diff --git a/codex-rs/core/src/mcp_tool_call.rs b/codex-rs/core/src/mcp_tool_call.rs index 2a30ba73c5..b707d51890 100644 --- a/codex-rs/core/src/mcp_tool_call.rs +++ b/codex-rs/core/src/mcp_tool_call.rs @@ -1,6 +1,5 @@ use std::collections::BTreeMap; use std::collections::HashMap; -use std::path::PathBuf; use std::time::Duration; use std::time::Instant; @@ -37,6 +36,7 @@ use codex_analytics::build_track_events_context; use codex_config::types::AppToolApproval; use codex_features::Feature; use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::SandboxState; use codex_mcp::declared_openai_file_input_param_names; use codex_mcp::mcp_permission_prompt_is_auto_approved; use codex_otel::sanitize_metric_tag_value; @@ -55,10 +55,10 @@ use codex_protocol::request_user_input::RequestUserInputResponse; use codex_rmcp_client::ElicitationAction; use codex_rmcp_client::ElicitationResponse; use codex_rollout::state_db; +use codex_utils_absolute_path::AbsolutePathBuf; use rmcp::model::ToolAnnotations; use serde::Deserialize; use serde::Serialize; -use std::path::Path; use std::sync::Arc; use toml_edit::value; use tracing::Instrument; @@ -101,6 +101,9 @@ pub(crate) async fn handle_mcp_tool_call( let metadata = lookup_mcp_tool_metadata(sess.as_ref(), turn_context.as_ref(), &server, &tool_name).await; + let mcp_app_resource_uri = metadata + .as_ref() + .and_then(|metadata| metadata.mcp_app_resource_uri.clone()); let app_tool_policy = if server == CODEX_APPS_MCP_SERVER_NAME { connectors::app_tool_policy( &turn_context.config, @@ -130,6 +133,7 @@ pub(crate) async fn handle_mcp_tool_call( turn_context.as_ref(), &call_id, invocation, + 
mcp_app_resource_uri.clone(), "MCP tool call blocked by app configuration".to_string(), /*already_started*/ false, ) @@ -161,6 +165,7 @@ pub(crate) async fn handle_mcp_tool_call( let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id: call_id.clone(), invocation: invocation.clone(), + mcp_app_resource_uri: mcp_app_resource_uri.clone(), }); notify_mcp_tool_call_event(sess.as_ref(), turn_context.as_ref(), tool_call_begin_event).await; @@ -213,6 +218,7 @@ pub(crate) async fn handle_mcp_tool_call( let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.clone(), invocation, + mcp_app_resource_uri: mcp_app_resource_uri.clone(), duration, result: result.clone(), }); @@ -239,6 +245,7 @@ pub(crate) async fn handle_mcp_tool_call( turn_context.as_ref(), &call_id, invocation, + mcp_app_resource_uri.clone(), message, /*already_started*/ true, ) @@ -254,6 +261,7 @@ pub(crate) async fn handle_mcp_tool_call( turn_context.as_ref(), &call_id, invocation, + mcp_app_resource_uri.clone(), message, /*already_started*/ true, ) @@ -268,6 +276,7 @@ pub(crate) async fn handle_mcp_tool_call( turn_context.as_ref(), &call_id, invocation, + mcp_app_resource_uri.clone(), message, /*already_started*/ true, ) @@ -325,6 +334,7 @@ pub(crate) async fn handle_mcp_tool_call( let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.clone(), invocation, + mcp_app_resource_uri, duration, result: result.clone(), }); @@ -467,6 +477,10 @@ async fn execute_mcp_tool_call( metadata.and_then(|metadata| metadata.openai_file_input_params.as_deref()), ) .await?; + let request_meta = + augment_mcp_tool_request_meta_with_sandbox_state(sess, turn_context, server, request_meta) + .await + .map_err(|e| format!("failed to build MCP tool request metadata: {e:#}"))?; let result = sess .call_tool(server, tool_name, rewritten_arguments, request_meta) .await @@ -480,6 +494,52 @@ async fn execute_mcp_tool_call( ) } +async fn 
augment_mcp_tool_request_meta_with_sandbox_state( + sess: &Session, + turn_context: &TurnContext, + server: &str, + mut meta: Option, +) -> anyhow::Result> { + let supports_sandbox_state_meta = sess + .services + .mcp_connection_manager + .read() + .await + .server_supports_sandbox_state_meta_capability(server) + .await + .unwrap_or(false); + if !supports_sandbox_state_meta { + return Ok(meta); + } + + let sandbox_state = serde_json::to_value(SandboxState { + sandbox_policy: turn_context.sandbox_policy.get().clone(), + codex_linux_sandbox_exe: turn_context.codex_linux_sandbox_exe.clone(), + sandbox_cwd: turn_context.cwd.to_path_buf(), + use_legacy_landlock: turn_context.features.use_legacy_landlock(), + })?; + + match meta.as_mut() { + Some(serde_json::Value::Object(map)) => { + map.insert( + codex_mcp::MCP_SANDBOX_STATE_META_CAPABILITY.to_string(), + sandbox_state, + ); + } + Some(_) => {} + None => { + let mut map = serde_json::Map::new(); + map.insert( + codex_mcp::MCP_SANDBOX_STATE_META_CAPABILITY.to_string(), + sandbox_state, + ); + meta = Some(serde_json::Value::Object(map)); + } + } + + Ok(meta) +} + async fn maybe_mark_thread_memory_mode_polluted(sess: &Session, turn_context: &TurnContext) { if !turn_context .config @@ -592,11 +652,14 @@ pub(crate) struct McpToolApprovalMetadata { connector_description: Option, tool_title: Option, tool_description: Option, + mcp_app_resource_uri: Option, codex_apps_meta: Option>, openai_file_input_params: Option>, } const MCP_TOOL_CODEX_APPS_META_KEY: &str = "_codex_apps"; +const MCP_TOOL_OPENAI_OUTPUT_TEMPLATE_META_KEY: &str = "openai/outputTemplate"; +const MCP_TOOL_UI_RESOURCE_URI_META_KEY: &str = "ui/resourceUri"; fn custom_mcp_tool_approval_mode( turn_context: &TurnContext, @@ -613,9 +676,14 @@ fn custom_mcp_tool_approval_mode( .and_then(|value| { HashMap::::deserialize(value).ok() }) - .and_then(|servers| servers.get(server).cloned()) - .and_then(|server| server.tools.get(tool_name).cloned()) - .and_then(|tool| 
tool.approval_mode) + .and_then(|servers| { + let server_config = servers.get(server)?; + server_config + .tools + .get(tool_name) + .and_then(|tool| tool.approval_mode) + .or(server_config.default_tools_approval_mode) + }) .unwrap_or_default() } @@ -1050,6 +1118,7 @@ pub(crate) async fn lookup_mcp_tool_metadata( connector_description, tool_title: tool_info.tool.title, tool_description: tool_info.tool.description.map(std::borrow::Cow::into_owned), + mcp_app_resource_uri: get_mcp_app_resource_uri(tool_info.tool.meta.as_deref()), codex_apps_meta: tool_info .tool .meta @@ -1064,6 +1133,26 @@ pub(crate) async fn lookup_mcp_tool_metadata( }) } +fn get_mcp_app_resource_uri( + meta: Option<&serde_json::Map>, +) -> Option { + meta.and_then(|meta| { + meta.get("ui") + .and_then(serde_json::Value::as_object) + .and_then(|ui| ui.get("resourceUri")) + .and_then(serde_json::Value::as_str) + .or_else(|| { + meta.get(MCP_TOOL_UI_RESOURCE_URI_META_KEY) + .and_then(serde_json::Value::as_str) + }) + .or_else(|| { + meta.get(MCP_TOOL_OPENAI_OUTPUT_TEMPLATE_META_KEY) + .and_then(serde_json::Value::as_str) + }) + .map(str::to_string) + }) +} + async fn lookup_mcp_app_usage_metadata( sess: &Session, server: &str, @@ -1512,7 +1601,7 @@ async fn maybe_persist_mcp_tool_approval( } async fn persist_codex_app_tool_approval( - codex_home: &Path, + codex_home: &AbsolutePathBuf, connector_id: &str, tool_name: &str, ) -> anyhow::Result<()> { @@ -1563,7 +1652,10 @@ async fn persist_custom_mcp_tool_approval( .await } -fn project_mcp_tool_approval_config_folder(config: &Config, server: &str) -> Option { +fn project_mcp_tool_approval_config_folder( + config: &Config, + server: &str, +) -> Option { config .config_layer_stack .layers_high_to_low() @@ -1582,9 +1674,7 @@ fn project_mcp_tool_approval_config_folder(config: &Config, server: &str) -> Opt HashMap::::deserialize(value).ok() })?; if servers.contains_key(server) { - layer - .config_folder() - .map(|folder| folder.as_path().to_path_buf()) + 
layer.config_folder() } else { None } @@ -1615,6 +1705,7 @@ async fn notify_mcp_tool_call_skip( turn_context: &TurnContext, call_id: &str, invocation: McpInvocation, + mcp_app_resource_uri: Option, message: String, already_started: bool, ) -> Result { @@ -1622,6 +1713,7 @@ async fn notify_mcp_tool_call_skip( let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id: call_id.to_string(), invocation: invocation.clone(), + mcp_app_resource_uri: mcp_app_resource_uri.clone(), }); notify_mcp_tool_call_event(sess, turn_context, tool_call_begin_event).await; } @@ -1629,6 +1721,7 @@ async fn notify_mcp_tool_call_skip( let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.to_string(), invocation, + mcp_app_resource_uri, duration: Duration::ZERO, result: Err(message.clone()), }); diff --git a/codex-rs/core/src/mcp_tool_call_tests.rs b/codex-rs/core/src/mcp_tool_call_tests.rs index 1a26345bf1..a85233c773 100644 --- a/codex-rs/core/src/mcp_tool_call_tests.rs +++ b/codex-rs/core/src/mcp_tool_call_tests.rs @@ -14,6 +14,7 @@ use codex_config::types::McpServerConfig; use codex_config::types::McpServerToolConfig; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; +use core_test_support::PathExt; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_response_created; @@ -58,6 +59,7 @@ fn approval_metadata( connector_description: connector_description.map(str::to_string), tool_title: tool_title.map(str::to_string), tool_description: tool_description.map(str::to_string), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, } @@ -73,6 +75,35 @@ fn prompt_options( } } +#[test] +fn mcp_app_resource_uri_reads_known_tool_meta_keys() { + let nested = serde_json::json!({ + "ui": { + "resourceUri": "ui://widget/nested.html", + }, + }); + assert_eq!( + 
get_mcp_app_resource_uri(nested.as_object()), + Some("ui://widget/nested.html".to_string()) + ); + + let flat = serde_json::json!({ + "ui/resourceUri": "ui://widget/flat.html", + }); + assert_eq!( + get_mcp_app_resource_uri(flat.as_object()), + Some("ui://widget/flat.html".to_string()) + ); + + let output_template = serde_json::json!({ + "openai/outputTemplate": "ui://widget/output-template.html", + }); + assert_eq!( + get_mcp_app_resource_uri(output_template.as_object()), + Some("ui://widget/output-template.html".to_string()) + ); +} + #[test] fn approval_required_when_read_only_false_and_destructive() { let annotations = annotations(Some(false), Some(true), /*open_world*/ None); @@ -588,6 +619,7 @@ async fn codex_apps_tool_call_request_meta_includes_turn_metadata_and_codex_apps connector_description: Some("Manage events".to_string()), tool_title: Some("Create Event".to_string()), tool_description: Some("Create a calendar event.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: Some( serde_json::json!({ "resource_uri": "connector://calendar/tools/calendar_create_event", @@ -745,6 +777,7 @@ fn guardian_mcp_review_request_includes_annotations_when_present() { connector_description: None, tool_title: None, tool_description: None, + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1043,7 +1076,7 @@ fn accepted_elicitation_without_content_defaults_to_accept() { async fn persist_codex_app_tool_approval_writes_tool_override() { let tmp = tempdir().expect("tempdir"); - persist_codex_app_tool_approval(tmp.path(), "calendar", "calendar/list_events") + persist_codex_app_tool_approval(&tmp.path().abs(), "calendar", "calendar/list_events") .await .expect("persist approval"); @@ -1113,6 +1146,43 @@ async fn persist_custom_mcp_tool_approval_writes_tool_override() { assert!(contents.contains("[mcp_servers.docs.tools.search]")); } +#[tokio::test] +async fn custom_mcp_tool_approval_mode_uses_server_default_with_tool_override() { 
+ let tmp = tempdir().expect("tempdir"); + std::fs::write( + tmp.path().join(CONFIG_TOML_FILE), + r#" +[mcp_servers.docs] +command = "docs-server" +default_tools_approval_mode = "approve" + +[mcp_servers.docs.tools.search] +approval_mode = "prompt" +"#, + ) + .expect("seed config"); + let config = ConfigBuilder::default() + .codex_home(tmp.path().to_path_buf()) + .build() + .await + .expect("load config"); + let (_session, mut turn_context) = make_session_and_context().await; + turn_context.config = Arc::new(config); + + assert_eq!( + custom_mcp_tool_approval_mode(&turn_context, "docs", "read"), + AppToolApproval::Approve + ); + assert_eq!( + custom_mcp_tool_approval_mode(&turn_context, "docs", "search"), + AppToolApproval::Prompt + ); + assert_eq!( + custom_mcp_tool_approval_mode(&turn_context, "unknown", "search"), + AppToolApproval::Auto + ); +} + #[tokio::test] async fn maybe_persist_mcp_tool_approval_reloads_session_config() { let (session, turn_context) = make_session_and_context().await; @@ -1216,7 +1286,7 @@ async fn maybe_persist_mcp_tool_approval_writes_project_config_for_project_serve .await .expect("trust project"); let config = ConfigBuilder::default() - .codex_home(codex_home) + .codex_home(codex_home.to_path_buf()) .fallback_cwd(Some(project_dir.path().to_path_buf())) .build() .await @@ -1271,6 +1341,7 @@ async fn approve_mode_skips_when_annotations_do_not_require_approval() { connector_description: None, tool_title: Some("Read Only Tool".to_string()), tool_description: None, + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1313,7 +1384,7 @@ async fn guardian_mode_skips_auto_when_annotations_do_not_require_approval() { config.approvals_reviewer = ApprovalsReviewer::GuardianSubagent; let config = Arc::new(config); let models_manager = Arc::new(crate::test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), 
config.model_provider.clone(), )); @@ -1339,6 +1410,7 @@ async fn guardian_mode_skips_auto_when_annotations_do_not_require_approval() { connector_description: None, tool_title: Some("Read Only Tool".to_string()), tool_description: None, + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1388,7 +1460,7 @@ async fn guardian_mode_mcp_denial_returns_rationale_message() { config.approvals_reviewer = ApprovalsReviewer::GuardianSubagent; let config = Arc::new(config); let models_manager = Arc::new(crate::test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -1410,6 +1482,7 @@ async fn guardian_mode_mcp_denial_returns_rationale_message() { connector_description: None, tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Reads calendar data.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1461,6 +1534,7 @@ async fn prompt_mode_waits_for_approval_when_annotations_do_not_require_approval connector_description: None, tool_title: Some("Read Only Tool".to_string()), tool_description: None, + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1538,6 +1612,7 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_for_model() { connector_description: Some("Manage events".to_string()), tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Performs a risky action.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1608,6 +1683,7 @@ async fn custom_approve_mode_blocks_when_arc_returns_interrupt_for_model() { connector_description: None, tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Performs a risky action.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, 
openai_file_input_params: None, }; @@ -1678,6 +1754,7 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_without_annotations() { connector_description: Some("Manage events".to_string()), tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Performs a risky action.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1756,6 +1833,7 @@ async fn full_access_mode_skips_arc_monitor_for_all_approval_modes() { connector_description: Some("Manage events".to_string()), tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Performs a risky action.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; @@ -1836,7 +1914,7 @@ async fn approve_mode_routes_arc_ask_user_to_guardian_when_guardian_reviewer_is_ config.approvals_reviewer = ApprovalsReviewer::GuardianSubagent; let config = Arc::new(config); let models_manager = Arc::new(crate::test_support::models_manager_with_provider( - config.codex_home.clone(), + config.codex_home.to_path_buf(), Arc::clone(&session.services.auth_manager), config.model_provider.clone(), )); @@ -1858,6 +1936,7 @@ async fn approve_mode_routes_arc_ask_user_to_guardian_when_guardian_reviewer_is_ connector_description: Some("Manage events".to_string()), tool_title: Some("Dangerous Tool".to_string()), tool_description: Some("Performs a risky action.".to_string()), + mcp_app_resource_uri: None, codex_apps_meta: None, openai_file_input_params: None, }; diff --git a/codex-rs/core/src/mcp_tool_exposure.rs b/codex-rs/core/src/mcp_tool_exposure.rs index d7858c17f0..16aa50cfd4 100644 --- a/codex-rs/core/src/mcp_tool_exposure.rs +++ b/codex-rs/core/src/mcp_tool_exposure.rs @@ -41,9 +41,13 @@ pub(crate) fn build_mcp_tool_exposure( let direct_tools = filter_codex_apps_mcp_tools(all_mcp_tools, explicitly_enabled_connectors, config); + for direct_tool_name in direct_tools.keys() { + deferred_tools.remove(direct_tool_name); 
+ } + McpToolExposure { direct_tools, - deferred_tools: Some(deferred_tools), + deferred_tools: (!deferred_tools.is_empty()).then_some(deferred_tools), } } diff --git a/codex-rs/core/src/memories/README.md b/codex-rs/core/src/memories/README.md index eecaeaea28..a1d365435b 100644 --- a/codex-rs/core/src/memories/README.md +++ b/codex-rs/core/src/memories/README.md @@ -84,7 +84,11 @@ What it does: - `raw_memories.md` (merged raw memories, latest first) - `rollout_summaries/` (one summary file per retained rollout) - prunes stale rollout summaries that are no longer retained -- if there are no inputs, marks the job successful and exits +- finds old resource files from memory extensions under + `memories_extensions//resources/` for extension directories that + have an `instructions.md`, using the memory module retention window +- if there are no Phase 1 inputs or old extension resources, marks the job + successful and exits If there is input, it then: @@ -92,10 +96,13 @@ If there is input, it then: - builds the Phase 2 prompt with a diff of the current Phase 1 input selection versus the last successful Phase 2 selection (`added`, `retained`, `removed`) +- includes old extension resource paths in the prompt diff - runs it with no approvals, no network, and local write access only - disables collab for that agent (to prevent recursive delegation) - watches the agent status and heartbeats the global job lease while it runs - marks the phase-2 job success/failure in the state DB when the agent finishes +- prunes old extension resource files after the consolidation agent completes + and the successful Phase 2 job is recorded Selection diff behavior: diff --git a/codex-rs/core/src/memories/control.rs b/codex-rs/core/src/memories/control.rs index 10bd19246f..4f09d3e74c 100644 --- a/codex-rs/core/src/memories/control.rs +++ b/codex-rs/core/src/memories/control.rs @@ -1,5 +1,16 @@ use std::path::Path; +pub async fn clear_memory_roots_contents(codex_home: &Path) -> 
std::io::Result<()> { + for memory_root in [ + codex_home.join("memories"), + codex_home.join("memories_extensions"), + ] { + clear_memory_root_contents(memory_root.as_path()).await?; + } + + Ok(()) +} + pub(crate) async fn clear_memory_root_contents(memory_root: &Path) -> std::io::Result<()> { match tokio::fs::symlink_metadata(memory_root).await { Ok(metadata) if metadata.file_type().is_symlink() => { diff --git a/codex-rs/core/src/memories/extensions.rs b/codex-rs/core/src/memories/extensions.rs new file mode 100644 index 0000000000..91c96ebff0 --- /dev/null +++ b/codex-rs/core/src/memories/extensions.rs @@ -0,0 +1,251 @@ +use crate::memories::memory_extensions_root; +use chrono::DateTime; +use chrono::Duration; +use chrono::NaiveDateTime; +use chrono::Utc; +use std::path::Path; +use std::path::PathBuf; +use tracing::warn; + +const FILENAME_TS_FORMAT: &str = "%Y-%m-%dT%H-%M-%S"; +pub(super) const EXTENSION_RESOURCE_RETENTION_DAYS: i64 = 7; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct RemovedExtensionResource { + pub(super) extension: String, + pub(super) resource_path: String, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct PendingExtensionResourceRemoval { + pub(super) removed: RemovedExtensionResource, + path: PathBuf, +} + +pub(super) async fn find_old_extension_resources( + memory_root: &Path, +) -> Vec { + find_old_extension_resources_with_now(memory_root, Utc::now()).await +} + +async fn find_old_extension_resources_with_now( + memory_root: &Path, + now: DateTime, +) -> Vec { + let mut pending = Vec::new(); + let cutoff = now - Duration::days(EXTENSION_RESOURCE_RETENTION_DAYS); + let extensions_root = memory_extensions_root(memory_root); + let mut extensions = match tokio::fs::read_dir(&extensions_root).await { + Ok(extensions) => extensions, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return pending, + Err(err) => { + warn!( + "failed reading memory extensions root {}: {err}", + extensions_root.display() + ); 
+ return pending; + } + }; + + while let Ok(Some(extension_entry)) = extensions.next_entry().await { + let extension_path = extension_entry.path(); + let Ok(file_type) = extension_entry.file_type().await else { + continue; + }; + if !file_type.is_dir() { + continue; + } + let Some(extension) = extension_path + .file_name() + .and_then(|name| name.to_str()) + .map(ToOwned::to_owned) + else { + continue; + }; + if !tokio::fs::try_exists(extension_path.join("instructions.md")) + .await + .unwrap_or(false) + { + continue; + } + + let resources_path = extension_path.join("resources"); + let mut resources = match tokio::fs::read_dir(&resources_path).await { + Ok(resources) => resources, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => continue, + Err(err) => { + warn!( + "failed reading memory extension resources {}: {err}", + resources_path.display() + ); + continue; + } + }; + + while let Ok(Some(resource_entry)) = resources.next_entry().await { + let resource_file_path = resource_entry.path(); + let Ok(file_type) = resource_entry.file_type().await else { + continue; + }; + if !file_type.is_file() { + continue; + } + let Some(file_name) = resource_file_path + .file_name() + .and_then(|name| name.to_str()) + else { + continue; + }; + if !file_name.ends_with(".md") { + continue; + } + let Some(resource_timestamp) = resource_timestamp(file_name) else { + continue; + }; + if resource_timestamp > cutoff { + continue; + } + + pending.push(PendingExtensionResourceRemoval { + removed: RemovedExtensionResource { + extension: extension.clone(), + resource_path: format!("resources/{file_name}"), + }, + path: resource_file_path, + }); + } + } + + pending.sort_by(|left, right| { + left.removed + .extension + .cmp(&right.removed.extension) + .then_with(|| left.removed.resource_path.cmp(&right.removed.resource_path)) + }); + pending +} + +pub(super) async fn remove_extension_resources(resources: &[PendingExtensionResourceRemoval]) { + for resource in resources { + if let 
Err(err) = tokio::fs::remove_file(&resource.path).await + && err.kind() != std::io::ErrorKind::NotFound + { + warn!( + "failed pruning old memory extension resource {}: {err}", + resource.path.display() + ); + } + } +} + +fn resource_timestamp(file_name: &str) -> Option> { + let timestamp = file_name.get(..19)?; + let naive = NaiveDateTime::parse_from_str(timestamp, FILENAME_TS_FORMAT).ok()?; + Some(DateTime::from_naive_utc_and_offset(naive, Utc)) +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use tempfile::TempDir; + + #[tokio::test] + async fn finds_only_old_resources_from_extensions_with_instructions() { + let codex_home = TempDir::new().expect("create temp codex home"); + let memory_root = codex_home.path().join("memories"); + let extensions_root = memory_extensions_root(&memory_root); + let telepathy_resources = extensions_root.join("telepathy/resources"); + tokio::fs::create_dir_all(&telepathy_resources) + .await + .expect("create telepathy resources"); + tokio::fs::write( + extensions_root.join("telepathy/instructions.md"), + "instructions", + ) + .await + .expect("write telepathy instructions"); + + let now = DateTime::from_naive_utc_and_offset( + NaiveDateTime::parse_from_str("2026-04-14T12-00-00", FILENAME_TS_FORMAT) + .expect("parse now"), + Utc, + ); + let old_file = telepathy_resources.join("2026-04-06T11-59-59-abcd-10min-old.md"); + let exact_cutoff_file = + telepathy_resources.join("2026-04-07T12-00-00-abcd-10min-cutoff.md"); + let recent_file = telepathy_resources.join("2026-04-08T12-00-00-abcd-10min-recent.md"); + let invalid_file = telepathy_resources.join("not-a-timestamp.md"); + for file in [&old_file, &exact_cutoff_file, &recent_file, &invalid_file] { + tokio::fs::write(file, "resource") + .await + .expect("write telepathy resource"); + } + + let ignored_resources = extensions_root.join("ignored/resources"); + tokio::fs::create_dir_all(&ignored_resources) + .await + .expect("create ignored resources"); + 
let ignored_old_file = ignored_resources.join("2026-04-06T11-59-59-abcd-10min-old.md"); + tokio::fs::write(&ignored_old_file, "ignored") + .await + .expect("write ignored resource"); + + let pending = find_old_extension_resources_with_now(&memory_root, now).await; + + assert_eq!( + pending + .iter() + .map(|resource| resource.removed.clone()) + .collect::>(), + vec![ + RemovedExtensionResource { + extension: "telepathy".to_string(), + resource_path: "resources/2026-04-06T11-59-59-abcd-10min-old.md".to_string(), + }, + RemovedExtensionResource { + extension: "telepathy".to_string(), + resource_path: "resources/2026-04-07T12-00-00-abcd-10min-cutoff.md".to_string(), + }, + ] + ); + assert!( + tokio::fs::try_exists(&old_file) + .await + .expect("check old file before remove") + ); + assert!( + tokio::fs::try_exists(&exact_cutoff_file) + .await + .expect("check cutoff file before remove") + ); + + remove_extension_resources(&pending).await; + + assert!( + !tokio::fs::try_exists(&old_file) + .await + .expect("check old file") + ); + assert!( + !tokio::fs::try_exists(&exact_cutoff_file) + .await + .expect("check cutoff file") + ); + assert!( + tokio::fs::try_exists(&recent_file) + .await + .expect("check recent file") + ); + assert!( + tokio::fs::try_exists(&invalid_file) + .await + .expect("check invalid file") + ); + assert!( + tokio::fs::try_exists(&ignored_old_file) + .await + .expect("check ignored old file") + ); + } +} diff --git a/codex-rs/core/src/memories/mod.rs b/codex-rs/core/src/memories/mod.rs index 194c27c907..d796063d2d 100644 --- a/codex-rs/core/src/memories/mod.rs +++ b/codex-rs/core/src/memories/mod.rs @@ -17,7 +17,7 @@ pub(crate) mod usage; use codex_protocol::openai_models::ReasoningEffort; -pub(crate) use control::clear_memory_root_contents; +pub use control::clear_memory_roots_contents; /// Starts the memory startup pipeline for eligible root sessions. /// This is the single entrypoint that `codex` uses to trigger memory startup. 
/// @@ -30,6 +30,8 @@ mod artifacts { pub(super) const RAW_MEMORIES_FILENAME: &str = "raw_memories.md"; } +mod extensions; + /// Phase 1 (startup extraction). mod phase_one { /// Default model used for phase 1. @@ -65,7 +67,7 @@ mod phase_one { /// Phase 2 (aka `Consolidation`). mod phase_two { /// Default model used for phase 2. - pub(super) const MODEL: &str = "gpt-5.3-codex"; + pub(super) const MODEL: &str = "gpt-5.4"; /// Default reasoning effort used for phase 2. pub(super) const REASONING_EFFORT: super::ReasoningEffort = super::ReasoningEffort::Medium; /// Lease duration (seconds) for phase-2 consolidation job ownership. @@ -96,10 +98,11 @@ mod metrics { pub(super) const MEMORY_PHASE_TWO_TOKEN_USAGE: &str = "codex.memory.phase2.token_usage"; } +use codex_utils_absolute_path::AbsolutePathBuf; use std::path::Path; use std::path::PathBuf; -pub fn memory_root(codex_home: &Path) -> PathBuf { +pub fn memory_root(codex_home: &AbsolutePathBuf) -> AbsolutePathBuf { codex_home.join("memories") } diff --git a/codex-rs/core/src/memories/phase1.rs b/codex-rs/core/src/memories/phase1.rs index 37cb0e9f48..c005f4da4e 100644 --- a/codex-rs/core/src/memories/phase1.rs +++ b/codex-rs/core/src/memories/phase1.rs @@ -477,9 +477,10 @@ mod job { } }) .collect::>(); - serde_json::to_string(&filtered).map_err(|err| { + let serialized = serde_json::to_string(&filtered).map_err(|err| { CodexErr::InvalidRequest(format!("failed to serialize rollout memory: {err}")) - }) + })?; + Ok(redact_secrets(serialized)) } fn sanitize_response_item_for_memories(item: &ResponseItem) -> Option { diff --git a/codex-rs/core/src/memories/phase1_tests.rs b/codex-rs/core/src/memories/phase1_tests.rs index 9d824ab2b1..89bde1a877 100644 --- a/codex-rs/core/src/memories/phase1_tests.rs +++ b/codex-rs/core/src/memories/phase1_tests.rs @@ -3,6 +3,8 @@ use super::JobResult; use super::aggregate_stats; use super::job::serialize_filtered_rollout_response_items; use codex_protocol::models::ContentItem; +use 
codex_protocol::models::FunctionCallOutputBody; +use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::RolloutItem; use codex_protocol::protocol::TokenUsage; @@ -72,6 +74,25 @@ fn serializes_memory_rollout_with_agents_removed_but_environment_kept() { ); } +#[test] +fn serializes_memory_rollout_redacts_secrets_before_prompt_upload() { + let serialized = serialize_filtered_rollout_response_items(&[RolloutItem::ResponseItem( + ResponseItem::FunctionCallOutput { + call_id: "call_123".to_string(), + output: FunctionCallOutputPayload { + body: FunctionCallOutputBody::Text( + r#"{"token":"sk-abcdefghijklmnopqrstuvwxyz123456"}"#.to_string(), + ), + success: Some(true), + }, + }, + )]) + .expect("serialize"); + + assert!(!serialized.contains("sk-abcdefghijklmnopqrstuvwxyz123456")); + assert!(serialized.contains("[REDACTED_SECRET]")); +} + #[test] fn count_outcomes_sums_token_usage_across_all_jobs() { let counts = aggregate_stats(vec![ diff --git a/codex-rs/core/src/memories/phase2.rs b/codex-rs/core/src/memories/phase2.rs index 203a19075c..dd194edfbf 100644 --- a/codex-rs/core/src/memories/phase2.rs +++ b/codex-rs/core/src/memories/phase2.rs @@ -3,6 +3,9 @@ use crate::agent::status::is_final as is_final_agent_status; use crate::codex::Session; use crate::codex::emit_subagent_session_started; use crate::config::Config; +use crate::memories::extensions::PendingExtensionResourceRemoval; +use crate::memories::extensions::find_old_extension_resources; +use crate::memories::extensions::remove_extension_resources; use crate::memories::memory_root; use crate::memories::metrics; use crate::memories::phase_two; @@ -21,7 +24,6 @@ use codex_protocol::protocol::TokenUsage; use codex_protocol::user_input::UserInput; use codex_state::Stage1Output; use codex_state::StateRuntime; -use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashSet; use std::sync::Arc; use std::time::Duration; @@ -113,7 
+115,12 @@ pub(super) async fn run(session: &Arc, config: Arc) { job::failed(session, db, &claim, "failed_rebuild_raw_memories").await; return; } - if raw_memories.is_empty() { + let pending_extension_resource_removals = find_old_extension_resources(&root).await; + let removed_extension_resources = pending_extension_resource_removals + .iter() + .map(|resource| resource.removed.clone()) + .collect::>(); + if raw_memories.is_empty() && pending_extension_resource_removals.is_empty() { // We check only after sync of the file system. job::succeed( session, @@ -128,7 +135,7 @@ pub(super) async fn run(session: &Arc, config: Arc) { } // 5. Spawn the agent - let prompt = agent::get_prompt(config, &selection); + let prompt = agent::get_prompt(config, &selection, &removed_extension_resources); let source = SessionSource::SubAgent(SubAgentSource::MemoryConsolidation); let thread_id = match session .services @@ -171,6 +178,7 @@ pub(super) async fn run(session: &Arc, config: Arc) { claim, new_watermark, raw_memories.clone(), + pending_extension_resource_removals, thread_id, phase_two_e2e_timer, ); @@ -269,15 +277,15 @@ mod job { completion_watermark: i64, selected_outputs: &[codex_state::Stage1Output], reason: &'static str, - ) { + ) -> bool { session.services.session_telemetry.counter( metrics::MEMORY_PHASE_TWO_JOBS, /*inc*/ 1, &[("status", reason)], ); - let _ = db - .mark_global_phase2_job_succeeded(&claim.token, completion_watermark, selected_outputs) - .await; + db.mark_global_phase2_job_succeeded(&claim.token, completion_watermark, selected_outputs) + .await + .unwrap_or(false) } } @@ -288,16 +296,7 @@ mod agent { let root = memory_root(&config.codex_home); let mut agent_config = config.as_ref().clone(); - match AbsolutePathBuf::from_absolute_path(root) { - Ok(root) => agent_config.cwd = root, - Err(err) => { - warn!( - "memory phase-2 consolidation could not set cwd from codex_home {}: {err}", - agent_config.codex_home.display() - ); - return None; - } - } + 
agent_config.cwd = root; // Consolidation threads must never feed back into phase-1 memory generation. agent_config.memories.generate_memories = false; // Approval policy @@ -308,14 +307,7 @@ mod agent { let _ = agent_config.features.disable(Feature::MemoryTool); // Sandbox policy - let mut writable_roots = Vec::new(); - match AbsolutePathBuf::from_absolute_path(agent_config.codex_home.clone()) { - Ok(codex_home) => writable_roots.push(codex_home), - Err(err) => warn!( - "memory phase-2 consolidation could not add codex_home writable root {}: {err}", - agent_config.codex_home.display() - ), - } + let writable_roots = vec![agent_config.codex_home.clone()]; // The consolidation agent only needs local codex_home write access and no network. let consolidation_sandbox_policy = SandboxPolicy::WorkspaceWrite { writable_roots, @@ -345,9 +337,10 @@ mod agent { pub(super) fn get_prompt( config: Arc, selection: &codex_state::Phase2InputSelection, + removed_extension_resources: &[crate::memories::extensions::RemovedExtensionResource], ) -> Vec { let root = memory_root(&config.codex_home); - let prompt = build_consolidation_prompt(&root, selection); + let prompt = build_consolidation_prompt(&root, selection, removed_extension_resources); vec![UserInput::Text { text: prompt, text_elements: vec![], @@ -360,6 +353,7 @@ mod agent { claim: Claim, new_watermark: i64, selected_outputs: Vec, + pending_extension_resource_removals: Vec, thread_id: ThreadId, phase_two_e2e_timer: Option, ) { @@ -396,7 +390,7 @@ mod agent { if let Some(token_usage) = agent_control.get_total_token_usage(thread_id).await { emit_token_usage_metrics(&session, &token_usage); } - job::succeed( + if job::succeed( &session, &db, &claim, @@ -404,7 +398,10 @@ mod agent { &selected_outputs, "succeeded", ) - .await; + .await + { + remove_extension_resources(&pending_extension_resource_removals).await; + } } else { job::failed(&session, &db, &claim, "failed_agent").await; } diff --git 
a/codex-rs/core/src/memories/prompts.rs b/codex-rs/core/src/memories/prompts.rs index 079ccd5c6a..fc85917c9b 100644 --- a/codex-rs/core/src/memories/prompts.rs +++ b/codex-rs/core/src/memories/prompts.rs @@ -1,3 +1,5 @@ +use crate::memories::extensions::EXTENSION_RESOURCE_RETENTION_DAYS; +use crate::memories::extensions::RemovedExtensionResource; use crate::memories::memory_extensions_root; use crate::memories::memory_root; use crate::memories::phase_one; @@ -6,9 +8,11 @@ use codex_protocol::openai_models::ModelInfo; use codex_state::Phase2InputSelection; use codex_state::Stage1Output; use codex_state::Stage1OutputRef; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_output_truncation::TruncationPolicy; use codex_utils_output_truncation::truncate_text; use codex_utils_template::Template; +use std::fmt::Write as _; use std::path::Path; use std::sync::LazyLock; use tokio::fs; @@ -61,8 +65,9 @@ Memory extensions (under {{ memory_extensions_root }}/): source. If the user has any memory extensions, you MUST read the instructions for each extension to -determine how to use the memory source. If it has no extension folders, continue with the standard -memory inputs only. +determine how to use the memory source. If the Phase 2 diff lists removed memory extension +resources, use that extension-specific deletion diff to remove stale memories derived only from +those resources. If it has no extension folders, continue with the standard memory inputs only. "#; const MEMORY_EXTENSIONS_PRIMARY_INPUTS: &str = r#" @@ -72,12 +77,16 @@ Under `{{ memory_extensions_root }}/`: - `/instructions.md` - If extension folders exist, read each instructions.md first and follow it when interpreting that extension's memory source. + +If the Phase 2 diff lists removed memory extension resources, use that extension-specific deletion +diff to remove stale memories derived only from those resources. "#; /// Builds the consolidation subagent prompt for a specific memory root. 
pub(super) fn build_consolidation_prompt( memory_root: &Path, selection: &Phase2InputSelection, + removed_extension_resources: &[RemovedExtensionResource], ) -> String { let memory_extensions_root = memory_extensions_root(memory_root); let memory_extensions_exist = memory_extensions_root.is_dir(); @@ -99,7 +108,8 @@ pub(super) fn build_consolidation_prompt( } else { String::new() }; - let phase2_input_selection = render_phase2_input_selection(selection); + let phase2_input_selection = + render_phase2_input_selection(selection, removed_extension_resources); CONSOLIDATION_PROMPT_TEMPLATE .render([ ("memory_root", memory_root.as_str()), @@ -130,7 +140,10 @@ fn render_memory_extensions_block(template: &Template, memory_extensions_root: & }) } -fn render_phase2_input_selection(selection: &Phase2InputSelection) -> String { +fn render_phase2_input_selection( + selection: &Phase2InputSelection, + removed_extension_resources: &[RemovedExtensionResource], +) -> String { let retained = selection.retained_thread_ids.len(); let added = selection.selected.len().saturating_sub(retained); let selected = if selection.selected.is_empty() { @@ -159,11 +172,29 @@ fn render_phase2_input_selection(selection: &Phase2InputSelection) -> String { .join("\n") }; - format!( + let mut rendered = format!( "- selected inputs this run: {}\n- newly added since the last successful Phase 2 run: {added}\n- retained from the last successful Phase 2 run: {retained}\n- removed from the last successful Phase 2 run: {}\n\nCurrent selected Phase 1 inputs:\n{selected}\n\nRemoved from the last successful Phase 2 selection:\n{removed}\n", selection.selected.len(), selection.removed.len(), - ) + ); + + if !removed_extension_resources.is_empty() { + rendered.push_str("\nMemory extension resources removed by retention pruning:\n"); + let _ = writeln!( + rendered, + "- retention window: {EXTENSION_RESOURCE_RETENTION_DAYS} days" + ); + let mut current_extension = ""; + for removed_resource in 
removed_extension_resources { + if removed_resource.extension != current_extension { + current_extension = &removed_resource.extension; + let _ = writeln!(rendered, "- extension: {current_extension}"); + } + let _ = writeln!(rendered, " - {}", removed_resource.resource_path); + } + } + + rendered } fn render_selected_input_line(item: &Stage1Output, retained: bool) -> String { @@ -231,7 +262,9 @@ pub(super) fn build_stage_one_input_message( /// Build prompt used for read path. This prompt must be added to the developer instructions. In /// case of large memory files, the `memory_summary.md` is truncated at /// [phase_one::MEMORY_TOOL_DEVELOPER_INSTRUCTIONS_SUMMARY_TOKEN_LIMIT]. -pub(crate) async fn build_memory_tool_developer_instructions(codex_home: &Path) -> Option { +pub(crate) async fn build_memory_tool_developer_instructions( + codex_home: &AbsolutePathBuf, +) -> Option { let base_path = memory_root(codex_home); let memory_summary_path = base_path.join("memory_summary.md"); let memory_summary = fs::read_to_string(&memory_summary_path) diff --git a/codex-rs/core/src/memories/prompts_tests.rs b/codex-rs/core/src/memories/prompts_tests.rs index 02df333382..959bb6911d 100644 --- a/codex-rs/core/src/memories/prompts_tests.rs +++ b/codex-rs/core/src/memories/prompts_tests.rs @@ -1,5 +1,8 @@ use super::*; +use crate::memories::extensions::RemovedExtensionResource; use codex_models_manager::model_info::model_info_from_slug; +use codex_state::Phase2InputSelection; +use core_test_support::PathExt; use pretty_assertions::assert_eq; use tempfile::tempdir; use tokio::fs as tokio_fs; @@ -54,61 +57,39 @@ fn build_stage_one_input_message_uses_default_limit_when_model_context_window_mi } #[test] -fn build_consolidation_prompt_renders_embedded_template() { +fn build_consolidation_prompt_includes_removed_extension_resources() { let temp = tempdir().unwrap(); - let memories_dir = temp.path().join("memories"); + let memory_root = temp.path().join("memories"); + 
std::fs::create_dir_all(temp.path().join("memories_extensions")).unwrap(); + let removed_extension_resources = vec![ + RemovedExtensionResource { + extension: "telepathy".to_string(), + resource_path: "resources/2026-04-06T11-59-59-abcd-10min-old.md".to_string(), + }, + RemovedExtensionResource { + extension: "telepathy".to_string(), + resource_path: "resources/2026-04-07T12-00-00-abcd-10min-cutoff.md".to_string(), + }, + ]; - let prompt = build_consolidation_prompt(&memories_dir, &Phase2InputSelection::default()); + let prompt = build_consolidation_prompt( + &memory_root, + &Phase2InputSelection::default(), + &removed_extension_resources, + ); - assert!(prompt.contains(&format!( - "Folder structure (under {}/):", - memories_dir.display() - ))); - assert!(!prompt.contains("Memory extensions (under")); - assert!(!prompt.contains("/instructions.md")); - assert!(prompt.contains("**Diff since last consolidation:**")); - assert!(prompt.contains("- selected inputs this run: 0")); -} - -#[tokio::test] -async fn build_consolidation_prompt_points_to_extensions_without_inlining_them() { - let temp = tempdir().unwrap(); - let memories_dir = temp.path().join("memories"); - let extension_dir = temp.path().join("memories_extensions/tape_recorder"); - tokio_fs::create_dir_all(extension_dir.join("resources")) - .await - .unwrap(); - tokio_fs::write( - extension_dir.join("instructions.md"), - "source-specific instructions\n", - ) - .await - .unwrap(); - tokio_fs::write( - extension_dir.join("resources/notes.md"), - "source-specific resource\n", - ) - .await - .unwrap(); - - let prompt = build_consolidation_prompt(&memories_dir, &Phase2InputSelection::default()); - let memory_extensions_dir = temp.path().join("memories_extensions"); - - assert!(prompt.contains(&format!( - "Memory extensions (under {}/)", - memory_extensions_dir.display() - ))); - assert!(prompt.contains(&format!("Under `{}/`:", memory_extensions_dir.display()))); - assert!(prompt.contains("/instructions.md")); - 
assert!(prompt.contains("Optional source-specific inputs:")); - assert!(!prompt.contains("source-specific instructions")); - assert!(!prompt.contains("source-specific resource")); + assert!(prompt.contains("Memory extension resources removed by retention pruning:")); + assert!(prompt.contains("- retention window: 7 days")); + assert!(prompt.contains("- extension: telepathy")); + assert!(prompt.contains(" - resources/2026-04-06T11-59-59-abcd-10min-old.md")); + assert!(prompt.contains(" - resources/2026-04-07T12-00-00-abcd-10min-cutoff.md")); + assert!(prompt.contains("extension-specific deletion diff")); } #[tokio::test] async fn build_memory_tool_developer_instructions_renders_embedded_template() { let temp = tempdir().unwrap(); - let codex_home = temp.path(); + let codex_home = temp.path().abs(); let memories_dir = codex_home.join("memories"); tokio_fs::create_dir_all(&memories_dir).await.unwrap(); tokio_fs::write( @@ -118,7 +99,7 @@ async fn build_memory_tool_developer_instructions_renders_embedded_template() { .await .unwrap(); - let instructions = build_memory_tool_developer_instructions(codex_home) + let instructions = build_memory_tool_developer_instructions(&codex_home) .await .unwrap(); diff --git a/codex-rs/core/src/memories/tests.rs b/codex-rs/core/src/memories/tests.rs index fd9202b990..ab3538af67 100644 --- a/codex-rs/core/src/memories/tests.rs +++ b/codex-rs/core/src/memories/tests.rs @@ -1,6 +1,6 @@ +use super::control::clear_memory_root_contents; use super::storage::rebuild_raw_memories_file_from_memories; use super::storage::sync_rollout_summaries_from_memories; -use crate::memories::clear_memory_root_contents; use crate::memories::ensure_layout; use crate::memories::memory_root; use crate::memories::raw_memories_file; @@ -10,6 +10,7 @@ use chrono::Utc; use codex_config::types::DEFAULT_MEMORIES_MAX_RAW_MEMORIES_FOR_CONSOLIDATION; use codex_protocol::ThreadId; use codex_state::Stage1Output; +use codex_utils_absolute_path::AbsolutePathBuf; use 
pretty_assertions::assert_eq; use serde_json::Value; use std::path::PathBuf; @@ -17,8 +18,7 @@ use tempfile::tempdir; #[test] fn memory_root_uses_shared_global_path() { - let dir = tempdir().expect("tempdir"); - let codex_home = dir.path().join("codex"); + let codex_home = AbsolutePathBuf::current_dir().expect("cwd").join("codex"); assert_eq!(memory_root(&codex_home), codex_home.join("memories")); } @@ -424,6 +424,7 @@ mod phase2 { use crate::memories::phase2; use crate::memories::raw_memories_file; use crate::memories::rollout_summaries_dir; + use chrono::Duration as ChronoDuration; use chrono::Utc; use codex_config::Constrained; use codex_login::CodexAuth; @@ -435,7 +436,6 @@ mod phase2 { use codex_state::Phase2JobClaimOutcome; use codex_state::Stage1Output; use codex_state::ThreadMetadataBuilder; - use core_test_support::PathBufExt; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -468,13 +468,15 @@ mod phase2 { impl DispatchHarness { async fn new() -> Self { let codex_home = tempfile::tempdir().expect("create temp codex home"); - let mut config = test_config(); - config.codex_home = codex_home.path().to_path_buf(); - config.cwd = config.codex_home.abs(); + let mut config = test_config().await; + config.codex_home = + codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(codex_home.path()) + .expect("codex home is absolute"); + config.cwd = config.codex_home.clone(); let config = Arc::new(config); let state_db = codex_state::StateRuntime::init( - config.codex_home.clone(), + config.codex_home.to_path_buf(), config.model_provider_id.clone(), ) .await @@ -483,7 +485,7 @@ mod phase2 { let manager = ThreadManager::with_models_provider_and_home_for_tests( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), - config.codex_home.clone(), + config.codex_home.to_path_buf(), std::sync::Arc::new(codex_exec_server::EnvironmentManager::new( /*exec_server_url*/ None, )), @@ -507,7 +509,8 @@ mod phase2 { thread_id, self.config 
.codex_home - .join(format!("rollout-{thread_id}.jsonl")), + .join(format!("rollout-{thread_id}.jsonl")) + .to_path_buf(), Utc::now(), SessionSource::Cli, ); @@ -676,7 +679,10 @@ mod phase2 { .expect("get consolidation thread"); let config_snapshot = subagent.config_snapshot().await; pretty_assertions::assert_eq!(config_snapshot.approval_policy, AskForApproval::Never); - pretty_assertions::assert_eq!(config_snapshot.cwd, memory_root(&harness.config.codex_home)); + pretty_assertions::assert_eq!( + config_snapshot.cwd.as_path(), + memory_root(&harness.config.codex_home).as_path() + ); match config_snapshot.sandbox_policy { SandboxPolicy::WorkspaceWrite { writable_roots, .. } => { assert!( @@ -889,13 +895,15 @@ mod phase2 { #[tokio::test] async fn dispatch_marks_job_for_retry_when_spawn_agent_fails() { let codex_home = tempfile::tempdir().expect("create temp codex home"); - let mut config = test_config(); - config.codex_home = codex_home.path().to_path_buf(); - config.cwd = config.codex_home.abs(); + let mut config = test_config().await; + config.codex_home = + codex_utils_absolute_path::AbsolutePathBuf::from_absolute_path(codex_home.path()) + .expect("codex home is absolute"); + config.cwd = config.codex_home.clone(); let config = Arc::new(config); let state_db = codex_state::StateRuntime::init( - config.codex_home.clone(), + config.codex_home.to_path_buf(), config.model_provider_id.clone(), ) .await @@ -909,7 +917,10 @@ mod phase2 { let thread_id = ThreadId::new(); let mut metadata_builder = ThreadMetadataBuilder::new( thread_id, - config.codex_home.join(format!("rollout-{thread_id}.jsonl")), + config + .codex_home + .join(format!("rollout-{thread_id}.jsonl")) + .to_path_buf(), Utc::now(), SessionSource::Cli, ); @@ -950,6 +961,28 @@ mod phase2 { "stage-1 success should enqueue global consolidation" ); + let telepathy_resources = config + .codex_home + .join("memories_extensions/telepathy/resources"); + tokio::fs::create_dir_all(&telepathy_resources) + .await + 
.expect("create telepathy resources"); + tokio::fs::write( + config + .codex_home + .join("memories_extensions/telepathy/instructions.md"), + "instructions", + ) + .await + .expect("write telepathy instructions"); + let old_file = telepathy_resources.join(format!( + "{}-abcd-10min-old.md", + (Utc::now() - ChronoDuration::days(8)).format("%Y-%m-%dT%H-%M-%S") + )); + tokio::fs::write(&old_file, "old resource") + .await + .expect("write old extension resource"); + phase2::run(&session, Arc::clone(&config)).await; let retry_claim = state_db @@ -961,5 +994,11 @@ mod phase2 { Phase2JobClaimOutcome::SkippedNotDirty, "spawn failures should leave the job in retry backoff instead of running" ); + assert!( + tokio::fs::try_exists(&old_file) + .await + .expect("check old extension resource"), + "spawn failures should not prune extension resources before retry" + ); } } diff --git a/codex-rs/core/src/message_history.rs b/codex-rs/core/src/message_history.rs index e2d5510857..3458ec7306 100644 --- a/codex-rs/core/src/message_history.rs +++ b/codex-rs/core/src/message_history.rs @@ -26,7 +26,6 @@ use std::io::Seek; use std::io::SeekFrom; use std::io::Write; use std::path::Path; -use std::path::PathBuf; use serde::Deserialize; use serde::Serialize; @@ -37,6 +36,7 @@ use tokio::io::AsyncReadExt; use crate::config::Config; use codex_config::types::HistoryPersistence; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_protocol::ThreadId; #[cfg(unix)] @@ -60,10 +60,8 @@ pub struct HistoryEntry { pub text: String, } -fn history_filepath(config: &Config) -> PathBuf { - let mut path = config.codex_home.clone(); - path.push(HISTORY_FILENAME); - path +fn history_filepath(config: &Config) -> AbsolutePathBuf { + config.codex_home.join(HISTORY_FILENAME) } /// Append a `text` entry associated with `conversation_id` to the history file. 
diff --git a/codex-rs/core/src/network_proxy_loader.rs b/codex-rs/core/src/network_proxy_loader.rs index 8d387ca968..af4280bfb4 100644 --- a/codex-rs/core/src/network_proxy_loader.rs +++ b/codex-rs/core/src/network_proxy_loader.rs @@ -16,6 +16,7 @@ use codex_config::CONFIG_TOML_FILE; use codex_config::permissions_toml::NetworkToml; use codex_config::permissions_toml::PermissionsToml; use codex_config::permissions_toml::overlay_network_domain_permissions; +use codex_exec_server::LOCAL_FS; use codex_network_proxy::ConfigReloader; use codex_network_proxy::ConfigState; use codex_network_proxy::NetworkProxyConfig; @@ -25,8 +26,8 @@ use codex_network_proxy::NetworkProxyState; use codex_network_proxy::build_config_state; use codex_network_proxy::normalize_host; use codex_network_proxy::validate_policy_against_constraints; +use codex_utils_absolute_path::AbsolutePathBuf; use serde::Deserialize; -use std::path::PathBuf; use std::sync::Arc; use tokio::sync::RwLock; @@ -46,6 +47,7 @@ async fn build_config_state_with_mtimes() -> Result<(ConfigState, Vec Vec { .iter() .filter_map(|layer| { let path = match &layer.name { - ConfigLayerSource::System { file } => Some(file.as_path().to_path_buf()), - ConfigLayerSource::User { file } => Some(file.as_path().to_path_buf()), - ConfigLayerSource::Project { dot_codex_folder } => Some( - dot_codex_folder - .join(CONFIG_TOML_FILE) - .as_path() - .to_path_buf(), - ), - ConfigLayerSource::LegacyManagedConfigTomlFromFile { file } => { - Some(file.as_path().to_path_buf()) + ConfigLayerSource::System { file } => Some(file.clone()), + ConfigLayerSource::User { file } => Some(file.clone()), + ConfigLayerSource::Project { dot_codex_folder } => { + Some(dot_codex_folder.join(CONFIG_TOML_FILE)) } + ConfigLayerSource::LegacyManagedConfigTomlFromFile { file } => Some(file.clone()), _ => None, }; path.map(LayerMtime::new) @@ -265,12 +262,12 @@ fn is_user_controlled_layer(layer: &ConfigLayerSource) -> bool { #[derive(Clone)] struct LayerMtime { - path: 
PathBuf, + path: AbsolutePathBuf, mtime: Option, } impl LayerMtime { - fn new(path: PathBuf) -> Self { + fn new(path: AbsolutePathBuf) -> Self { let mtime = path.metadata().and_then(|m| m.modified()).ok(); Self { path, mtime } } diff --git a/codex-rs/core/src/original_image_detail.rs b/codex-rs/core/src/original_image_detail.rs index c3e5c3d9a5..adfed321b8 100644 --- a/codex-rs/core/src/original_image_detail.rs +++ b/codex-rs/core/src/original_image_detail.rs @@ -1,2 +1,3 @@ pub(crate) use codex_tools::can_request_original_image_detail; pub(crate) use codex_tools::normalize_output_image_detail; +pub(crate) use codex_tools::sanitize_original_image_detail; diff --git a/codex-rs/core/src/otel_init.rs b/codex-rs/core/src/otel_init.rs index 4a0e7cd984..41914570f3 100644 --- a/codex-rs/core/src/otel_init.rs +++ b/codex-rs/core/src/otel_init.rs @@ -83,7 +83,7 @@ pub fn build_provider( OtelProvider::from(&OtelSettings { service_name: service_name.to_string(), service_version: service_version.to_string(), - codex_home: config.codex_home.clone(), + codex_home: config.codex_home.to_path_buf(), environment: config.otel.environment.to_string(), exporter, trace_exporter, diff --git a/codex-rs/core/src/personality_migration.rs b/codex-rs/core/src/personality_migration.rs index 52cabf55de..4b53a2629e 100644 --- a/codex-rs/core/src/personality_migration.rs +++ b/codex-rs/core/src/personality_migration.rs @@ -1,14 +1,10 @@ use crate::config::edit::ConfigEditsBuilder; -use crate::rollout::ARCHIVED_SESSIONS_SUBDIR; -use crate::rollout::SESSIONS_SUBDIR; -use crate::rollout::list::ThreadListConfig; -use crate::rollout::list::ThreadListLayout; -use crate::rollout::list::ThreadSortKey; -use crate::rollout::list::get_threads_in_root; use codex_config::config_toml::ConfigToml; use codex_protocol::config_types::Personality; -use codex_protocol::protocol::SessionSource; -use codex_rollout::state_db; +use codex_thread_store::ListThreadsParams; +use codex_thread_store::LocalThreadStore; +use 
codex_thread_store::ThreadSortKey; +use codex_thread_store::ThreadStore; use std::io; use std::path::Path; use tokio::fs::OpenOptions; @@ -64,57 +60,33 @@ pub async fn maybe_migrate_personality( } async fn has_recorded_sessions(codex_home: &Path, default_provider: &str) -> io::Result { - let allowed_sources: &[SessionSource] = &[]; + let store = LocalThreadStore::new(codex_rollout::RolloutConfig { + codex_home: codex_home.to_path_buf(), + sqlite_home: codex_home.to_path_buf(), + cwd: codex_home.to_path_buf(), + model_provider_id: default_provider.to_string(), + generate_memories: false, + }); + if has_threads(&store, /*archived*/ false).await? { + return Ok(true); + } + has_threads(&store, /*archived*/ true).await +} - if let Some(state_db_ctx) = state_db::open_if_present(codex_home, default_provider).await - && let Some(ids) = state_db::list_thread_ids_db( - Some(state_db_ctx.as_ref()), - codex_home, - /*page_size*/ 1, - /*cursor*/ None, - ThreadSortKey::CreatedAt, - allowed_sources, - /*model_providers*/ None, - /*archived_only*/ false, - "personality_migration", - ) +async fn has_threads(store: &LocalThreadStore, archived: bool) -> io::Result { + store + .list_threads(ListThreadsParams { + page_size: 1, + cursor: None, + sort_key: ThreadSortKey::CreatedAt, + allowed_sources: Vec::new(), + model_providers: None, + archived, + search_term: None, + }) .await - && !ids.is_empty() - { - return Ok(true); - } - - let sessions = get_threads_in_root( - codex_home.join(SESSIONS_SUBDIR), - /*page_size*/ 1, - /*cursor*/ None, - ThreadSortKey::CreatedAt, - ThreadListConfig { - allowed_sources, - model_providers: None, - default_provider, - layout: ThreadListLayout::NestedByDate, - }, - ) - .await?; - if !sessions.items.is_empty() { - return Ok(true); - } - - let archived_sessions = get_threads_in_root( - codex_home.join(ARCHIVED_SESSIONS_SUBDIR), - /*page_size*/ 1, - /*cursor*/ None, - ThreadSortKey::CreatedAt, - ThreadListConfig { - allowed_sources, - model_providers: None, 
- default_provider, - layout: ThreadListLayout::Flat, - }, - ) - .await?; - Ok(!archived_sessions.items.is_empty()) + .map(|page| !page.items.is_empty()) + .map_err(io::Error::other) } async fn create_marker(marker_path: &Path) -> io::Result<()> { diff --git a/codex-rs/core/src/personality_migration_tests.rs b/codex-rs/core/src/personality_migration_tests.rs index de1070ad34..4aef53a5c4 100644 --- a/codex-rs/core/src/personality_migration_tests.rs +++ b/codex-rs/core/src/personality_migration_tests.rs @@ -7,6 +7,8 @@ use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::UserMessageEvent; +use codex_rollout::ARCHIVED_SESSIONS_SUBDIR; +use codex_rollout::SESSIONS_SUBDIR; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::io::AsyncWriteExt; @@ -25,6 +27,16 @@ async fn write_session_with_user_event(codex_home: &Path) -> io::Result<()> { .join("2025") .join("01") .join("01"); + write_rollout_with_user_event(&dir, thread_id).await +} + +async fn write_archived_session_with_user_event(codex_home: &Path) -> io::Result<()> { + let thread_id = ThreadId::new(); + let dir = codex_home.join(ARCHIVED_SESSIONS_SUBDIR); + write_rollout_with_user_event(&dir, thread_id).await +} + +async fn write_rollout_with_user_event(dir: &Path, thread_id: ThreadId) -> io::Result<()> { tokio::fs::create_dir_all(&dir).await?; let file_path = dir.join(format!("rollout-{TEST_TIMESTAMP}-{thread_id}.jsonl")); let mut file = tokio::fs::File::create(&file_path).await?; @@ -85,6 +97,22 @@ async fn applies_when_sessions_exist_and_no_personality() -> io::Result<()> { Ok(()) } +#[tokio::test] +async fn applies_when_only_archived_sessions_exist_and_no_personality() -> io::Result<()> { + let temp = TempDir::new()?; + write_archived_session_with_user_event(temp.path()).await?; + + let config_toml = ConfigToml::default(); + let status = maybe_migrate_personality(temp.path(), 
&config_toml).await?; + + assert_eq!(status, PersonalityMigrationStatus::Applied); + assert!(temp.path().join(PERSONALITY_MIGRATION_FILENAME).exists()); + + let persisted = read_config_toml(temp.path()).await?; + assert_eq!(persisted.personality, Some(Personality::Pragmatic)); + Ok(()) +} + #[tokio::test] async fn skips_when_marker_exists() -> io::Result<()> { let temp = TempDir::new()?; diff --git a/codex-rs/core/src/plugins/discoverable.rs b/codex-rs/core/src/plugins/discoverable.rs index 8c630619b2..e856815a7c 100644 --- a/codex-rs/core/src/plugins/discoverable.rs +++ b/codex-rs/core/src/plugins/discoverable.rs @@ -4,7 +4,6 @@ use tracing::warn; use super::OPENAI_CURATED_MARKETPLACE_NAME; use super::PluginCapabilitySummary; -use super::PluginReadRequest; use super::PluginsManager; use crate::config::Config; use codex_config::types::ToolSuggestDiscoverableType; @@ -22,14 +21,14 @@ const TOOL_SUGGEST_DISCOVERABLE_PLUGIN_ALLOWLIST: &[&str] = &[ "figma@openai-curated", ]; -pub(crate) fn list_tool_suggest_discoverable_plugins( +pub(crate) async fn list_tool_suggest_discoverable_plugins( config: &Config, ) -> anyhow::Result> { if !config.features.enabled(Feature::Plugins) { return Ok(Vec::new()); } - let plugins_manager = PluginsManager::new(config.codex_home.clone()); + let plugins_manager = PluginsManager::new(config.codex_home.to_path_buf()); let configured_plugin_ids = config .tool_suggest .discoverables @@ -47,6 +46,7 @@ pub(crate) fn list_tool_suggest_discoverable_plugins( else { return Ok(Vec::new()); }; + let curated_marketplace_name = curated_marketplace.name; let mut discoverable_plugins = Vec::::new(); for plugin in curated_marketplace.plugins { @@ -58,17 +58,13 @@ pub(crate) fn list_tool_suggest_discoverable_plugins( } let plugin_id = plugin.id.clone(); - let plugin_name = plugin.name.clone(); - match plugins_manager.read_plugin_for_config( - config, - &PluginReadRequest { - plugin_name, - marketplace_path: curated_marketplace.path.clone(), - }, - ) { + 
match plugins_manager + .read_plugin_detail_for_marketplace_plugin(config, &curated_marketplace_name, plugin) + .await + { Ok(plugin) => { - let plugin: PluginCapabilitySummary = plugin.plugin.into(); + let plugin: PluginCapabilitySummary = plugin.into(); discoverable_plugins.push(DiscoverablePluginInfo { id: plugin.config_name, name: plugin.display_name, diff --git a/codex-rs/core/src/plugins/discoverable_tests.rs b/codex-rs/core/src/plugins/discoverable_tests.rs index 70ac887cb6..f330418b0b 100644 --- a/codex-rs/core/src/plugins/discoverable_tests.rs +++ b/codex-rs/core/src/plugins/discoverable_tests.rs @@ -9,6 +9,9 @@ use codex_tools::DiscoverablePluginInfo; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use tempfile::tempdir; +use tracing::Level; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_test::internal::MockWriter; #[tokio::test] async fn list_tool_suggest_discoverable_plugins_returns_uninstalled_curated_plugins() { @@ -18,7 +21,9 @@ async fn list_tool_suggest_discoverable_plugins_returns_uninstalled_curated_plug write_plugins_feature_config(codex_home.path()); let config = load_plugins_config(codex_home.path()).await; - let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config).unwrap(); + let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config) + .await + .unwrap(); assert_eq!( discoverable_plugins, @@ -48,7 +53,9 @@ plugins = false ); let config = load_plugins_config(codex_home.path()).await; - let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config).unwrap(); + let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config) + .await + .unwrap(); assert_eq!(discoverable_plugins, Vec::::new()); } @@ -68,7 +75,9 @@ async fn list_tool_suggest_discoverable_plugins_normalizes_description() { ); let config = load_plugins_config(codex_home.path()).await; - let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config).unwrap(); + let 
discoverable_plugins = list_tool_suggest_discoverable_plugins(&config) + .await + .unwrap(); assert_eq!( discoverable_plugins, @@ -103,7 +112,9 @@ async fn list_tool_suggest_discoverable_plugins_omits_installed_curated_plugins( .expect("plugin should install"); let refreshed_config = load_plugins_config(codex_home.path()).await; - let discoverable_plugins = list_tool_suggest_discoverable_plugins(&refreshed_config).unwrap(); + let discoverable_plugins = list_tool_suggest_discoverable_plugins(&refreshed_config) + .await + .unwrap(); assert_eq!(discoverable_plugins, Vec::::new()); } @@ -124,7 +135,9 @@ discoverables = [{ type = "plugin", id = "sample@openai-curated" }] ); let config = load_plugins_config(codex_home.path()).await; - let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config).unwrap(); + let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config) + .await + .unwrap(); assert_eq!( discoverable_plugins, @@ -140,3 +153,67 @@ discoverables = [{ type = "plugin", id = "sample@openai-curated" }] }] ); } + +#[tokio::test] +async fn list_tool_suggest_discoverable_plugins_does_not_reload_marketplace_per_plugin() { + let codex_home = tempdir().expect("tempdir should succeed"); + let curated_root = crate::plugins::curated_plugins_repo_path(codex_home.path()); + write_openai_curated_marketplace( + &curated_root, + &["slack", "build-ios-apps", "life-science-research"], + ); + write_plugins_feature_config(codex_home.path()); + + let too_long_prompt = "x".repeat(129); + for plugin_name in ["build-ios-apps", "life-science-research"] { + write_file( + &curated_root.join(format!("plugins/{plugin_name}/.codex-plugin/plugin.json")), + &format!( + r#"{{ + "name": "{plugin_name}", + "description": "Plugin that includes skills, MCP servers, and app connectors", + "interface": {{ + "defaultPrompt": "{too_long_prompt}" + }} +}}"# + ), + ); + } + + let config = load_plugins_config(codex_home.path()).await; + let buffer: &'static std::sync::Mutex> = + 
Box::leak(Box::new(std::sync::Mutex::new(Vec::new()))); + let subscriber = tracing_subscriber::fmt() + .with_level(true) + .with_ansi(false) + .with_max_level(Level::WARN) + .with_span_events(FmtSpan::NONE) + .with_writer(MockWriter::new(buffer)) + .finish(); + let _guard = tracing::subscriber::set_default(subscriber); + + let discoverable_plugins = list_tool_suggest_discoverable_plugins(&config) + .await + .unwrap(); + + assert_eq!(discoverable_plugins.len(), 1); + assert_eq!(discoverable_plugins[0].id, "slack@openai-curated"); + + let logs = String::from_utf8(buffer.lock().expect("buffer lock").clone()) + .expect("utf8 logs") + .replace('\\', "/"); + assert_eq!(logs.matches("ignoring interface.defaultPrompt").count(), 2); + let normalized_logs = logs.replace('\\', "/"); + assert_eq!( + normalized_logs + .matches("build-ios-apps/.codex-plugin/plugin.json") + .count(), + 1 + ); + assert_eq!( + normalized_logs + .matches("life-science-research/.codex-plugin/plugin.json") + .count(), + 1 + ); +} diff --git a/codex-rs/core/src/plugins/injection.rs b/codex-rs/core/src/plugins/injection.rs index 3e5bb6ecb5..2f6ec26260 100644 --- a/codex-rs/core/src/plugins/injection.rs +++ b/codex-rs/core/src/plugins/injection.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use std::collections::HashMap; +use codex_connectors::metadata::connector_display_label; use codex_protocol::models::DeveloperInstructions; use codex_protocol::models::ResponseItem; @@ -46,7 +47,7 @@ pub(crate) fn build_plugin_injections( .iter() .any(|plugin_name| plugin_name == &plugin.display_name) }) - .map(connectors::connector_display_label) + .map(connector_display_label) .collect::>() .into_iter() .collect::>(); diff --git a/codex-rs/core/src/plugins/installed_marketplaces.rs b/codex-rs/core/src/plugins/installed_marketplaces.rs index edfc7d895a..a6f80139a9 100644 --- a/codex-rs/core/src/plugins/installed_marketplaces.rs +++ b/codex-rs/core/src/plugins/installed_marketplaces.rs @@ -1,4 +1,5 @@ use 
crate::config::Config; +use codex_core_plugins::marketplace::find_marketplace_manifest_path; use codex_utils_absolute_path::AbsolutePathBuf; use std::path::Path; use std::path::PathBuf; @@ -45,13 +46,30 @@ pub(crate) fn installed_marketplace_roots_from_config( ); return None; } - let path = default_install_root.join(marketplace_name); - path.join(".agents/plugins/marketplace.json") - .is_file() - .then_some(path) + let path = resolve_configured_marketplace_root( + marketplace_name, + marketplace, + &default_install_root, + )?; + find_marketplace_manifest_path(&path).map(|_| path) }) .filter_map(|path| AbsolutePathBuf::try_from(path).ok()) .collect::>(); roots.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); roots } + +pub(crate) fn resolve_configured_marketplace_root( + marketplace_name: &str, + marketplace: &toml::Value, + default_install_root: &Path, +) -> Option { + match marketplace.get("source_type").and_then(toml::Value::as_str) { + Some("local") => marketplace + .get("source") + .and_then(toml::Value::as_str) + .filter(|source| !source.is_empty()) + .map(PathBuf::from), + _ => Some(default_install_root.join(marketplace_name)), + } +} diff --git a/codex-rs/core/src/plugins/manager.rs b/codex-rs/core/src/plugins/manager.rs index 166c094591..7a5991b03a 100644 --- a/codex-rs/core/src/plugins/manager.rs +++ b/codex-rs/core/src/plugins/manager.rs @@ -1,51 +1,53 @@ -use super::LoadedPlugin; use super::PluginLoadOutcome; -use super::PluginManifestPaths; use super::curated_plugins_repo_path; use super::installed_marketplaces::installed_marketplace_roots_from_config; -use super::load_plugin_manifest; -use super::manifest::PluginManifestInterface; -use super::marketplace::MarketplaceError; -use super::marketplace::MarketplaceInterface; -use super::marketplace::MarketplaceListError; -use super::marketplace::MarketplacePluginAuthPolicy; -use super::marketplace::MarketplacePluginPolicy; -use super::marketplace::MarketplacePluginSource; -use 
super::marketplace::ResolvedMarketplacePlugin; -use super::marketplace::list_marketplaces; -use super::marketplace::load_marketplace; -use super::marketplace::resolve_marketplace_plugin; use super::read_curated_plugins_sha; -use super::remote::RemotePluginFetchError; -use super::remote::RemotePluginMutationError; -use super::remote::enable_remote_plugin; -use super::remote::fetch_remote_featured_plugin_ids; -use super::remote::fetch_remote_plugin_status; -use super::remote::uninstall_remote_plugin; use super::startup_sync::start_startup_remote_plugin_sync_once; -use super::store::PluginInstallResult as StorePluginInstallResult; -use super::store::PluginStore; -use super::store::PluginStoreError; -use super::store::plugin_version_for_source; use super::sync_openai_plugins_repo; use crate::SkillMetadata; -use crate::config::CONFIG_TOML_FILE; use crate::config::Config; use crate::config::ConfigService; use crate::config::ConfigServiceError; use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; use crate::config_loader::ConfigLayerStack; -use crate::config_rules::SkillConfigRules; -use crate::config_rules::resolve_disabled_skill_paths; -use crate::config_rules::skill_config_rules_from_stack; -use crate::loader::SkillRoot; -use crate::loader::load_skills_from_roots; use codex_analytics::AnalyticsEventsClient; use codex_app_server_protocol::ConfigValueWriteParams; use codex_app_server_protocol::MergeStrategy; -use codex_config::types::McpServerConfig; use codex_config::types::PluginConfig; +use codex_core_plugins::loader::configured_curated_plugin_ids_from_codex_home; +use codex_core_plugins::loader::installed_plugin_telemetry_metadata; +use codex_core_plugins::loader::load_plugin_apps; +use codex_core_plugins::loader::load_plugin_mcp_servers; +use codex_core_plugins::loader::load_plugin_skills; +use codex_core_plugins::loader::load_plugins_from_layer_stack; +use codex_core_plugins::loader::log_plugin_load_errors; +use 
codex_core_plugins::loader::plugin_telemetry_metadata_from_root; +use codex_core_plugins::loader::refresh_curated_plugin_cache; +use codex_core_plugins::loader::refresh_non_curated_plugin_cache; +use codex_core_plugins::loader::refresh_non_curated_plugin_cache_force_reinstall; +use codex_core_plugins::manifest::PluginManifestInterface; +use codex_core_plugins::manifest::load_plugin_manifest; +use codex_core_plugins::marketplace::MarketplaceError; +use codex_core_plugins::marketplace::MarketplaceInterface; +use codex_core_plugins::marketplace::MarketplaceListError; +use codex_core_plugins::marketplace::MarketplacePluginAuthPolicy; +use codex_core_plugins::marketplace::MarketplacePluginPolicy; +use codex_core_plugins::marketplace::MarketplacePluginSource; +use codex_core_plugins::marketplace::ResolvedMarketplacePlugin; +use codex_core_plugins::marketplace::list_marketplaces; +use codex_core_plugins::marketplace::load_marketplace; +use codex_core_plugins::marketplace::resolve_marketplace_plugin; +use codex_core_plugins::marketplace_upgrade::ConfiguredMarketplaceUpgradeError; +use codex_core_plugins::marketplace_upgrade::ConfiguredMarketplaceUpgradeOutcome; +use codex_core_plugins::marketplace_upgrade::configured_git_marketplace_names; +use codex_core_plugins::marketplace_upgrade::upgrade_configured_git_marketplaces; +use codex_core_plugins::remote::RemotePluginFetchError; +use codex_core_plugins::remote::RemotePluginMutationError; +use codex_core_plugins::remote::RemotePluginServiceConfig; +use codex_core_plugins::store::PluginInstallResult as StorePluginInstallResult; +use codex_core_plugins::store::PluginStore; +use codex_core_plugins::store::PluginStoreError; use codex_features::Feature; use codex_login::AuthManager; use codex_login::CodexAuth; @@ -53,19 +55,12 @@ use codex_plugin::AppConnectorId; use codex_plugin::PluginCapabilitySummary; use codex_plugin::PluginId; use codex_plugin::PluginIdError; -use codex_plugin::PluginTelemetryMetadata; use 
codex_plugin::prompt_safe_plugin_description; use codex_protocol::protocol::Product; -use codex_protocol::protocol::SkillScope; use codex_utils_absolute_path::AbsolutePathBuf; -use serde::Deserialize; -use serde_json::Map as JsonMap; -use serde_json::Value as JsonValue; use serde_json::json; use std::collections::HashMap; use std::collections::HashSet; -use std::fs; -use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use std::sync::RwLock; @@ -77,9 +72,6 @@ use toml_edit::value; use tracing::info; use tracing::warn; -const DEFAULT_SKILLS_DIR_NAME: &str = "skills"; -const DEFAULT_MCP_CONFIG_FILE: &str = ".mcp.json"; -const DEFAULT_APP_CONFIG_FILE: &str = ".app.json"; pub const OPENAI_CURATED_MARKETPLACE_NAME: &str = "openai-curated"; pub const OPENAI_CURATED_MARKETPLACE_DISPLAY_NAME: &str = "OpenAI Curated"; static CURATED_REPO_SYNC_STARTED: AtomicBool = AtomicBool::new(false); @@ -101,13 +93,36 @@ struct CachedFeaturedPluginIds { featured_plugin_ids: Vec, } +#[derive(Clone, PartialEq, Eq)] +struct NonCuratedCacheRefreshRequest { + roots: Vec, + mode: NonCuratedCacheRefreshMode, +} + +#[derive(Clone, Copy, PartialEq, Eq)] +enum NonCuratedCacheRefreshMode { + IfVersionChanged, + ForceReinstall, +} + #[derive(Default)] struct NonCuratedCacheRefreshState { - requested_roots: Option>, - last_refreshed_roots: Option>, + requested: Option, + last_refreshed: Option, in_flight: bool, } +#[derive(Default)] +struct ConfiguredMarketplaceUpgradeState { + in_flight: bool, +} + +fn remote_plugin_service_config(config: &Config) -> RemotePluginServiceConfig { + RemotePluginServiceConfig { + chatgpt_base_url: config.chatgpt_base_url.clone(), + } +} + fn featured_plugin_ids_cache_key( config: &Config, auth: Option<&CodexAuth>, @@ -168,7 +183,7 @@ pub struct PluginDetail { pub installed: bool, pub enabled: bool, pub skills: Vec, - pub disabled_skill_paths: HashSet, + pub disabled_skill_paths: HashSet, pub apps: Vec, pub mcp_server_names: Vec, } @@ -321,6 +336,7 @@ pub 
struct PluginsManager { codex_home: PathBuf, store: PluginStore, featured_plugin_ids_cache: RwLock>, + configured_marketplace_upgrade_state: RwLock, non_curated_cache_refresh_state: RwLock, cached_enabled_outcome: RwLock>, remote_sync_lock: Mutex<()>, @@ -348,6 +364,9 @@ impl PluginsManager { codex_home: codex_home.clone(), store: PluginStore::new(codex_home), featured_plugin_ids_cache: RwLock::new(None), + configured_marketplace_upgrade_state: RwLock::new( + ConfiguredMarketplaceUpgradeState::default(), + ), non_curated_cache_refresh_state: RwLock::new(NonCuratedCacheRefreshState::default()), cached_enabled_outcome: RwLock::new(None), remote_sync_lock: Mutex::new(()), @@ -374,11 +393,12 @@ impl PluginsManager { } } - pub fn plugins_for_config(&self, config: &Config) -> PluginLoadOutcome { + pub async fn plugins_for_config(&self, config: &Config) -> PluginLoadOutcome { self.plugins_for_config_with_force_reload(config, /*force_reload*/ false) + .await } - pub(crate) fn plugins_for_config_with_force_reload( + pub(crate) async fn plugins_for_config_with_force_reload( &self, config: &Config, force_reload: bool, @@ -395,7 +415,8 @@ impl PluginsManager { &config.config_layer_stack, &self.store, self.restriction_product, - ); + ) + .await; log_plugin_load_errors(&outcome); let mut cache = match self.cached_enabled_outcome.write() { Ok(cache) => cache, @@ -419,15 +440,16 @@ impl PluginsManager { } /// Resolve plugin skill roots for a config layer stack without touching the plugins cache. 
- pub fn effective_skill_roots_for_layer_stack( + pub async fn effective_skill_roots_for_layer_stack( &self, config_layer_stack: &ConfigLayerStack, plugins_feature_enabled: bool, - ) -> Vec { + ) -> Vec { if !plugins_feature_enabled { return Vec::new(); } load_plugins_from_layer_stack(config_layer_stack, &self.store, self.restriction_product) + .await .effective_skill_roots() } @@ -499,8 +521,12 @@ impl PluginsManager { if let Some(featured_plugin_ids) = self.cached_featured_plugin_ids(&cache_key) { return Ok(featured_plugin_ids); } - let featured_plugin_ids = - fetch_remote_featured_plugin_ids(config, auth, self.restriction_product).await?; + let featured_plugin_ids = codex_core_plugins::remote::fetch_remote_featured_plugin_ids( + &remote_plugin_service_config(config), + auth, + self.restriction_product, + ) + .await?; self.write_featured_plugin_ids_cache(cache_key, &featured_plugin_ids); Ok(featured_plugin_ids) } @@ -532,9 +558,13 @@ impl PluginsManager { // This only forwards the backend mutation before the local install flow. We rely on // `plugin/list(forceRemoteSync=true)` to sync local state rather than doing an extra // reconcile pass here. 
- enable_remote_plugin(config, auth, &plugin_id) - .await - .map_err(PluginInstallError::from)?; + codex_core_plugins::remote::enable_remote_plugin( + &remote_plugin_service_config(config), + auth, + &plugin_id, + ) + .await + .map_err(PluginInstallError::from)?; self.install_resolved_plugin(resolved).await } @@ -585,10 +615,10 @@ impl PluginsManager { Err(err) => err.into_inner().clone(), }; if let Some(analytics_events_client) = analytics_events_client { - analytics_events_client.track_plugin_installed(plugin_telemetry_metadata_from_root( - &result.plugin_id, - result.installed_path.as_path(), - )); + analytics_events_client.track_plugin_installed( + plugin_telemetry_metadata_from_root(&result.plugin_id, &result.installed_path) + .await, + ); } Ok(PluginInstallOutcome { @@ -615,17 +645,22 @@ impl PluginsManager { // This only forwards the backend mutation before the local uninstall flow. We rely on // `plugin/list(forceRemoteSync=true)` to sync local state rather than doing an extra // reconcile pass here. 
- uninstall_remote_plugin(config, auth, &plugin_key) - .await - .map_err(PluginUninstallError::from)?; + codex_core_plugins::remote::uninstall_remote_plugin( + &remote_plugin_service_config(config), + auth, + &plugin_key, + ) + .await + .map_err(PluginUninstallError::from)?; self.uninstall_plugin_id(plugin_id).await } async fn uninstall_plugin_id(&self, plugin_id: PluginId) -> Result<(), PluginUninstallError> { - let plugin_telemetry = self - .store - .active_plugin_root(&plugin_id) - .map(|_| installed_plugin_telemetry_metadata(self.codex_home.as_path(), &plugin_id)); + let plugin_telemetry = if self.store.active_plugin_root(&plugin_id).is_some() { + Some(installed_plugin_telemetry_metadata(self.codex_home.as_path(), &plugin_id).await) + } else { + None + }; let store = self.store.clone(); let plugin_id_for_store = plugin_id.clone(); tokio::task::spawn_blocking(move || store.uninstall(&plugin_id_for_store)) @@ -665,9 +700,12 @@ impl PluginsManager { } info!("starting remote plugin sync"); - let remote_plugins = fetch_remote_plugin_status(config, auth) - .await - .map_err(PluginRemoteSyncError::from)?; + let remote_plugins = codex_core_plugins::remote::fetch_remote_plugin_status( + &remote_plugin_service_config(config), + auth, + ) + .await + .map_err(PluginRemoteSyncError::from)?; let configured_plugins = configured_plugins_from_stack(&config.config_layer_stack); let curated_marketplace_root = curated_plugins_repo_path(self.codex_home.as_path()); let curated_marketplace_path = AbsolutePathBuf::try_from( @@ -931,7 +969,7 @@ impl PluginsManager { }) } - pub fn read_plugin_for_config( + pub async fn read_plugin_for_config( &self, config: &Config, request: &PluginReadRequest, @@ -952,13 +990,6 @@ impl PluginsManager { marketplace_name, }); }; - if !self.restriction_product_matches(plugin.policy.products.as_deref()) { - return Err(MarketplaceError::PluginNotFound { - plugin_name: request.plugin_name.clone(), - marketplace_name, - }); - } - let plugin_id = 
PluginId::new(plugin.name.clone(), marketplace.name.clone()).map_err( |err| match err { PluginIdError::Invalid(message) => MarketplaceError::InvalidPlugin(message), @@ -966,6 +997,53 @@ impl PluginsManager { )?; let plugin_key = plugin_id.as_key(); let (installed_plugins, enabled_plugins) = self.configured_plugin_states(config); + let plugin = self + .read_plugin_detail_for_marketplace_plugin( + config, + &marketplace.name, + ConfiguredMarketplacePlugin { + id: plugin_key.clone(), + name: plugin.name, + source: plugin.source, + policy: plugin.policy, + interface: plugin.interface, + installed: installed_plugins.contains(&plugin_key), + enabled: enabled_plugins.contains(&plugin_key), + }, + ) + .await?; + + Ok(PluginReadOutcome { + marketplace_name: if marketplace.name == OPENAI_CURATED_MARKETPLACE_NAME { + OPENAI_CURATED_MARKETPLACE_DISPLAY_NAME.to_string() + } else { + marketplace.name + }, + marketplace_path: marketplace.path, + plugin, + }) + } + + pub(crate) async fn read_plugin_detail_for_marketplace_plugin( + &self, + config: &Config, + marketplace_name: &str, + plugin: ConfiguredMarketplacePlugin, + ) -> Result { + if !self.restriction_product_matches(plugin.policy.products.as_deref()) { + return Err(MarketplaceError::PluginNotFound { + plugin_name: plugin.name, + marketplace_name: marketplace_name.to_string(), + }); + } + + let plugin_id = + PluginId::new(plugin.name.clone(), marketplace_name.to_string()).map_err(|err| { + match err { + PluginIdError::Invalid(message) => MarketplaceError::InvalidPlugin(message), + } + })?; + let plugin_key = plugin_id.as_key(); let source_path = match &plugin.source { MarketplacePluginSource::Local { path } => path.clone(), }; @@ -981,47 +1059,37 @@ impl PluginsManager { })?; let description = manifest.description.clone(); let manifest_paths = &manifest.paths; - let skill_config_rules = skill_config_rules_from_stack(&config.config_layer_stack); + let skill_config_rules = 
codex_core_skills::config_rules::skill_config_rules_from_stack( + &config.config_layer_stack, + ); let resolved_skills = load_plugin_skills( - source_path.as_path(), + &source_path, manifest_paths, self.restriction_product, &skill_config_rules, - ); - let apps = load_plugin_apps(source_path.as_path()); - let mcp_config_paths = plugin_mcp_config_paths(source_path.as_path(), manifest_paths); - let mut mcp_server_names = Vec::new(); - for mcp_config_path in mcp_config_paths { - mcp_server_names.extend( - load_mcp_servers_from_file(source_path.as_path(), &mcp_config_path) - .mcp_servers - .into_keys(), - ); - } + ) + .await; + let apps = load_plugin_apps(source_path.as_path()).await; + let mut mcp_server_names = load_plugin_mcp_servers(source_path.as_path()) + .await + .into_keys() + .collect::>(); mcp_server_names.sort_unstable(); mcp_server_names.dedup(); - Ok(PluginReadOutcome { - marketplace_name: if marketplace.name == OPENAI_CURATED_MARKETPLACE_NAME { - OPENAI_CURATED_MARKETPLACE_DISPLAY_NAME.to_string() - } else { - marketplace.name - }, - marketplace_path: marketplace.path, - plugin: PluginDetail { - id: plugin_key.clone(), - name: plugin.name, - description, - source: plugin.source, - policy: plugin.policy, - interface: plugin.interface, - installed: installed_plugins.contains(&plugin_key), - enabled: enabled_plugins.contains(&plugin_key), - skills: resolved_skills.skills, - disabled_skill_paths: resolved_skills.disabled_skill_paths, - apps, - mcp_server_names, - }, + Ok(PluginDetail { + id: plugin_key, + name: plugin.name, + description, + source: plugin.source, + policy: plugin.policy, + interface: plugin.interface, + installed: plugin.installed, + enabled: plugin.enabled, + skills: resolved_skills.skills, + disabled_skill_paths: resolved_skills.disabled_skill_paths, + apps, + mcp_server_names, }) } @@ -1032,6 +1100,57 @@ impl PluginsManager { ) { if config.features.enabled(Feature::Plugins) { self.start_curated_repo_sync(); + let 
should_spawn_marketplace_auto_upgrade = { + let mut state = match self.configured_marketplace_upgrade_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + if state.in_flight { + false + } else { + state.in_flight = true; + true + } + }; + if should_spawn_marketplace_auto_upgrade { + let manager = Arc::clone(self); + let config = config.clone(); + if let Err(err) = std::thread::Builder::new() + .name("plugins-marketplace-auto-upgrade".to_string()) + .spawn(move || { + let outcome = manager.upgrade_configured_marketplaces_for_config( + &config, /*marketplace_name*/ None, + ); + match outcome { + Ok(outcome) => { + for error in outcome.errors { + warn!( + marketplace = error.marketplace_name, + error = %error.message, + "failed to auto-upgrade configured marketplace" + ); + } + } + Err(err) => { + warn!("failed to auto-upgrade configured marketplaces: {err}"); + } + } + + let mut state = match manager.configured_marketplace_upgrade_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + state.in_flight = false; + }) + { + let mut state = match self.configured_marketplace_upgrade_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + state.in_flight = false; + warn!("failed to start configured marketplace auto-upgrade task: {err}"); + } + } start_startup_remote_plugin_sync_once( Arc::clone(self), self.codex_home.clone(), @@ -1056,16 +1175,74 @@ impl PluginsManager { } } - pub fn maybe_start_non_curated_plugin_cache_refresh_for_roots( + pub fn upgrade_configured_marketplaces_for_config( + &self, + config: &Config, + marketplace_name: Option<&str>, + ) -> Result { + if let Some(marketplace_name) = marketplace_name + && !configured_git_marketplace_names(&config.config_layer_stack) + .iter() + .any(|name| name == marketplace_name) + { + return Err(format!( + "marketplace `{marketplace_name}` is not configured as a Git marketplace" + )); + } + + let mut outcome = upgrade_configured_git_marketplaces( + 
self.codex_home.as_path(), + &config.config_layer_stack, + marketplace_name, + ); + if !outcome.upgraded_roots.is_empty() { + match refresh_non_curated_plugin_cache_force_reinstall( + self.codex_home.as_path(), + &outcome.upgraded_roots, + ) { + Ok(cache_refreshed) => { + if cache_refreshed { + self.clear_cache(); + } + } + Err(err) => { + self.clear_cache(); + outcome.errors.push(ConfiguredMarketplaceUpgradeError { + marketplace_name: marketplace_name + .unwrap_or("all configured marketplaces") + .to_string(), + message: format!( + "failed to refresh installed plugin cache after marketplace upgrade: {err}" + ), + }); + } + } + } + Ok(outcome) + } + + pub fn maybe_start_non_curated_plugin_cache_refresh( self: &Arc, roots: &[AbsolutePathBuf], + ) { + self.schedule_non_curated_plugin_cache_refresh( + roots, + NonCuratedCacheRefreshMode::IfVersionChanged, + ); + } + + fn schedule_non_curated_plugin_cache_refresh( + self: &Arc, + roots: &[AbsolutePathBuf], + mode: NonCuratedCacheRefreshMode, ) { let mut roots = roots.to_vec(); - roots.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); + roots.sort_unstable(); roots.dedup(); if roots.is_empty() { return; } + let request = NonCuratedCacheRefreshRequest { roots, mode }; let should_spawn = { let mut state = match self.non_curated_cache_refresh_state.write() { @@ -1073,13 +1250,25 @@ impl PluginsManager { Err(err) => err.into_inner(), }; // Collapse repeated plugin/list requests onto one worker and only queue another pass - // when the requested roots set actually changes. - if state.requested_roots.as_ref() == Some(&roots) - || (!state.in_flight && state.last_refreshed_roots.as_ref() == Some(&roots)) + // when the requested roots set actually changes. Forced reinstall requests are not + // deduped against the last completed pass because the same marketplace root path can + // point at newly activated files after an auto-upgrade. 
+ if state.requested.as_ref() == Some(&request) + || (mode == NonCuratedCacheRefreshMode::IfVersionChanged + && !state.in_flight + && state.last_refreshed.as_ref() == Some(&request)) { return; } - state.requested_roots = Some(roots); + if mode == NonCuratedCacheRefreshMode::IfVersionChanged + && state.requested.as_ref().is_some_and(|requested| { + requested.mode == NonCuratedCacheRefreshMode::ForceReinstall + && requested.roots == request.roots + }) + { + return; + } + state.requested = Some(request); if state.in_flight { false } else { @@ -1101,7 +1290,7 @@ impl PluginsManager { Err(err) => err.into_inner(), }; state.in_flight = false; - state.requested_roots = None; + state.requested = None; warn!("failed to start non-curated plugin cache refresh task: {err}"); } } @@ -1117,13 +1306,8 @@ impl PluginsManager { .spawn( move || match sync_openai_plugins_repo(codex_home.as_path()) { Ok(curated_plugin_version) => { - let configured_curated_plugin_ids = curated_plugin_ids_from_config_keys( - configured_plugins_from_codex_home( - codex_home.as_path(), - "failed to read user config while refreshing curated plugin cache", - "failed to parse user config while refreshing curated plugin cache", - ), - ); + let configured_curated_plugin_ids = + configured_curated_plugin_ids_from_codex_home(codex_home.as_path()); match refresh_curated_plugin_cache( codex_home.as_path(), &curated_plugin_version, @@ -1155,15 +1339,15 @@ impl PluginsManager { fn run_non_curated_plugin_cache_refresh_loop(self: Arc) { loop { - let roots = { + let request = { let state = match self.non_curated_cache_refresh_state.read() { Ok(state) => state, Err(err) => err.into_inner(), }; - state.requested_roots.clone() + state.requested.clone() }; - let Some(roots) = roots else { + let Some(request) = request else { let mut state = match self.non_curated_cache_refresh_state.write() { Ok(state) => state, Err(err) => err.into_inner(), @@ -1172,30 +1356,40 @@ impl PluginsManager { return; }; - let refreshed = - 
match refresh_non_curated_plugin_cache(self.codex_home.as_path(), &roots) { - Ok(cache_refreshed) => { - if cache_refreshed { - self.clear_cache(); - } - true - } - Err(err) => { + let refresh_result = match request.mode { + NonCuratedCacheRefreshMode::IfVersionChanged => { + refresh_non_curated_plugin_cache(self.codex_home.as_path(), &request.roots) + } + NonCuratedCacheRefreshMode::ForceReinstall => { + refresh_non_curated_plugin_cache_force_reinstall( + self.codex_home.as_path(), + &request.roots, + ) + } + }; + let refreshed = match refresh_result { + Ok(cache_refreshed) => { + if cache_refreshed { self.clear_cache(); - warn!("failed to refresh non-curated plugin cache: {err}"); - false } - }; + true + } + Err(err) => { + self.clear_cache(); + warn!("failed to refresh non-curated plugin cache: {err}"); + false + } + }; let mut state = match self.non_curated_cache_refresh_state.write() { Ok(state) => state, Err(err) => err.into_inner(), }; if refreshed { - state.last_refreshed_roots = Some(roots.clone()); + state.last_refreshed = Some(request.clone()); } - if state.requested_roots.as_ref() == Some(&roots) { - state.requested_roots = None; + if state.requested.as_ref() == Some(&request) { + state.requested = None; state.in_flight = false; return; } @@ -1238,7 +1432,7 @@ impl PluginsManager { { roots.push(curated_repo_root); } - roots.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); + roots.sort_unstable(); roots.dedup(); roots } @@ -1309,224 +1503,7 @@ impl PluginUninstallError { } } -fn log_plugin_load_errors(outcome: &PluginLoadOutcome) { - for plugin in outcome - .plugins() - .iter() - .filter(|plugin| plugin.error.is_some()) - { - if let Some(error) = plugin.error.as_deref() { - warn!( - plugin = plugin.config_name, - path = %plugin.root.display(), - "failed to load plugin: {error}" - ); - } - } -} - -#[derive(Debug, Default, Deserialize)] -#[serde(rename_all = "camelCase")] -struct PluginMcpFile { - #[serde(default)] - mcp_servers: 
HashMap, -} - -#[derive(Debug, Default, Deserialize)] -#[serde(rename_all = "camelCase")] -struct PluginAppFile { - #[serde(default)] - apps: HashMap, -} - -#[derive(Debug, Default, Deserialize)] -struct PluginAppConfig { - id: String, -} - -pub(crate) fn load_plugins_from_layer_stack( - config_layer_stack: &ConfigLayerStack, - store: &PluginStore, - restriction_product: Option, -) -> PluginLoadOutcome { - let skill_config_rules = skill_config_rules_from_stack(config_layer_stack); - let mut configured_plugins: Vec<_> = configured_plugins_from_stack(config_layer_stack) - .into_iter() - .collect(); - configured_plugins.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); - - let mut plugins = Vec::with_capacity(configured_plugins.len()); - let mut seen_mcp_server_names = HashMap::::new(); - for (configured_name, plugin) in configured_plugins { - let loaded_plugin = load_plugin( - configured_name.clone(), - &plugin, - store, - restriction_product, - &skill_config_rules, - ); - for name in loaded_plugin.mcp_servers.keys() { - if let Some(previous_plugin) = - seen_mcp_server_names.insert(name.clone(), configured_name.clone()) - { - warn!( - plugin = configured_name, - previous_plugin, - server = name, - "skipping duplicate plugin MCP server name" - ); - } - } - plugins.push(loaded_plugin); - } - - PluginLoadOutcome::from_plugins(plugins) -} - -fn refresh_curated_plugin_cache( - codex_home: &Path, - plugin_version: &str, - configured_curated_plugin_ids: &[PluginId], -) -> Result { - let store = PluginStore::new(codex_home.to_path_buf()); - let curated_marketplace_path = AbsolutePathBuf::try_from( - curated_plugins_repo_path(codex_home).join(".agents/plugins/marketplace.json"), - ) - .map_err(|_| "local curated marketplace is not available".to_string())?; - let curated_marketplace = load_marketplace(&curated_marketplace_path) - .map_err(|err| format!("failed to load curated marketplace for cache refresh: {err}"))?; - - let mut plugin_sources = HashMap::::new(); - for plugin in 
curated_marketplace.plugins { - let plugin_name = plugin.name; - if plugin_sources.contains_key(&plugin_name) { - warn!( - plugin = plugin_name, - marketplace = OPENAI_CURATED_MARKETPLACE_NAME, - "ignoring duplicate curated plugin entry during cache refresh" - ); - continue; - } - let source_path = match plugin.source { - MarketplacePluginSource::Local { path } => path, - }; - plugin_sources.insert(plugin_name, source_path); - } - - let mut cache_refreshed = false; - for plugin_id in configured_curated_plugin_ids { - if store.active_plugin_version(plugin_id).as_deref() == Some(plugin_version) { - continue; - } - - let Some(source_path) = plugin_sources.get(&plugin_id.plugin_name).cloned() else { - warn!( - plugin = plugin_id.plugin_name, - marketplace = OPENAI_CURATED_MARKETPLACE_NAME, - "configured curated plugin no longer exists in curated marketplace during cache refresh" - ); - continue; - }; - - store - .install_with_version(source_path, plugin_id.clone(), plugin_version.to_string()) - .map_err(|err| { - format!( - "failed to refresh curated plugin cache for {}: {err}", - plugin_id.as_key() - ) - })?; - cache_refreshed = true; - } - - Ok(cache_refreshed) -} - -fn refresh_non_curated_plugin_cache( - codex_home: &Path, - additional_roots: &[AbsolutePathBuf], -) -> Result { - let configured_non_curated_plugin_ids = - non_curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( - codex_home, - "failed to read user config while refreshing non-curated plugin cache", - "failed to parse user config while refreshing non-curated plugin cache", - )); - if configured_non_curated_plugin_ids.is_empty() { - return Ok(false); - } - let configured_non_curated_plugin_keys = configured_non_curated_plugin_ids - .iter() - .map(PluginId::as_key) - .collect::>(); - - let store = PluginStore::new(codex_home.to_path_buf()); - let marketplace_outcome = list_marketplaces(additional_roots) - .map_err(|err| format!("failed to discover marketplaces for cache refresh: 
{err}"))?; - let mut plugin_sources = HashMap::::new(); - - for marketplace in marketplace_outcome.marketplaces { - if marketplace.name == OPENAI_CURATED_MARKETPLACE_NAME { - continue; - } - - for plugin in marketplace.plugins { - let plugin_id = - PluginId::new(plugin.name.clone(), marketplace.name.clone()).map_err(|err| { - match err { - PluginIdError::Invalid(message) => { - format!("failed to prepare non-curated plugin cache refresh: {message}") - } - } - })?; - let plugin_key = plugin_id.as_key(); - if !configured_non_curated_plugin_keys.contains(&plugin_key) { - continue; - } - if plugin_sources.contains_key(&plugin_key) { - warn!( - plugin = plugin.name, - marketplace = marketplace.name, - "ignoring duplicate non-curated plugin entry during cache refresh" - ); - continue; - } - - let source_path = match plugin.source { - MarketplacePluginSource::Local { path } => path, - }; - let plugin_version = plugin_version_for_source(source_path.as_path()) - .map_err(|err| format!("failed to read plugin version for {plugin_key}: {err}"))?; - plugin_sources.insert(plugin_key, (source_path, plugin_version)); - } - } - - let mut cache_refreshed = false; - for plugin_id in configured_non_curated_plugin_ids { - let plugin_key = plugin_id.as_key(); - let Some((source_path, plugin_version)) = plugin_sources.get(&plugin_key).cloned() else { - warn!( - plugin = plugin_id.plugin_name, - marketplace = plugin_id.marketplace_name, - "configured non-curated plugin no longer exists in discovered marketplaces during cache refresh" - ); - continue; - }; - - if store.active_plugin_version(&plugin_id).as_deref() == Some(plugin_version.as_str()) { - continue; - } - - store - .install_with_version(source_path, plugin_id.clone(), plugin_version) - .map_err(|err| format!("failed to refresh plugin cache for {plugin_key}: {err}"))?; - cache_refreshed = true; - } - - Ok(cache_refreshed) -} - -fn configured_plugins_from_stack( +pub(crate) fn configured_plugins_from_stack( config_layer_stack: 
&ConfigLayerStack, ) -> HashMap { // Plugin entries remain persisted user config only. @@ -1551,508 +1528,6 @@ fn configured_plugins_from_user_config_value( } } -fn configured_plugins_from_codex_home( - codex_home: &Path, - read_error_message: &str, - parse_error_message: &str, -) -> HashMap { - let config_path = codex_home.join(CONFIG_TOML_FILE); - let user_config = match fs::read_to_string(&config_path) { - Ok(user_config) => user_config, - Err(err) if err.kind() == std::io::ErrorKind::NotFound => return HashMap::new(), - Err(err) => { - warn!( - path = %config_path.display(), - error = %err, - "{read_error_message}" - ); - return HashMap::new(); - } - }; - - let user_config = match toml::from_str::(&user_config) { - Ok(user_config) => user_config, - Err(err) => { - warn!( - path = %config_path.display(), - error = %err, - "{parse_error_message}" - ); - return HashMap::new(); - } - }; - - configured_plugins_from_user_config_value(&user_config) -} - -fn configured_plugin_ids( - configured_plugins: HashMap, - invalid_plugin_key_message: &str, -) -> Vec { - configured_plugins - .into_keys() - .filter_map(|plugin_key| match PluginId::parse(&plugin_key) { - Ok(plugin_id) => Some(plugin_id), - Err(err) => { - warn!( - plugin_key, - error = %err, - "{invalid_plugin_key_message}" - ); - None - } - }) - .collect() -} - -fn curated_plugin_ids_from_config_keys( - configured_plugins: HashMap, -) -> Vec { - let mut configured_curated_plugin_ids = configured_plugin_ids( - configured_plugins, - "ignoring invalid configured plugin key during curated sync setup", - ) - .into_iter() - .filter(|plugin_id| plugin_id.marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME) - .collect::>(); - configured_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); - configured_curated_plugin_ids -} - -fn non_curated_plugin_ids_from_config_keys( - configured_plugins: HashMap, -) -> Vec { - let mut configured_non_curated_plugin_ids = configured_plugin_ids( - configured_plugins, - "ignoring 
invalid plugin key during non-curated cache refresh setup", - ) - .into_iter() - .filter(|plugin_id| plugin_id.marketplace_name != OPENAI_CURATED_MARKETPLACE_NAME) - .collect::>(); - configured_non_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); - configured_non_curated_plugin_ids -} - -fn load_plugin( - config_name: String, - plugin: &PluginConfig, - store: &PluginStore, - restriction_product: Option, - skill_config_rules: &SkillConfigRules, -) -> LoadedPlugin { - let plugin_id = PluginId::parse(&config_name); - let active_plugin_root = plugin_id - .as_ref() - .ok() - .and_then(|plugin_id| store.active_plugin_root(plugin_id)); - let root = active_plugin_root - .clone() - .unwrap_or_else(|| match &plugin_id { - Ok(plugin_id) => store.plugin_base_root(plugin_id), - Err(_) => store.root().clone(), - }); - let mut loaded_plugin = LoadedPlugin { - config_name, - manifest_name: None, - manifest_description: None, - root, - enabled: plugin.enabled, - skill_roots: Vec::new(), - disabled_skill_paths: HashSet::new(), - has_enabled_skills: false, - mcp_servers: HashMap::new(), - apps: Vec::new(), - error: None, - }; - - if !plugin.enabled { - return loaded_plugin; - } - - let plugin_root = match plugin_id { - Ok(_) => match active_plugin_root { - Some(plugin_root) => plugin_root, - None => { - loaded_plugin.error = Some("plugin is not installed".to_string()); - return loaded_plugin; - } - }, - Err(err) => { - loaded_plugin.error = Some(err.to_string()); - return loaded_plugin; - } - }; - - if !plugin_root.as_path().is_dir() { - loaded_plugin.error = Some("path does not exist or is not a directory".to_string()); - return loaded_plugin; - } - - let Some(manifest) = load_plugin_manifest(plugin_root.as_path()) else { - loaded_plugin.error = Some("missing or invalid .codex-plugin/plugin.json".to_string()); - return loaded_plugin; - }; - - let manifest_paths = &manifest.paths; - loaded_plugin.manifest_name = manifest - .interface - .as_ref() - .and_then(|interface| 
interface.display_name.as_deref()) - .map(str::trim) - .filter(|display_name| !display_name.is_empty()) - .map(str::to_string) - .or_else(|| Some(manifest.name.clone())); - loaded_plugin.manifest_description = manifest.description.clone(); - loaded_plugin.skill_roots = plugin_skill_roots(plugin_root.as_path(), manifest_paths); - let resolved_skills = load_plugin_skills( - plugin_root.as_path(), - manifest_paths, - restriction_product, - skill_config_rules, - ); - let has_enabled_skills = resolved_skills.has_enabled_skills(); - loaded_plugin.disabled_skill_paths = resolved_skills.disabled_skill_paths; - loaded_plugin.has_enabled_skills = has_enabled_skills; - let mut mcp_servers = HashMap::new(); - for mcp_config_path in plugin_mcp_config_paths(plugin_root.as_path(), manifest_paths) { - let plugin_mcp = load_mcp_servers_from_file(plugin_root.as_path(), &mcp_config_path); - for (name, config) in plugin_mcp.mcp_servers { - if mcp_servers.insert(name.clone(), config).is_some() { - warn!( - plugin = %plugin_root.display(), - path = %mcp_config_path.display(), - server = name, - "plugin MCP file overwrote an earlier server definition" - ); - } - } - } - loaded_plugin.mcp_servers = mcp_servers; - loaded_plugin.apps = load_plugin_apps(plugin_root.as_path()); - loaded_plugin -} - -struct ResolvedPluginSkills { - skills: Vec, - disabled_skill_paths: HashSet, - had_errors: bool, -} - -impl ResolvedPluginSkills { - fn has_enabled_skills(&self) -> bool { - // Keep the plugin visible in capability summaries if skill loading was partial. 
- self.had_errors - || self - .skills - .iter() - .any(|skill| !self.disabled_skill_paths.contains(&skill.path_to_skills_md)) - } -} - -fn load_plugin_skills( - plugin_root: &Path, - manifest_paths: &PluginManifestPaths, - restriction_product: Option, - skill_config_rules: &SkillConfigRules, -) -> ResolvedPluginSkills { - let outcome = load_skills_from_roots( - plugin_skill_roots(plugin_root, manifest_paths) - .into_iter() - .map(|path| SkillRoot { - path, - scope: SkillScope::User, - }), - ); - let had_errors = !outcome.errors.is_empty(); - let skills = outcome - .skills - .into_iter() - .filter(|skill| skill.matches_product_restriction_for_product(restriction_product)) - .collect::>(); - let disabled_skill_paths = resolve_disabled_skill_paths(&skills, skill_config_rules); - - ResolvedPluginSkills { - skills, - disabled_skill_paths, - had_errors, - } -} - -fn plugin_skill_roots(plugin_root: &Path, manifest_paths: &PluginManifestPaths) -> Vec { - let mut paths = default_skill_roots(plugin_root); - if let Some(path) = &manifest_paths.skills { - paths.push(path.to_path_buf()); - } - paths.sort_unstable(); - paths.dedup(); - paths -} - -fn default_skill_roots(plugin_root: &Path) -> Vec { - let skills_dir = plugin_root.join(DEFAULT_SKILLS_DIR_NAME); - if skills_dir.is_dir() { - vec![skills_dir] - } else { - Vec::new() - } -} - -fn plugin_mcp_config_paths( - plugin_root: &Path, - manifest_paths: &PluginManifestPaths, -) -> Vec { - if let Some(path) = &manifest_paths.mcp_servers { - return vec![path.clone()]; - } - default_mcp_config_paths(plugin_root) -} - -fn default_mcp_config_paths(plugin_root: &Path) -> Vec { - let mut paths = Vec::new(); - let default_path = plugin_root.join(DEFAULT_MCP_CONFIG_FILE); - if default_path.is_file() - && let Ok(default_path) = AbsolutePathBuf::try_from(default_path) - { - paths.push(default_path); - } - paths.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); - paths.dedup_by(|left, right| left.as_path() == 
right.as_path()); - paths -} - -pub fn load_plugin_apps(plugin_root: &Path) -> Vec { - if let Some(manifest) = load_plugin_manifest(plugin_root) { - return load_apps_from_paths( - plugin_root, - plugin_app_config_paths(plugin_root, &manifest.paths), - ); - } - load_apps_from_paths(plugin_root, default_app_config_paths(plugin_root)) -} - -fn plugin_app_config_paths( - plugin_root: &Path, - manifest_paths: &PluginManifestPaths, -) -> Vec { - if let Some(path) = &manifest_paths.apps { - return vec![path.clone()]; - } - default_app_config_paths(plugin_root) -} - -fn default_app_config_paths(plugin_root: &Path) -> Vec { - let mut paths = Vec::new(); - let default_path = plugin_root.join(DEFAULT_APP_CONFIG_FILE); - if default_path.is_file() - && let Ok(default_path) = AbsolutePathBuf::try_from(default_path) - { - paths.push(default_path); - } - paths.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); - paths.dedup_by(|left, right| left.as_path() == right.as_path()); - paths -} - -fn load_apps_from_paths( - plugin_root: &Path, - app_config_paths: Vec, -) -> Vec { - let mut connector_ids = Vec::new(); - for app_config_path in app_config_paths { - let Ok(contents) = fs::read_to_string(app_config_path.as_path()) else { - continue; - }; - let parsed = match serde_json::from_str::(&contents) { - Ok(parsed) => parsed, - Err(err) => { - warn!( - path = %app_config_path.display(), - "failed to parse plugin app config: {err}" - ); - continue; - } - }; - - let mut apps: Vec = parsed.apps.into_values().collect(); - apps.sort_unstable_by(|left, right| left.id.cmp(&right.id)); - - connector_ids.extend(apps.into_iter().filter_map(|app| { - if app.id.trim().is_empty() { - warn!( - plugin = %plugin_root.display(), - "plugin app config is missing an app id" - ); - None - } else { - Some(AppConnectorId(app.id)) - } - })); - } - connector_ids.dedup(); - connector_ids -} - -pub fn plugin_telemetry_metadata_from_root( - plugin_id: &PluginId, - plugin_root: &Path, -) -> 
PluginTelemetryMetadata { - let Some(manifest) = load_plugin_manifest(plugin_root) else { - return PluginTelemetryMetadata::from_plugin_id(plugin_id); - }; - - let manifest_paths = &manifest.paths; - let has_skills = !plugin_skill_roots(plugin_root, manifest_paths).is_empty(); - let mut mcp_server_names = Vec::new(); - for path in plugin_mcp_config_paths(plugin_root, manifest_paths) { - mcp_server_names.extend( - load_mcp_servers_from_file(plugin_root, &path) - .mcp_servers - .into_keys(), - ); - } - mcp_server_names.sort_unstable(); - mcp_server_names.dedup(); - - PluginTelemetryMetadata { - plugin_id: plugin_id.clone(), - capability_summary: Some(PluginCapabilitySummary { - config_name: plugin_id.as_key(), - display_name: plugin_id.plugin_name.clone(), - description: None, - has_skills, - mcp_server_names, - app_connector_ids: load_plugin_apps(plugin_root), - }), - } -} - -pub fn load_plugin_mcp_servers(plugin_root: &Path) -> HashMap { - let Some(manifest) = load_plugin_manifest(plugin_root) else { - return HashMap::new(); - }; - - let mut mcp_servers = HashMap::new(); - for mcp_config_path in plugin_mcp_config_paths(plugin_root, &manifest.paths) { - let plugin_mcp = load_mcp_servers_from_file(plugin_root, &mcp_config_path); - for (name, config) in plugin_mcp.mcp_servers { - mcp_servers.entry(name).or_insert(config); - } - } - - mcp_servers -} - -pub fn installed_plugin_telemetry_metadata( - codex_home: &Path, - plugin_id: &PluginId, -) -> PluginTelemetryMetadata { - let store = PluginStore::new(codex_home.to_path_buf()); - let Some(plugin_root) = store.active_plugin_root(plugin_id) else { - return PluginTelemetryMetadata::from_plugin_id(plugin_id); - }; - - plugin_telemetry_metadata_from_root(plugin_id, plugin_root.as_path()) -} - -fn load_mcp_servers_from_file( - plugin_root: &Path, - mcp_config_path: &AbsolutePathBuf, -) -> PluginMcpDiscovery { - let Ok(contents) = fs::read_to_string(mcp_config_path.as_path()) else { - return PluginMcpDiscovery::default(); - 
}; - let parsed = match serde_json::from_str::(&contents) { - Ok(parsed) => parsed, - Err(err) => { - warn!( - path = %mcp_config_path.display(), - "failed to parse plugin MCP config: {err}" - ); - return PluginMcpDiscovery::default(); - } - }; - normalize_plugin_mcp_servers( - plugin_root, - parsed.mcp_servers, - mcp_config_path.to_string_lossy().as_ref(), - ) -} - -fn normalize_plugin_mcp_servers( - plugin_root: &Path, - plugin_mcp_servers: HashMap, - source: &str, -) -> PluginMcpDiscovery { - let mut mcp_servers = HashMap::new(); - - for (name, config_value) in plugin_mcp_servers { - let normalized = normalize_plugin_mcp_server_value(plugin_root, config_value); - match serde_json::from_value::(JsonValue::Object(normalized)) { - Ok(config) => { - mcp_servers.insert(name, config); - } - Err(err) => { - warn!( - plugin = %plugin_root.display(), - server = name, - "failed to parse plugin MCP server from {source}: {err}" - ); - } - } - } - - PluginMcpDiscovery { mcp_servers } -} - -fn normalize_plugin_mcp_server_value( - plugin_root: &Path, - value: JsonValue, -) -> JsonMap { - let mut object = match value { - JsonValue::Object(object) => object, - _ => return JsonMap::new(), - }; - - if let Some(JsonValue::String(transport_type)) = object.remove("type") { - match transport_type.as_str() { - "http" | "streamable_http" | "streamable-http" => {} - "stdio" => {} - other => { - warn!( - plugin = %plugin_root.display(), - transport = other, - "plugin MCP server uses an unknown transport type" - ); - } - } - } - - if let Some(JsonValue::Object(oauth)) = object.remove("oauth") - && oauth.contains_key("callbackPort") - { - warn!( - plugin = %plugin_root.display(), - "plugin MCP server OAuth callbackPort is ignored; Codex uses global MCP OAuth callback settings" - ); - } - - if let Some(JsonValue::String(cwd)) = object.get("cwd") - && !Path::new(cwd).is_absolute() - { - object.insert( - "cwd".to_string(), - JsonValue::String(plugin_root.join(cwd).display().to_string()), - ); 
- } - - object -} - -#[derive(Debug, Default)] -struct PluginMcpDiscovery { - mcp_servers: HashMap, -} - #[cfg(test)] #[path = "manager_tests.rs"] mod tests; diff --git a/codex-rs/core/src/plugins/manager_tests.rs b/codex-rs/core/src/plugins/manager_tests.rs index 4856ea7711..2b2f486586 100644 --- a/codex-rs/core/src/plugins/manager_tests.rs +++ b/codex-rs/core/src/plugins/manager_tests.rs @@ -6,7 +6,6 @@ use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigRequirements; use crate::config_loader::ConfigRequirementsToml; use crate::plugins::LoadedPlugin; -use crate::plugins::MarketplacePluginInstallPolicy; use crate::plugins::PluginLoadOutcome; use crate::plugins::marketplace_install_root; use crate::plugins::test_support::TEST_CURATED_PLUGIN_SHA; @@ -14,11 +13,17 @@ use crate::plugins::test_support::write_curated_plugin_sha_with as write_curated use crate::plugins::test_support::write_file; use crate::plugins::test_support::write_openai_curated_marketplace; use codex_app_server_protocol::ConfigLayerSource; +use codex_config::McpServerConfig; use codex_config::types::McpServerTransportConfig; +use codex_core_plugins::loader::refresh_non_curated_plugin_cache; +use codex_core_plugins::loader::refresh_non_curated_plugin_cache_force_reinstall; +use codex_core_plugins::marketplace::MarketplacePluginInstallPolicy; use codex_login::CodexAuth; use codex_protocol::protocol::Product; +use codex_utils_absolute_path::test_support::PathBufExt; use pretty_assertions::assert_eq; use std::fs; +use std::path::Path; use tempfile::TempDir; use toml::Value; use wiremock::Mock; @@ -81,10 +86,12 @@ fn plugin_config_toml(enabled: bool, plugins_feature_enabled: bool) -> String { toml::to_string(&Value::Table(root)).expect("plugin test config should serialize") } -fn load_plugins_from_config(config_toml: &str, codex_home: &Path) -> PluginLoadOutcome { +async fn load_plugins_from_config(config_toml: &str, codex_home: &Path) -> PluginLoadOutcome { 
write_file(&codex_home.join(CONFIG_TOML_FILE), config_toml); - let config = load_config_blocking(codex_home, codex_home); - PluginsManager::new(codex_home.to_path_buf()).plugins_for_config(&config) + let config = load_config(codex_home, codex_home).await; + PluginsManager::new(codex_home.to_path_buf()) + .plugins_for_config(&config) + .await } async fn load_config(codex_home: &Path, cwd: &Path) -> crate::config::Config { @@ -96,16 +103,8 @@ async fn load_config(codex_home: &Path, cwd: &Path) -> crate::config::Config { .expect("config should load") } -fn load_config_blocking(codex_home: &Path, cwd: &Path) -> crate::config::Config { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("tokio runtime should build") - .block_on(load_config(codex_home, cwd)) -} - -#[test] -fn load_plugins_loads_default_skills_and_mcp_servers() { +#[tokio::test] +async fn load_plugins_loads_default_skills_and_mcp_servers() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -152,7 +151,8 @@ fn load_plugins_loads_default_skills_and_mcp_servers() { let outcome = load_plugins_from_config( &plugin_config_toml(/*enabled*/ true, /*plugins_feature_enabled*/ true), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins(), @@ -164,7 +164,7 @@ fn load_plugins_loads_default_skills_and_mcp_servers() { ), root: AbsolutePathBuf::try_from(plugin_root.clone()).unwrap(), enabled: true, - skill_roots: vec![plugin_root.join("skills")], + skill_roots: vec![plugin_root.join("skills").abs()], disabled_skill_paths: HashSet::new(), has_enabled_skills: true, mcp_servers: HashMap::from([( @@ -176,11 +176,14 @@ fn load_plugins_loads_default_skills_and_mcp_servers() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: 
None, disabled_tools: None, scopes: None, @@ -205,7 +208,7 @@ fn load_plugins_loads_default_skills_and_mcp_servers() { ); assert_eq!( outcome.effective_skill_roots(), - vec![plugin_root.join("skills")] + vec![plugin_root.join("skills").abs()] ); assert_eq!(outcome.effective_mcp_servers().len(), 1); assert_eq!( @@ -214,8 +217,8 @@ fn load_plugins_loads_default_skills_and_mcp_servers() { ); } -#[test] -fn load_plugins_resolves_disabled_skill_names_against_loaded_plugin_skills() { +#[tokio::test] +async fn load_plugins_resolves_disabled_skill_names_against_loaded_plugin_skills() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -242,8 +245,10 @@ enabled = false [plugins."sample@test"] enabled = true "#; - let outcome = load_plugins_from_config(config_toml, codex_home.path()); - let skill_path = dunce::canonicalize(skill_path).expect("skill path should canonicalize"); + let outcome = load_plugins_from_config(config_toml, codex_home.path()).await; + let skill_path = dunce::canonicalize(skill_path) + .expect("skill path should canonicalize") + .abs(); assert_eq!( outcome.plugins()[0].disabled_skill_paths, @@ -253,8 +258,8 @@ enabled = true assert!(outcome.capability_summaries().is_empty()); } -#[test] -fn load_plugins_ignores_unknown_disabled_skill_names() { +#[tokio::test] +async fn load_plugins_ignores_unknown_disabled_skill_names() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -280,7 +285,7 @@ enabled = false [plugins."sample@test"] enabled = true "#; - let outcome = load_plugins_from_config(config_toml, codex_home.path()); + let outcome = load_plugins_from_config(config_toml, codex_home.path()).await; assert!(outcome.plugins()[0].disabled_skill_paths.is_empty()); assert!(outcome.plugins()[0].has_enabled_skills); @@ -297,8 +302,8 @@ enabled = true ); } -#[test] -fn plugin_telemetry_metadata_uses_default_mcp_config_path() { +#[tokio::test] +async fn 
plugin_telemetry_metadata_uses_default_mcp_config_path() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -325,8 +330,9 @@ fn plugin_telemetry_metadata_uses_default_mcp_config_path() { let metadata = plugin_telemetry_metadata_from_root( &PluginId::parse("sample@test").expect("plugin id should parse"), - &plugin_root, - ); + &plugin_root.abs(), + ) + .await; assert_eq!( metadata.capability_summary, @@ -341,8 +347,8 @@ fn plugin_telemetry_metadata_uses_default_mcp_config_path() { ); } -#[test] -fn capability_summary_sanitizes_plugin_descriptions_to_one_line() { +#[tokio::test] +async fn capability_summary_sanitizes_plugin_descriptions_to_one_line() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -364,7 +370,8 @@ fn capability_summary_sanitizes_plugin_descriptions_to_one_line() { let outcome = load_plugins_from_config( &plugin_config_toml(/*enabled*/ true, /*plugins_feature_enabled*/ true), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins()[0].manifest_description.as_deref(), @@ -376,8 +383,8 @@ fn capability_summary_sanitizes_plugin_descriptions_to_one_line() { ); } -#[test] -fn capability_summary_truncates_overlong_plugin_descriptions() { +#[tokio::test] +async fn capability_summary_truncates_overlong_plugin_descriptions() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -402,7 +409,8 @@ fn capability_summary_truncates_overlong_plugin_descriptions() { let outcome = load_plugins_from_config( &plugin_config_toml(/*enabled*/ true, /*plugins_feature_enabled*/ true), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins()[0].manifest_description.as_deref(), @@ -414,8 +422,8 @@ fn capability_summary_truncates_overlong_plugin_descriptions() { ); } -#[test] -fn load_plugins_uses_manifest_configured_component_paths() { +#[tokio::test] +async fn load_plugins_uses_manifest_configured_component_paths() { let codex_home = TempDir::new().unwrap(); 
let plugin_root = codex_home .path() @@ -485,13 +493,14 @@ fn load_plugins_uses_manifest_configured_component_paths() { let outcome = load_plugins_from_config( &plugin_config_toml(/*enabled*/ true, /*plugins_feature_enabled*/ true), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins()[0].skill_roots, vec![ - plugin_root.join("custom-skills"), - plugin_root.join("skills") + plugin_root.join("custom-skills").abs(), + plugin_root.join("skills").abs() ] ); assert_eq!( @@ -505,11 +514,14 @@ fn load_plugins_uses_manifest_configured_component_paths() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -524,8 +536,8 @@ fn load_plugins_uses_manifest_configured_component_paths() { ); } -#[test] -fn load_plugins_ignores_manifest_component_paths_without_dot_slash() { +#[tokio::test] +async fn load_plugins_ignores_manifest_component_paths_without_dot_slash() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -595,11 +607,12 @@ fn load_plugins_ignores_manifest_component_paths_without_dot_slash() { let outcome = load_plugins_from_config( &plugin_config_toml(/*enabled*/ true, /*plugins_feature_enabled*/ true), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins()[0].skill_roots, - vec![plugin_root.join("skills")] + vec![plugin_root.join("skills").abs()] ); assert_eq!( outcome.plugins()[0].mcp_servers, @@ -612,11 +625,14 @@ fn load_plugins_ignores_manifest_component_paths_without_dot_slash() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, 
enabled_tools: None, disabled_tools: None, scopes: None, @@ -631,8 +647,8 @@ fn load_plugins_ignores_manifest_component_paths_without_dot_slash() { ); } -#[test] -fn load_plugins_preserves_disabled_plugins_without_effective_contributions() { +#[tokio::test] +async fn load_plugins_preserves_disabled_plugins_without_effective_contributions() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -660,7 +676,8 @@ fn load_plugins_preserves_disabled_plugins_without_effective_contributions() { /*enabled*/ false, /*plugins_feature_enabled*/ true, ), codex_home.path(), - ); + ) + .await; assert_eq!( outcome.plugins(), @@ -682,8 +699,8 @@ fn load_plugins_preserves_disabled_plugins_without_effective_contributions() { assert!(outcome.effective_mcp_servers().is_empty()); } -#[test] -fn effective_apps_dedupes_connector_ids_across_plugins() { +#[tokio::test] +async fn effective_apps_dedupes_connector_ids_across_plugins() { let codex_home = TempDir::new().unwrap(); let plugin_a_root = codex_home .path() @@ -745,7 +762,7 @@ fn effective_apps_dedupes_connector_ids_across_plugins() { let config_toml = toml::to_string(&Value::Table(root)).expect("plugin test config should serialize"); - let outcome = load_plugins_from_config(&config_toml, codex_home.path()); + let outcome = load_plugins_from_config(&config_toml, codex_home.path()).await; assert_eq!( outcome.effective_apps(), @@ -767,11 +784,14 @@ fn capability_index_filters_inactive_and_zero_capability_plugins() { http_headers: None, env_http_headers: None, }, + experimental_environment: None, enabled: true, required: false, + supports_parallel_tool_calls: false, disabled_reason: None, startup_timeout_sec: None, tool_timeout_sec: None, + default_tools_approval_mode: None, enabled_tools: None, disabled_tools: None, scopes: None, @@ -799,7 +819,7 @@ fn capability_index_filters_inactive_and_zero_capability_plugins() { }; let outcome = PluginLoadOutcome::from_plugins(vec![ LoadedPlugin { - skill_roots: 
vec![codex_home.path().join("skills-plugin/skills")], + skill_roots: vec![codex_home.path().join("skills-plugin/skills").abs()], has_enabled_skills: true, ..plugin("skills@test", "skills-plugin", "skills-plugin") }, @@ -816,7 +836,7 @@ fn capability_index_filters_inactive_and_zero_capability_plugins() { plugin("empty@test", "empty-plugin", "empty-plugin"), LoadedPlugin { enabled: false, - skill_roots: vec![codex_home.path().join("disabled-plugin/skills")], + skill_roots: vec![codex_home.path().join("disabled-plugin/skills").abs()], apps: vec![connector("connector_hidden")], ..plugin("disabled@test", "disabled-plugin", "disabled-plugin") }, @@ -851,8 +871,8 @@ fn capability_index_filters_inactive_and_zero_capability_plugins() { ); } -#[test] -fn load_plugins_returns_empty_when_feature_disabled() { +#[tokio::test] +async fn load_plugins_returns_empty_when_feature_disabled() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -874,14 +894,16 @@ fn load_plugins_returns_empty_when_feature_disabled() { ), ); - let config = load_config_blocking(codex_home.path(), codex_home.path()); - let outcome = PluginsManager::new(codex_home.path().to_path_buf()).plugins_for_config(&config); + let config = load_config(codex_home.path(), codex_home.path()).await; + let outcome = PluginsManager::new(codex_home.path().to_path_buf()) + .plugins_for_config(&config) + .await; assert_eq!(outcome, PluginLoadOutcome::default()); } -#[test] -fn load_plugins_rejects_invalid_plugin_keys() { +#[tokio::test] +async fn load_plugins_rejects_invalid_plugin_keys() { let codex_home = TempDir::new().unwrap(); let plugin_root = codex_home .path() @@ -908,7 +930,8 @@ fn load_plugins_rejects_invalid_plugin_keys() { let outcome = load_plugins_from_config( &toml::to_string(&Value::Table(root)).expect("plugin test config should serialize"), codex_home.path(), - ); + ) + .await; assert_eq!(outcome.plugins().len(), 1); assert_eq!( @@ -1339,6 +1362,7 @@ enabled = true 
marketplace_path, }, ) + .await .unwrap_err(); assert!(matches!(err, MarketplaceError::PluginsDisabled)); @@ -1403,6 +1427,7 @@ enabled = false .unwrap(), }, ) + .await .unwrap(); assert!(outcome.plugin.disabled_skill_paths.is_empty()); @@ -2464,14 +2489,10 @@ enabled = true ); assert_eq!( - curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( - tmp.path(), - "failed to read user config while refreshing curated plugin cache", - "failed to parse user config while refreshing curated plugin cache", - )) - .into_iter() - .map(|plugin_id| plugin_id.as_key()) - .collect::>(), + configured_curated_plugin_ids_from_codex_home(tmp.path()) + .into_iter() + .map(|plugin_id| plugin_id.as_key()) + .collect::>(), vec!["slack@openai-curated".to_string()] ); @@ -2483,11 +2504,7 @@ plugins = true ); assert_eq!( - curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( - tmp.path(), - "failed to read user config while refreshing curated plugin cache", - "failed to parse user config while refreshing curated plugin cache", - )), + configured_curated_plugin_ids_from_codex_home(tmp.path()), Vec::::new() ); } @@ -2665,6 +2682,68 @@ enabled = true ); } +#[test] +fn refresh_non_curated_plugin_cache_force_reinstalls_current_local_version() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin(&repo_root, "sample-plugin", "sample-plugin"); + fs::write(repo_root.join("sample-plugin/skills/SKILL.md"), "new skill").unwrap(); + write_file( + &repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + } + ] +}"#, + ); + write_plugin( + &tmp.path().join("plugins/cache/debug"), + "sample-plugin/local", + "sample-plugin", + ); + fs::write( + tmp.path() + 
.join("plugins/cache/debug/sample-plugin/local/skills/SKILL.md"), + "old skill", + ) + .unwrap(); + write_file( + &tmp.path().join(CONFIG_TOML_FILE), + r#"[features] +plugins = true + +[plugins."sample-plugin@debug"] +enabled = true +"#, + ); + + assert!( + refresh_non_curated_plugin_cache_force_reinstall( + tmp.path(), + &[AbsolutePathBuf::try_from(repo_root).unwrap()], + ) + .expect("cache refresh should reinstall unchanged local version") + ); + + assert_eq!( + fs::read_to_string( + tmp.path() + .join("plugins/cache/debug/sample-plugin/local/skills/SKILL.md") + ) + .unwrap(), + "new skill" + ); +} + #[test] fn refresh_non_curated_plugin_cache_ignores_invalid_unconfigured_plugin_versions() { let tmp = tempfile::tempdir().unwrap(); @@ -2720,8 +2799,8 @@ enabled = true ); } -#[test] -fn load_plugins_ignores_project_config_files() { +#[tokio::test] +async fn load_plugins_ignores_project_config_files() { let codex_home = TempDir::new().unwrap(); let project_root = codex_home.path().join("project"); let plugin_root = codex_home @@ -2757,7 +2836,8 @@ fn load_plugins_ignores_project_config_files() { &stack, &PluginStore::new(codex_home.path().to_path_buf()), Some(Product::Codex), - ); + ) + .await; assert_eq!(outcome, PluginLoadOutcome::default()); } diff --git a/codex-rs/core/src/plugins/marketplace_add.rs b/codex-rs/core/src/plugins/marketplace_add.rs new file mode 100644 index 0000000000..3ea0445837 --- /dev/null +++ b/codex-rs/core/src/plugins/marketplace_add.rs @@ -0,0 +1,397 @@ +use super::OPENAI_CURATED_MARKETPLACE_NAME; +use super::marketplace_install_root; +use codex_utils_absolute_path::AbsolutePathBuf; +use std::fs; +use std::path::Path; +use std::path::PathBuf; +use tempfile::Builder; + +mod install; +mod metadata; +mod source; + +use install::clone_git_source; +use install::ensure_marketplace_destination_is_inside_install_root; +use install::marketplace_staging_root; +use install::replace_marketplace_root; +use install::safe_marketplace_dir_name; +use 
metadata::MarketplaceInstallMetadata; +use metadata::find_marketplace_root_by_name; +use metadata::installed_marketplace_root_for_source; +use metadata::record_added_marketplace_entry; +use source::MarketplaceSource; +pub(crate) use source::parse_marketplace_source; +use source::stage_marketplace_source; +use source::validate_marketplace_source_root; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MarketplaceAddRequest { + pub source: String, + pub ref_name: Option, + pub sparse_paths: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MarketplaceAddOutcome { + pub marketplace_name: String, + pub source_display: String, + pub installed_root: AbsolutePathBuf, + pub already_added: bool, +} + +#[derive(Debug, thiserror::Error)] +pub enum MarketplaceAddError { + #[error("{0}")] + InvalidRequest(String), + #[error("{0}")] + Internal(String), +} + +pub async fn add_marketplace( + codex_home: PathBuf, + request: MarketplaceAddRequest, +) -> Result { + tokio::task::spawn_blocking(move || add_marketplace_sync(codex_home.as_path(), request)) + .await + .map_err(|err| MarketplaceAddError::Internal(format!("failed to add marketplace: {err}")))? +} + +fn add_marketplace_sync( + codex_home: &Path, + request: MarketplaceAddRequest, +) -> Result { + add_marketplace_sync_with_cloner(codex_home, request, clone_git_source) +} + +fn add_marketplace_sync_with_cloner( + codex_home: &Path, + request: MarketplaceAddRequest, + clone_source: F, +) -> Result +where + F: Fn(&str, Option<&str>, &[String], &Path) -> Result<(), MarketplaceAddError>, +{ + let MarketplaceAddRequest { + source, + ref_name, + sparse_paths, + } = request; + let source = parse_marketplace_source(&source, ref_name)?; + if !sparse_paths.is_empty() && !matches!(source, MarketplaceSource::Git { .. 
}) { + return Err(MarketplaceAddError::InvalidRequest( + "--sparse is only supported for git marketplace sources".to_string(), + )); + } + + let install_root = marketplace_install_root(codex_home); + fs::create_dir_all(&install_root).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to create marketplace install directory {}: {err}", + install_root.display() + )) + })?; + + let install_metadata = MarketplaceInstallMetadata::from_source(&source, &sparse_paths); + if let Some(existing_root) = + installed_marketplace_root_for_source(codex_home, &install_root, &install_metadata)? + { + let marketplace_name = validate_marketplace_source_root(&existing_root)?; + record_added_marketplace_entry(codex_home, &marketplace_name, &install_metadata)?; + return Ok(MarketplaceAddOutcome { + marketplace_name, + source_display: source.display(), + installed_root: AbsolutePathBuf::try_from(existing_root).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to resolve installed marketplace root: {err}" + )) + })?, + already_added: true, + }); + } + + if let MarketplaceSource::Local { path } = &source { + let marketplace_name = validate_marketplace_source_root(path)?; + if marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME { + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace '{OPENAI_CURATED_MARKETPLACE_NAME}' is reserved and cannot be added from {}", + source.display() + ))); + } + if find_marketplace_root_by_name(codex_home, &install_root, &marketplace_name)?.is_some() { + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace '{marketplace_name}' is already added from a different source; remove it before adding {}", + source.display() + ))); + } + record_added_marketplace_entry(codex_home, &marketplace_name, &install_metadata)?; + return Ok(MarketplaceAddOutcome { + marketplace_name, + source_display: source.display(), + installed_root: AbsolutePathBuf::try_from(path.clone()).map_err(|err| { + 
MarketplaceAddError::Internal(format!( + "failed to resolve installed marketplace root: {err}" + )) + })?, + already_added: false, + }); + } + + let staging_root = marketplace_staging_root(&install_root); + fs::create_dir_all(&staging_root).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to create marketplace staging directory {}: {err}", + staging_root.display() + )) + })?; + let staged_root = Builder::new() + .prefix("marketplace-add-") + .tempdir_in(&staging_root) + .map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to create temporary marketplace directory in {}: {err}", + staging_root.display() + )) + })?; + let staged_root = staged_root.keep(); + + stage_marketplace_source(&source, &sparse_paths, &staged_root, clone_source)?; + + let marketplace_name = validate_marketplace_source_root(&staged_root)?; + if marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME { + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace '{OPENAI_CURATED_MARKETPLACE_NAME}' is reserved and cannot be added from {}", + source.display() + ))); + } + + let destination = install_root.join(safe_marketplace_dir_name(&marketplace_name)?); + ensure_marketplace_destination_is_inside_install_root(&install_root, &destination)?; + if destination.exists() { + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace '{marketplace_name}' is already added from a different source; remove it before adding {}", + source.display() + ))); + } + + replace_marketplace_root(&staged_root, &destination).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to install marketplace at {}: {err}", + destination.display() + )) + })?; + if let Err(err) = + record_added_marketplace_entry(codex_home, &marketplace_name, &install_metadata) + { + if let Err(rollback_err) = fs::rename(&destination, &staged_root) { + return Err(MarketplaceAddError::Internal(format!( + "{err}; additionally failed to roll back installed marketplace at {}: 
{rollback_err}", + destination.display() + ))); + } + return Err(err); + } + + Ok(MarketplaceAddOutcome { + marketplace_name, + source_display: source.display(), + installed_root: AbsolutePathBuf::try_from(destination).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to resolve installed marketplace root: {err}" + )) + })?, + already_added: false, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::Result; + use pretty_assertions::assert_eq; + use tempfile::TempDir; + + #[test] + fn add_marketplace_sync_installs_marketplace_and_updates_config() -> Result<()> { + let codex_home = TempDir::new()?; + let source_root = TempDir::new()?; + write_marketplace_source(source_root.path(), "remote copy")?; + + let result = add_marketplace_sync_with_cloner( + codex_home.path(), + MarketplaceAddRequest { + source: "https://github.com/owner/repo.git".to_string(), + ref_name: None, + sparse_paths: Vec::new(), + }, + |_url, _ref_name, _sparse_paths, destination| { + copy_dir_all(source_root.path(), destination) + .map_err(|err| MarketplaceAddError::Internal(err.to_string())) + }, + )?; + + assert_eq!(result.marketplace_name, "debug"); + assert_eq!(result.source_display, "https://github.com/owner/repo.git"); + assert!(!result.already_added); + assert!( + result + .installed_root + .as_path() + .join(".agents/plugins/marketplace.json") + .is_file() + ); + + let config = fs::read_to_string(codex_home.path().join(codex_config::CONFIG_TOML_FILE))?; + assert!(config.contains("[marketplaces.debug]")); + assert!(config.contains("source_type = \"git\"")); + assert!(config.contains("source = \"https://github.com/owner/repo.git\"")); + Ok(()) + } + + #[test] + fn add_marketplace_sync_installs_local_directory_source_and_updates_config() -> Result<()> { + let codex_home = TempDir::new()?; + let source_root = TempDir::new()?; + write_marketplace_source(source_root.path(), "local copy")?; + + let result = add_marketplace_sync_with_cloner( + codex_home.path(), + 
MarketplaceAddRequest { + source: source_root.path().display().to_string(), + ref_name: None, + sparse_paths: Vec::new(), + }, + |_url, _ref_name, _sparse_paths, _destination| { + panic!("git cloner should not be called for local marketplace sources") + }, + )?; + + let expected_source = source_root.path().canonicalize()?.display().to_string(); + assert_eq!(result.marketplace_name, "debug"); + assert_eq!(result.source_display, expected_source); + assert_eq!( + result.installed_root.as_path(), + source_root.path().canonicalize()? + ); + assert!(!result.already_added); + assert!( + !marketplace_install_root(codex_home.path()) + .join("debug") + .exists() + ); + + let config = fs::read_to_string(codex_home.path().join(codex_config::CONFIG_TOML_FILE))?; + let config: toml::Value = toml::from_str(&config)?; + assert_eq!( + config["marketplaces"]["debug"]["source_type"].as_str(), + Some("local") + ); + assert_eq!( + config["marketplaces"]["debug"]["source"].as_str(), + Some(expected_source.as_str()) + ); + Ok(()) + } + + #[test] + fn add_marketplace_sync_rejects_sparse_checkout_for_local_directory_source() -> Result<()> { + let codex_home = TempDir::new()?; + let source_root = TempDir::new()?; + write_marketplace_source(source_root.path(), "local copy")?; + + let err = add_marketplace_sync_with_cloner( + codex_home.path(), + MarketplaceAddRequest { + source: source_root.path().display().to_string(), + ref_name: None, + sparse_paths: vec![".agents".to_string()], + }, + |_url, _ref_name, _sparse_paths, _destination| { + panic!("git cloner should not be called for local marketplace sources") + }, + ) + .unwrap_err(); + + assert_eq!( + err.to_string(), + "--sparse is only supported for git marketplace sources" + ); + assert!( + !codex_home + .path() + .join(codex_config::CONFIG_TOML_FILE) + .exists() + ); + Ok(()) + } + + #[test] + fn add_marketplace_sync_treats_existing_local_directory_source_as_already_added() -> Result<()> + { + let codex_home = TempDir::new()?; + let 
source_root = TempDir::new()?; + write_marketplace_source(source_root.path(), "local copy")?; + + let request = MarketplaceAddRequest { + source: source_root.path().display().to_string(), + ref_name: None, + sparse_paths: Vec::new(), + }; + let first_result = add_marketplace_sync_with_cloner(codex_home.path(), request.clone(), { + |_url, _ref_name, _sparse_paths, _destination| { + panic!("git cloner should not be called for local marketplace sources") + } + })?; + let second_result = add_marketplace_sync_with_cloner(codex_home.path(), request, { + |_url, _ref_name, _sparse_paths, _destination| { + panic!("git cloner should not be called for local marketplace sources") + } + })?; + + assert!(!first_result.already_added); + assert!(second_result.already_added); + assert_eq!(second_result.installed_root, first_result.installed_root); + + Ok(()) + } + + fn write_marketplace_source(source: &Path, marker: &str) -> std::io::Result<()> { + fs::create_dir_all(source.join(".agents/plugins"))?; + fs::create_dir_all(source.join("plugins/sample/.codex-plugin"))?; + fs::write( + source.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample", + "source": { + "source": "local", + "path": "./plugins/sample" + } + } + ] +}"#, + )?; + fs::write( + source.join("plugins/sample/.codex-plugin/plugin.json"), + r#"{"name":"sample"}"#, + )?; + fs::write(source.join("plugins/sample/marker.txt"), marker)?; + Ok(()) + } + + fn copy_dir_all(source: &Path, destination: &Path) -> std::io::Result<()> { + fs::create_dir_all(destination)?; + for entry in fs::read_dir(source)? 
{ + let entry = entry?; + let source_path = entry.path(); + let destination_path = destination.join(entry.file_name()); + if source_path.is_dir() { + copy_dir_all(&source_path, &destination_path)?; + } else { + fs::copy(&source_path, &destination_path)?; + } + } + Ok(()) + } +} diff --git a/codex-rs/core/src/plugins/marketplace_add/install.rs b/codex-rs/core/src/plugins/marketplace_add/install.rs new file mode 100644 index 0000000000..1ecfa050d3 --- /dev/null +++ b/codex-rs/core/src/plugins/marketplace_add/install.rs @@ -0,0 +1,137 @@ +use super::MarketplaceAddError; +use std::fs; +use std::path::Path; +use std::path::PathBuf; +use std::process::Command; + +pub(super) fn clone_git_source( + url: &str, + ref_name: Option<&str>, + sparse_paths: &[String], + destination: &Path, +) -> Result<(), MarketplaceAddError> { + let destination_string = destination.to_string_lossy().to_string(); + if sparse_paths.is_empty() { + run_git( + &["clone", url, destination_string.as_str()], + /*cwd*/ None, + )?; + if let Some(ref_name) = ref_name { + run_git( + &["checkout", ref_name], + Some(Path::new(&destination_string)), + )?; + } + return Ok(()); + } + + run_git( + &[ + "clone", + "--filter=blob:none", + "--no-checkout", + url, + destination_string.as_str(), + ], + /*cwd*/ None, + )?; + let mut sparse_args = vec!["sparse-checkout", "set"]; + sparse_args.extend(sparse_paths.iter().map(String::as_str)); + run_git(&sparse_args, Some(destination))?; + run_git(&["checkout", ref_name.unwrap_or("HEAD")], Some(destination))?; + Ok(()) +} + +pub(super) fn safe_marketplace_dir_name( + marketplace_name: &str, +) -> Result { + let safe = marketplace_name + .chars() + .map(|ch| { + if ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.') { + ch + } else { + '-' + } + }) + .collect::(); + let safe = safe.trim_matches('.').to_string(); + if safe.is_empty() || safe == ".." 
{ + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace name '{marketplace_name}' cannot be used as an install directory" + ))); + } + Ok(safe) +} + +pub(super) fn ensure_marketplace_destination_is_inside_install_root( + install_root: &Path, + destination: &Path, +) -> Result<(), MarketplaceAddError> { + let install_root = install_root.canonicalize().map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to resolve marketplace install root {}: {err}", + install_root.display() + )) + })?; + let destination_parent = destination + .parent() + .ok_or_else(|| { + MarketplaceAddError::Internal("marketplace destination has no parent".to_string()) + })? + .canonicalize() + .map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to resolve marketplace destination parent {}: {err}", + destination.display() + )) + })?; + if !destination_parent.starts_with(&install_root) { + return Err(MarketplaceAddError::InvalidRequest(format!( + "marketplace destination {} is outside install root {}", + destination.display(), + install_root.display() + ))); + } + Ok(()) +} + +pub(super) fn replace_marketplace_root( + staged_root: &Path, + destination: &Path, +) -> std::io::Result<()> { + if let Some(parent) = destination.parent() { + fs::create_dir_all(parent)?; + } + fs::rename(staged_root, destination) +} + +pub(super) fn marketplace_staging_root(install_root: &Path) -> PathBuf { + install_root.join(".staging") +} + +fn run_git(args: &[&str], cwd: Option<&Path>) -> Result<(), MarketplaceAddError> { + let mut command = Command::new("git"); + command.args(args); + command.env("GIT_TERMINAL_PROMPT", "0"); + if let Some(cwd) = cwd { + command.current_dir(cwd); + } + + let output = command.output().map_err(|err| { + MarketplaceAddError::Internal(format!("failed to run git {}: {err}", args.join(" "))) + })?; + if output.status.success() { + return Ok(()); + } + + let stderr = String::from_utf8_lossy(&output.stderr); + let stdout = 
String::from_utf8_lossy(&output.stdout); + Err(MarketplaceAddError::Internal(format!( + "git {} failed with status {}\nstdout:\n{}\nstderr:\n{}", + args.join(" "), + output.status, + stdout.trim(), + stderr.trim() + ))) +} diff --git a/codex-rs/core/src/plugins/marketplace_add/metadata.rs b/codex-rs/core/src/plugins/marketplace_add/metadata.rs new file mode 100644 index 0000000000..ccded11c87 --- /dev/null +++ b/codex-rs/core/src/plugins/marketplace_add/metadata.rs @@ -0,0 +1,315 @@ +use super::MarketplaceAddError; +use super::MarketplaceSource; +use crate::plugins::installed_marketplaces::resolve_configured_marketplace_root; +use codex_config::CONFIG_TOML_FILE; +use codex_config::MarketplaceConfigUpdate; +use codex_config::record_user_marketplace; +use codex_core_plugins::marketplace::validate_marketplace_root; +use std::fs; +use std::io::ErrorKind; +use std::path::Path; +use std::path::PathBuf; +use std::time::SystemTime; +use std::time::UNIX_EPOCH; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct MarketplaceInstallMetadata { + source: InstalledMarketplaceSource, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum InstalledMarketplaceSource { + Git { + url: String, + ref_name: Option, + sparse_paths: Vec, + }, + Local { + path: String, + }, +} + +pub(super) fn record_added_marketplace_entry( + codex_home: &Path, + marketplace_name: &str, + install_metadata: &MarketplaceInstallMetadata, +) -> Result<(), MarketplaceAddError> { + let source = install_metadata.config_source(); + let timestamp = utc_timestamp_now()?; + let update = MarketplaceConfigUpdate { + last_updated: ×tamp, + last_revision: None, + source_type: install_metadata.config_source_type(), + source: &source, + ref_name: install_metadata.ref_name(), + sparse_paths: install_metadata.sparse_paths(), + }; + + record_user_marketplace(codex_home, marketplace_name, &update).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to add marketplace '{marketplace_name}' to user 
config.toml: {err}" + )) + }) +} + +pub(super) fn installed_marketplace_root_for_source( + codex_home: &Path, + install_root: &Path, + install_metadata: &MarketplaceInstallMetadata, +) -> Result, MarketplaceAddError> { + let config_path = codex_home.join(CONFIG_TOML_FILE); + let config = match fs::read_to_string(&config_path) { + Ok(config) => config, + Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None), + Err(err) => { + return Err(MarketplaceAddError::Internal(format!( + "failed to read user config {}: {err}", + config_path.display() + ))); + } + }; + let config: toml::Value = toml::from_str(&config).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to parse user config {}: {err}", + config_path.display() + )) + })?; + let Some(marketplaces) = config.get("marketplaces").and_then(toml::Value::as_table) else { + return Ok(None); + }; + + for (marketplace_name, marketplace) in marketplaces { + if !install_metadata.matches_config(marketplace) { + continue; + } + let Some(root) = + resolve_configured_marketplace_root(marketplace_name, marketplace, install_root) + else { + continue; + }; + if validate_marketplace_root(&root).is_ok() { + return Ok(Some(root)); + } + } + + Ok(None) +} + +pub(super) fn find_marketplace_root_by_name( + codex_home: &Path, + install_root: &Path, + marketplace_name: &str, +) -> Result, MarketplaceAddError> { + let config_path = codex_home.join(CONFIG_TOML_FILE); + let config = match fs::read_to_string(&config_path) { + Ok(config) => config, + Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None), + Err(err) => { + return Err(MarketplaceAddError::Internal(format!( + "failed to read user config {}: {err}", + config_path.display() + ))); + } + }; + let config: toml::Value = toml::from_str(&config).map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to parse user config {}: {err}", + config_path.display() + )) + })?; + let Some(marketplace) = config + .get("marketplaces") + 
.and_then(toml::Value::as_table) + .and_then(|marketplaces| marketplaces.get(marketplace_name)) + else { + return Ok(None); + }; + + let Some(root) = + resolve_configured_marketplace_root(marketplace_name, marketplace, install_root) + else { + return Ok(None); + }; + if validate_marketplace_root(&root).is_ok() { + Ok(Some(root)) + } else { + Ok(None) + } +} + +impl MarketplaceInstallMetadata { + pub(super) fn from_source(source: &MarketplaceSource, sparse_paths: &[String]) -> Self { + let source = match source { + MarketplaceSource::Git { url, ref_name } => InstalledMarketplaceSource::Git { + url: url.clone(), + ref_name: ref_name.clone(), + sparse_paths: sparse_paths.to_vec(), + }, + MarketplaceSource::Local { path } => InstalledMarketplaceSource::Local { + path: path.display().to_string(), + }, + }; + Self { source } + } + + fn config_source_type(&self) -> &'static str { + match &self.source { + InstalledMarketplaceSource::Git { .. } => "git", + InstalledMarketplaceSource::Local { .. } => "local", + } + } + + fn config_source(&self) -> String { + match &self.source { + InstalledMarketplaceSource::Git { url, .. } => url.clone(), + InstalledMarketplaceSource::Local { path } => path.clone(), + } + } + + fn ref_name(&self) -> Option<&str> { + match &self.source { + InstalledMarketplaceSource::Git { ref_name, .. } => ref_name.as_deref(), + InstalledMarketplaceSource::Local { .. } => None, + } + } + + fn sparse_paths(&self) -> &[String] { + match &self.source { + InstalledMarketplaceSource::Git { sparse_paths, .. } => sparse_paths, + InstalledMarketplaceSource::Local { .. 
} => &[], + } + } + + fn matches_config(&self, marketplace: &toml::Value) -> bool { + marketplace.get("source_type").and_then(toml::Value::as_str) + == Some(self.config_source_type()) + && marketplace.get("source").and_then(toml::Value::as_str) + == Some(self.config_source().as_str()) + && marketplace.get("ref").and_then(toml::Value::as_str) == self.ref_name() + && config_sparse_paths(marketplace) == self.sparse_paths() + } +} + +fn config_sparse_paths(marketplace: &toml::Value) -> Vec { + marketplace + .get("sparse_paths") + .and_then(toml::Value::as_array) + .map(|paths| { + paths + .iter() + .filter_map(toml::Value::as_str) + .map(str::to_string) + .collect() + }) + .unwrap_or_default() +} + +fn utc_timestamp_now() -> Result { + let duration = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|err| { + MarketplaceAddError::Internal(format!("system clock is before Unix epoch: {err}")) + })?; + Ok(format_utc_timestamp(duration.as_secs() as i64)) +} + +fn format_utc_timestamp(seconds_since_epoch: i64) -> String { + const SECONDS_PER_DAY: i64 = 86_400; + let days = seconds_since_epoch.div_euclid(SECONDS_PER_DAY); + let seconds_of_day = seconds_since_epoch.rem_euclid(SECONDS_PER_DAY); + let (year, month, day) = civil_from_days(days); + let hour = seconds_of_day / 3_600; + let minute = (seconds_of_day % 3_600) / 60; + let second = seconds_of_day % 60; + format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z") +} + +fn civil_from_days(days_since_epoch: i64) -> (i64, i64, i64) { + let days = days_since_epoch + 719_468; + let era = if days >= 0 { days } else { days - 146_096 } / 146_097; + let day_of_era = days - era * 146_097; + let year_of_era = + (day_of_era - day_of_era / 1_460 + day_of_era / 36_524 - day_of_era / 146_096) / 365; + let mut year = year_of_era + era * 400; + let day_of_year = day_of_era - (365 * year_of_era + year_of_era / 4 - year_of_era / 100); + let month_prime = (5 * day_of_year + 2) / 153; + let day = day_of_year - (153 
* month_prime + 2) / 5 + 1; + let month = month_prime + if month_prime < 10 { 3 } else { -9 }; + year += if month <= 2 { 1 } else { 0 }; + (year, month, day) +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use tempfile::TempDir; + + #[test] + fn utc_timestamp_formats_unix_epoch_as_rfc3339_utc() { + assert_eq!( + format_utc_timestamp(/*seconds_since_epoch*/ 0), + "1970-01-01T00:00:00Z" + ); + assert_eq!( + format_utc_timestamp(/*seconds_since_epoch*/ 1_775_779_200), + "2026-04-10T00:00:00Z" + ); + } + + #[test] + fn installed_marketplace_root_for_source_propagates_config_read_errors() { + let codex_home = TempDir::new().unwrap(); + let config_path = codex_home.path().join(CONFIG_TOML_FILE); + fs::create_dir(&config_path).unwrap(); + + let install_root = codex_home.path().join("marketplaces"); + let source = MarketplaceSource::Git { + url: "https://github.com/owner/repo.git".to_string(), + ref_name: None, + }; + let install_metadata = MarketplaceInstallMetadata::from_source(&source, &[]); + + let err = installed_marketplace_root_for_source( + codex_home.path(), + &install_root, + &install_metadata, + ) + .unwrap_err(); + + assert!( + err.to_string().contains(&format!( + "failed to read user config {}:", + config_path.display() + )), + "unexpected error: {err}" + ); + } + + #[test] + fn installed_marketplace_root_for_source_uses_local_source_root() { + let codex_home = TempDir::new().unwrap(); + let install_root = codex_home.path().join("marketplaces"); + let source_root = codex_home.path().join("source"); + fs::create_dir_all(source_root.join(".agents/plugins")).unwrap(); + fs::write( + source_root.join(".agents/plugins/marketplace.json"), + r#"{"name":"debug","plugins":[]}"#, + ) + .unwrap(); + let source = MarketplaceSource::Local { + path: source_root.clone(), + }; + let install_metadata = MarketplaceInstallMetadata::from_source(&source, &[]); + record_added_marketplace_entry(codex_home.path(), "debug", 
&install_metadata).unwrap(); + + let root = installed_marketplace_root_for_source( + codex_home.path(), + &install_root, + &install_metadata, + ) + .unwrap(); + + assert_eq!(root, Some(source_root)); + } +} diff --git a/codex-rs/core/src/plugins/marketplace_add/source.rs b/codex-rs/core/src/plugins/marketplace_add/source.rs new file mode 100644 index 0000000000..e723c8865a --- /dev/null +++ b/codex-rs/core/src/plugins/marketplace_add/source.rs @@ -0,0 +1,392 @@ +use super::MarketplaceAddError; +use crate::plugins::validate_plugin_segment; +use codex_core_plugins::marketplace::validate_marketplace_root; +use std::path::Path; +use std::path::PathBuf; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum MarketplaceSource { + Git { + url: String, + ref_name: Option, + }, + Local { + path: PathBuf, + }, +} + +pub(crate) fn parse_marketplace_source( + source: &str, + explicit_ref: Option, +) -> Result { + let source = source.trim(); + if source.is_empty() { + return Err(MarketplaceAddError::InvalidRequest( + "marketplace source must not be empty".to_string(), + )); + } + + let (base_source, parsed_ref) = split_source_ref(source); + let ref_name = explicit_ref.or(parsed_ref); + + if looks_like_local_path(&base_source) { + if ref_name.is_some() { + return Err(MarketplaceAddError::InvalidRequest( + "--ref is only supported for git marketplace sources".to_string(), + )); + } + let path = resolve_local_source_path(&base_source)?; + if path.is_file() { + return Err(MarketplaceAddError::InvalidRequest( + "local marketplace source must be a directory, not a file".to_string(), + )); + } + return Ok(MarketplaceSource::Local { path }); + } + + if is_ssh_git_url(&base_source) || is_git_url(&base_source) { + return Ok(MarketplaceSource::Git { + url: normalize_git_url(&base_source), + ref_name, + }); + } + + if looks_like_github_shorthand(&base_source) { + return Ok(MarketplaceSource::Git { + url: format!("https://github.com/{base_source}.git"), + ref_name, + }); + } + + 
Err(MarketplaceAddError::InvalidRequest(format!( + "invalid marketplace source format: {source}" + ))) +} + +pub(super) fn stage_marketplace_source( + source: &MarketplaceSource, + sparse_paths: &[String], + staged_root: &Path, + clone_source: F, +) -> Result<(), MarketplaceAddError> +where + F: Fn(&str, Option<&str>, &[String], &Path) -> Result<(), MarketplaceAddError>, +{ + if !sparse_paths.is_empty() && !matches!(source, MarketplaceSource::Git { .. }) { + return Err(MarketplaceAddError::InvalidRequest( + "--sparse is only supported for git marketplace sources".to_string(), + )); + } + + match source { + MarketplaceSource::Git { url, ref_name } => { + clone_source(url, ref_name.as_deref(), sparse_paths, staged_root) + } + MarketplaceSource::Local { .. } => unreachable!( + "local marketplace sources are added without staging a copied install root" + ), + } +} + +pub(super) fn validate_marketplace_source_root(root: &Path) -> Result { + let marketplace_name = validate_marketplace_root(root) + .map_err(|err| MarketplaceAddError::InvalidRequest(err.to_string()))?; + validate_plugin_segment(&marketplace_name, "marketplace name") + .map_err(MarketplaceAddError::InvalidRequest)?; + Ok(marketplace_name) +} + +fn split_source_ref(source: &str) -> (String, Option) { + if let Some((base, ref_name)) = source.rsplit_once('#') { + return (base.to_string(), non_empty_ref(ref_name)); + } + if !source.contains("://") + && !is_ssh_git_url(source) + && let Some((base, ref_name)) = source.rsplit_once('@') + { + return (base.to_string(), non_empty_ref(ref_name)); + } + (source.to_string(), None) +} + +fn non_empty_ref(ref_name: &str) -> Option { + let ref_name = ref_name.trim(); + (!ref_name.is_empty()).then(|| ref_name.to_string()) +} + +fn normalize_git_url(url: &str) -> String { + let url = url.trim_end_matches('/'); + if url.starts_with("https://github.com/") && !url.ends_with(".git") { + format!("{url}.git") + } else { + url.to_string() + } +} + +fn looks_like_local_path(source: 
&str) -> bool { + Path::new(source).is_absolute() + || looks_like_windows_absolute_path(source) + || source.starts_with("./") + || source.starts_with(".\\") + || source.starts_with("../") + || source.starts_with("..\\") + || source.starts_with("~/") + || source == "." + || source == ".." +} + +fn looks_like_windows_absolute_path(source: &str) -> bool { + let bytes = source.as_bytes(); + bytes.len() >= 3 + && bytes[0].is_ascii_alphabetic() + && bytes[1] == b':' + && matches!(bytes[2], b'\\' | b'/') + || source.starts_with(r"\\") +} + +fn resolve_local_source_path(source: &str) -> Result { + let path = expand_tilde_path(source); + let path = if path.is_absolute() { + path + } else { + std::env::current_dir() + .map_err(|err| { + MarketplaceAddError::Internal(format!( + "failed to read current working directory for local marketplace source: {err}" + )) + })? + .join(path) + }; + + path.canonicalize().map_err(|err| { + MarketplaceAddError::InvalidRequest(format!( + "failed to resolve local marketplace source {}: {err}", + path.display() + )) + }) +} + +fn expand_tilde_path(source: &str) -> PathBuf { + let Some(rest) = source.strip_prefix("~/") else { + return PathBuf::from(source); + }; + let Some(home) = std::env::var_os("HOME").or_else(|| std::env::var_os("USERPROFILE")) else { + return PathBuf::from(source); + }; + PathBuf::from(home).join(rest) +} + +fn is_ssh_git_url(source: &str) -> bool { + source.starts_with("ssh://") || source.starts_with("git@") && source.contains(':') +} + +fn is_git_url(source: &str) -> bool { + source.starts_with("http://") || source.starts_with("https://") +} + +fn looks_like_github_shorthand(source: &str) -> bool { + let mut segments = source.split('/'); + let owner = segments.next(); + let repo = segments.next(); + let extra = segments.next(); + owner.is_some_and(is_github_shorthand_segment) + && repo.is_some_and(is_github_shorthand_segment) + && extra.is_none() +} + +fn is_github_shorthand_segment(segment: &str) -> bool { + 
!segment.is_empty() + && segment + .chars() + .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.')) +} + +impl MarketplaceSource { + pub(super) fn display(&self) -> String { + match self { + Self::Git { url, ref_name } => match ref_name { + Some(ref_name) => format!("{url}#{ref_name}"), + None => url.clone(), + }, + Self::Local { path } => path.display().to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use tempfile::TempDir; + + #[test] + fn github_shorthand_parses_ref_suffix() { + assert_eq!( + parse_marketplace_source("owner/repo@main", /*explicit_ref*/ None).unwrap(), + MarketplaceSource::Git { + url: "https://github.com/owner/repo.git".to_string(), + ref_name: Some("main".to_string()), + } + ); + } + + #[test] + fn git_url_parses_fragment_ref() { + assert_eq!( + parse_marketplace_source( + "https://example.com/team/repo.git#v1", + /*explicit_ref*/ None + ) + .unwrap(), + MarketplaceSource::Git { + url: "https://example.com/team/repo.git".to_string(), + ref_name: Some("v1".to_string()), + } + ); + } + + #[test] + fn explicit_ref_overrides_source_ref() { + assert_eq!( + parse_marketplace_source("owner/repo@main", Some("release".to_string())).unwrap(), + MarketplaceSource::Git { + url: "https://github.com/owner/repo.git".to_string(), + ref_name: Some("release".to_string()), + } + ); + } + + #[test] + fn github_shorthand_and_git_url_normalize_to_same_source() { + let shorthand = parse_marketplace_source("owner/repo", /*explicit_ref*/ None).unwrap(); + let git_url = parse_marketplace_source( + "https://github.com/owner/repo.git", + /*explicit_ref*/ None, + ) + .unwrap(); + + assert_eq!(shorthand, git_url); + assert_eq!( + shorthand, + MarketplaceSource::Git { + url: "https://github.com/owner/repo.git".to_string(), + ref_name: None, + } + ); + } + + #[test] + fn github_url_with_trailing_slash_normalizes_without_extra_path_segment() { + assert_eq!( + 
parse_marketplace_source("https://github.com/owner/repo/", /*explicit_ref*/ None) + .unwrap(), + MarketplaceSource::Git { + url: "https://github.com/owner/repo.git".to_string(), + ref_name: None, + } + ); + } + + #[test] + fn non_github_https_source_parses_as_git_url() { + assert_eq!( + parse_marketplace_source("https://gitlab.com/owner/repo", /*explicit_ref*/ None) + .unwrap(), + MarketplaceSource::Git { + url: "https://gitlab.com/owner/repo".to_string(), + ref_name: None, + } + ); + } + + #[test] + fn file_url_source_is_rejected() { + let err = + parse_marketplace_source("file:///tmp/marketplace.git", /*explicit_ref*/ None) + .unwrap_err(); + + assert!( + err.to_string() + .contains("invalid marketplace source format"), + "unexpected error: {err}" + ); + } + + #[test] + fn local_path_source_parses() { + let source = parse_marketplace_source(".", /*explicit_ref*/ None).unwrap(); + + let MarketplaceSource::Local { path } = source else { + panic!("expected local path source"); + }; + assert!(path.is_absolute()); + } + + #[test] + fn windows_absolute_paths_look_like_local_paths_on_every_host() { + assert!(looks_like_local_path(r"C:\Users\alice\marketplace")); + assert!(looks_like_local_path("C:/Users/alice/marketplace")); + assert!(looks_like_local_path(r"\\server\share\marketplace")); + assert!(!looks_like_local_path(r"C:relative\path")); + } + + #[test] + fn local_file_source_is_rejected() { + let tempdir = TempDir::new().unwrap(); + let file = tempdir.path().join("marketplace.json"); + std::fs::write(&file, "{}").unwrap(); + + let err = + parse_marketplace_source(file.to_str().unwrap(), /*explicit_ref*/ None).unwrap_err(); + + assert!( + err.to_string() + .contains("local marketplace source must be a directory, not a file"), + "unexpected error: {err}" + ); + } + + #[test] + fn non_git_sources_reject_ref_override() { + let err = parse_marketplace_source("./marketplace", Some("main".to_string())).unwrap_err(); + + assert!( + err.to_string() + .contains("--ref is 
only supported for git marketplace sources"), + "unexpected error: {err}" + ); + } + + #[test] + fn non_git_sources_reject_sparse_checkout() { + let path = std::env::current_dir().unwrap(); + let err = stage_marketplace_source( + &MarketplaceSource::Local { path }, + &["plugins/foo".to_string()], + Path::new("/tmp"), + |_url, _ref_name, _sparse_paths, _staged_root| Ok(()), + ) + .unwrap_err(); + + assert!( + err.to_string() + .contains("--sparse is only supported for git marketplace sources"), + "unexpected error: {err}" + ); + } + + #[test] + fn ssh_url_parses_as_git_url() { + assert_eq!( + parse_marketplace_source( + "ssh://git@github.com/owner/repo.git#main", + /*explicit_ref*/ None, + ) + .unwrap(), + MarketplaceSource::Git { + url: "ssh://git@github.com/owner/repo.git".to_string(), + ref_name: Some("main".to_string()), + } + ); + } +} diff --git a/codex-rs/core/src/plugins/mentions.rs b/codex-rs/core/src/plugins/mentions.rs index 5e11b9b91e..a5e94345cb 100644 --- a/codex-rs/core/src/plugins/mentions.rs +++ b/codex-rs/core/src/plugins/mentions.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::collections::HashSet; +use codex_connectors::metadata::connector_mention_slug; use codex_protocol::user_input::UserInput; use crate::connectors; @@ -108,7 +109,7 @@ pub(crate) fn build_connector_slug_counts( ) -> HashMap { let mut counts: HashMap = HashMap::new(); for connector in connectors { - let slug = connectors::connector_mention_slug(connector); + let slug = connector_mention_slug(connector); *counts.entry(slug).or_insert(0) += 1; } counts diff --git a/codex-rs/core/src/plugins/mod.rs b/codex-rs/core/src/plugins/mod.rs index 5115c3f7ea..16cc8d08c4 100644 --- a/codex-rs/core/src/plugins/mod.rs +++ b/codex-rs/core/src/plugins/mod.rs @@ -4,17 +4,15 @@ mod discoverable; mod injection; mod installed_marketplaces; mod manager; -mod manifest; -mod marketplace; +mod marketplace_add; mod mentions; -mod remote; mod render; mod startup_sync; -mod store; #[cfg(test)] 
pub(crate) mod test_support; -mod toggles; +pub use codex_core_plugins::marketplace_upgrade::ConfiguredMarketplaceUpgradeError as PluginMarketplaceUpgradeError; +pub use codex_core_plugins::marketplace_upgrade::ConfiguredMarketplaceUpgradeOutcome as PluginMarketplaceUpgradeOutcome; pub use codex_plugin::AppConnectorId; pub use codex_plugin::EffectiveSkillRoots; pub use codex_plugin::PluginCapabilitySummary; @@ -26,6 +24,7 @@ pub use codex_plugin::validate_plugin_segment; pub type LoadedPlugin = codex_plugin::LoadedPlugin; pub type PluginLoadOutcome = codex_plugin::PluginLoadOutcome; +pub(crate) use codex_core_plugins::marketplace::find_marketplace_manifest_path; pub(crate) use discoverable::list_tool_suggest_discoverable_plugins; pub(crate) use injection::build_plugin_injections; pub use installed_marketplaces::INSTALLED_MARKETPLACES_DIR; @@ -44,28 +43,17 @@ pub use manager::PluginRemoteSyncError; pub use manager::PluginUninstallError; pub use manager::PluginsManager; pub use manager::RemotePluginSyncResult; -pub use manager::installed_plugin_telemetry_metadata; -pub use manager::load_plugin_apps; -pub use manager::load_plugin_mcp_servers; -pub use manager::plugin_telemetry_metadata_from_root; -pub use manifest::PluginManifestInterface; -pub(crate) use manifest::PluginManifestPaths; -pub(crate) use manifest::load_plugin_manifest; -pub use marketplace::MarketplaceError; -pub use marketplace::MarketplaceListError; -pub use marketplace::MarketplacePluginAuthPolicy; -pub use marketplace::MarketplacePluginInstallPolicy; -pub use marketplace::MarketplacePluginPolicy; -pub use marketplace::MarketplacePluginSource; -pub use marketplace::validate_marketplace_root; -pub use remote::RemotePluginFetchError; -pub use remote::fetch_remote_featured_plugin_ids; +pub(crate) use manager::configured_plugins_from_stack; +pub use marketplace_add::MarketplaceAddError; +pub use marketplace_add::MarketplaceAddOutcome; +pub use marketplace_add::MarketplaceAddRequest; +pub use 
marketplace_add::add_marketplace; +pub(crate) use marketplace_add::parse_marketplace_source; pub(crate) use render::render_explicit_plugin_instructions; pub(crate) use render::render_plugins_section; pub(crate) use startup_sync::curated_plugins_repo_path; pub(crate) use startup_sync::read_curated_plugins_sha; pub(crate) use startup_sync::sync_openai_plugins_repo; -pub use toggles::collect_plugin_enabled_candidates; pub(crate) use mentions::build_connector_slug_counts; pub(crate) use mentions::build_skill_name_counts; diff --git a/codex-rs/core/src/project_doc.rs b/codex-rs/core/src/project_doc.rs deleted file mode 100644 index e7321695a7..0000000000 --- a/codex-rs/core/src/project_doc.rs +++ /dev/null @@ -1,325 +0,0 @@ -//! Project-level documentation discovery. -//! -//! Project-level documentation is primarily stored in files named `AGENTS.md`. -//! Additional fallback filenames can be configured via `project_doc_fallback_filenames`. -//! We include the concatenation of all files found along the path from the -//! project root to the current working directory as follows: -//! -//! 1. Determine the project root by walking upwards from the current working -//! directory until a configured `project_root_markers` entry is found. -//! When `project_root_markers` is unset, the default marker list is used -//! (`.git`). If no marker is found, only the current working directory is -//! considered. An empty marker list disables parent traversal. -//! 2. Collect every `AGENTS.md` found from the project root down to the -//! current working directory (inclusive) and concatenate their contents in -//! that order. -//! 3. We do **not** walk past the project root. 
- -use crate::config::Config; -use crate::config_loader::ConfigLayerStackOrdering; -use crate::config_loader::default_project_root_markers; -use crate::config_loader::merge_toml_values; -use crate::config_loader::project_root_markers_from_config; -use codex_app_server_protocol::ConfigLayerSource; -use codex_exec_server::Environment; -use codex_exec_server::ExecutorFileSystem; -use codex_features::Feature; -use codex_utils_absolute_path::AbsolutePathBuf; -use dunce::canonicalize as normalize_path; -use std::io; -use toml::Value as TomlValue; -use tracing::error; - -pub(crate) const HIERARCHICAL_AGENTS_MESSAGE: &str = - include_str!("../hierarchical_agents_message.md"); - -/// Default filename scanned for project-level docs. -pub const DEFAULT_PROJECT_DOC_FILENAME: &str = "AGENTS.md"; -/// Preferred local override for project-level docs. -pub const LOCAL_PROJECT_DOC_FILENAME: &str = "AGENTS.override.md"; - -/// When both `Config::instructions` and the project doc are present, they will -/// be concatenated with the following separator. -const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n"; - -fn render_js_repl_instructions(config: &Config) -> Option { - if !config.features.enabled(Feature::JsRepl) { - return None; - } - - let mut section = String::from("## JavaScript REPL (Node)\n"); - section.push_str( - "- Use `js_repl` for Node-backed JavaScript with top-level await in a persistent kernel.\n", - ); - section.push_str("- `js_repl` is a freeform/custom tool. Direct `js_repl` calls must send raw JavaScript tool input (optionally with first-line `// codex-js-repl: timeout_ms=15000`). Do not wrap code in JSON (for example `{\"code\":\"...\"}`), quotes, or markdown code fences.\n"); - section.push_str( - "- Helpers: `codex.cwd`, `codex.homeDir`, `codex.tmpDir`, `codex.tool(name, args?)`, and `codex.emitImage(imageLike)`.\n", - ); - section.push_str("- `codex.tool` executes a normal tool call and resolves to the raw tool output object. 
Use it for shell and non-shell tools alike. Nested tool outputs stay inside JavaScript unless you emit them explicitly.\n"); - section.push_str("- `codex.emitImage(...)` adds one image to the outer `js_repl` function output each time you call it, so you can call it multiple times to emit multiple images. It accepts a data URL, a single `input_image` item, an object like `{ bytes, mimeType }`, or a raw tool response object with exactly one image and no text. It rejects mixed text-and-image content.\n"); - section.push_str("- `codex.tool(...)` and `codex.emitImage(...)` keep stable helper identities across cells. Saved references and persisted objects can reuse them in later cells, but async callbacks that fire after a cell finishes still fail because no exec is active.\n"); - section.push_str("- Request full-resolution image processing with `detail: \"original\"` only when the `view_image` tool schema includes a `detail` argument. The same availability applies to `codex.emitImage(...)`: if `view_image.detail` is present, you may also pass `detail: \"original\"` there. Use this when high-fidelity image perception or precise localization is needed, especially for CUA agents.\n"); - section.push_str("- Example of sharing an in-memory Playwright screenshot: `await codex.emitImage({ bytes: await page.screenshot({ type: \"jpeg\", quality: 85 }), mimeType: \"image/jpeg\", detail: \"original\" })`.\n"); - section.push_str("- Example of sharing a local image tool result: `await codex.emitImage(codex.tool(\"view_image\", { path: \"/absolute/path\", detail: \"original\" }))`.\n"); - section.push_str("- When encoding an image to send with `codex.emitImage(...)` or `view_image`, prefer JPEG at about 85 quality when lossy compression is acceptable; use PNG when transparency or lossless detail matters. Smaller uploads are faster and less likely to hit size limits.\n"); - section.push_str("- Top-level bindings persist across cells. 
If a cell throws, prior bindings remain available and bindings that finished initializing before the throw often remain usable in later cells. For code you plan to reuse across cells, prefer declaring or assigning it in direct top-level statements before operations that might throw. If you hit `SyntaxError: Identifier 'x' has already been declared`, first reuse the existing binding, reassign a previously declared `let`, or pick a new descriptive name. Use `{ ... }` only for a short temporary block when you specifically need local scratch names; do not wrap an entire cell in block scope if you want those names reusable later. Reset the kernel with `js_repl_reset` only when you need a clean state.\n"); - section.push_str("- Top-level static import declarations (for example `import x from \"./file.js\"`) are currently unsupported in `js_repl`; use dynamic imports with `await import(\"pkg\")`, `await import(\"./file.js\")`, or `await import(\"/abs/path/file.mjs\")` instead. Imported local files must be ESM `.js`/`.mjs` files and run in the same REPL VM context. Bare package imports always resolve from REPL-global search roots (`CODEX_JS_REPL_NODE_MODULE_DIRS`, then cwd), not relative to the imported file location. Local files may statically import only other local relative/absolute/`file://` `.js`/`.mjs` files; package and builtin imports from local files must stay dynamic. `import.meta.resolve()` returns importable strings such as `file://...`, bare package names, and `node:...` specifiers. 
Local file modules reload between execs, while top-level bindings persist until `js_repl_reset`.\n"); - - if config.features.enabled(Feature::JsReplToolsOnly) { - section.push_str("- Do not call tools directly; use `js_repl` + `codex.tool(...)` for all tool calls, including shell commands.\n"); - section - .push_str("- MCP tools (if any) can also be called by name via `codex.tool(...)`.\n"); - } - - section.push_str("- Avoid direct access to `process.stdout` / `process.stderr` / `process.stdin`; it can corrupt the JSON line protocol. Use `console.log`, `codex.tool(...)`, and `codex.emitImage(...)`."); - - Some(section) -} - -/// Combines `Config::instructions` and `AGENTS.md` (if present) into a single -/// string of instructions. -pub(crate) async fn get_user_instructions( - config: &Config, - environment: Option<&Environment>, -) -> Option { - let fs = environment?.get_filesystem(); - get_user_instructions_with_fs(config, fs.as_ref()).await -} - -pub(crate) async fn get_user_instructions_with_fs( - config: &Config, - fs: &dyn ExecutorFileSystem, -) -> Option { - let project_docs = read_project_docs_with_fs(config, fs).await; - - let mut output = String::new(); - - if let Some(instructions) = config.user_instructions.clone() { - output.push_str(&instructions); - } - - match project_docs { - Ok(Some(docs)) => { - if !output.is_empty() { - output.push_str(PROJECT_DOC_SEPARATOR); - } - output.push_str(&docs); - } - Ok(None) => {} - Err(e) => { - error!("error trying to find project doc: {e:#}"); - } - }; - - if let Some(js_repl_section) = render_js_repl_instructions(config) { - if !output.is_empty() { - output.push_str("\n\n"); - } - output.push_str(&js_repl_section); - } - - if config.features.enabled(Feature::ChildAgentsMd) { - if !output.is_empty() { - output.push_str("\n\n"); - } - output.push_str(HIERARCHICAL_AGENTS_MESSAGE); - } - - if !output.is_empty() { - Some(output) - } else { - None - } -} - -/// Attempt to locate and load the project documentation. 
-/// -/// On success returns `Ok(Some(contents))` where `contents` is the -/// concatenation of all discovered docs. If no documentation file is found the -/// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so -/// callers can decide how to handle them. -pub async fn read_project_docs( - config: &Config, - environment: &Environment, -) -> io::Result> { - let fs = environment.get_filesystem(); - read_project_docs_with_fs(config, fs.as_ref()).await -} - -async fn read_project_docs_with_fs( - config: &Config, - fs: &dyn ExecutorFileSystem, -) -> io::Result> { - let max_total = config.project_doc_max_bytes; - - if max_total == 0 { - return Ok(None); - } - - let paths = discover_project_doc_paths(config, fs).await?; - if paths.is_empty() { - return Ok(None); - } - - let mut remaining: u64 = max_total as u64; - let mut parts: Vec = Vec::new(); - - for p in paths { - if remaining == 0 { - break; - } - - match fs.get_metadata(&p).await { - Ok(metadata) if !metadata.is_file => continue, - Ok(_) => {} - Err(err) if err.kind() == io::ErrorKind::NotFound => continue, - Err(err) => return Err(err), - } - - let mut data = match fs.read_file(&p).await { - Ok(data) => data, - Err(err) if err.kind() == io::ErrorKind::NotFound => continue, - Err(err) => return Err(err), - }; - let size = data.len() as u64; - if size > remaining { - data.truncate(remaining as usize); - } - - if size > remaining { - tracing::warn!( - "Project doc `{}` exceeds remaining budget ({} bytes) - truncating.", - p.display(), - remaining, - ); - } - - let text = String::from_utf8_lossy(&data).to_string(); - if !text.trim().is_empty() { - parts.push(text); - remaining = remaining.saturating_sub(data.len() as u64); - } - } - - if parts.is_empty() { - Ok(None) - } else { - Ok(Some(parts.join("\n\n"))) - } -} - -/// Discover the list of AGENTS.md files using the same search rules as -/// `read_project_docs`, but return the file paths instead of concatenated -/// contents. 
The list is ordered from project root to the current working -/// directory (inclusive). Symlinks are allowed. When `project_doc_max_bytes` -/// is zero, returns an empty list. -pub async fn discover_project_doc_paths( - config: &Config, - fs: &dyn ExecutorFileSystem, -) -> io::Result> { - if config.project_doc_max_bytes == 0 { - return Ok(Vec::new()); - } - - let mut dir = config.cwd.clone(); - if let Ok(canon) = normalize_path(&dir) { - dir = AbsolutePathBuf::try_from(canon)?; - } - - let mut merged = TomlValue::Table(toml::map::Map::new()); - for layer in config.config_layer_stack.get_layers( - ConfigLayerStackOrdering::LowestPrecedenceFirst, - /*include_disabled*/ false, - ) { - if matches!(layer.name, ConfigLayerSource::Project { .. }) { - continue; - } - merge_toml_values(&mut merged, &layer.config); - } - let project_root_markers = match project_root_markers_from_config(&merged) { - Ok(Some(markers)) => markers, - Ok(None) => default_project_root_markers(), - Err(err) => { - tracing::warn!("invalid project_root_markers: {err}"); - default_project_root_markers() - } - }; - let mut project_root = None; - if !project_root_markers.is_empty() { - for ancestor in dir.ancestors() { - for marker in &project_root_markers { - let marker_path = AbsolutePathBuf::try_from(ancestor.join(marker))?; - let marker_exists = match fs.get_metadata(&marker_path).await { - Ok(_) => true, - Err(err) if err.kind() == io::ErrorKind::NotFound => false, - Err(err) => return Err(err), - }; - if marker_exists { - project_root = Some(AbsolutePathBuf::try_from(ancestor.to_path_buf())?); - break; - } - } - if project_root.is_some() { - break; - } - } - } - - let search_dirs: Vec = if let Some(root) = project_root { - let mut dirs = Vec::new(); - let mut cursor = dir.clone(); - loop { - dirs.push(cursor.clone()); - if cursor == root { - break; - } - let Some(parent) = cursor.parent() else { - break; - }; - cursor = parent; - } - dirs.reverse(); - dirs - } else { - vec![dir] - }; - - let mut 
found: Vec = Vec::new(); - let candidate_filenames = candidate_filenames(config); - for d in search_dirs { - for name in &candidate_filenames { - let candidate = d.join(name); - match fs.get_metadata(&candidate).await { - Ok(md) if md.is_file => { - found.push(candidate); - break; - } - Ok(_) => {} - Err(err) if err.kind() == io::ErrorKind::NotFound => continue, - Err(err) => return Err(err), - } - } - } - - Ok(found) -} -fn candidate_filenames<'a>(config: &'a Config) -> Vec<&'a str> { - let mut names: Vec<&'a str> = - Vec::with_capacity(2 + config.project_doc_fallback_filenames.len()); - names.push(LOCAL_PROJECT_DOC_FILENAME); - names.push(DEFAULT_PROJECT_DOC_FILENAME); - for candidate in &config.project_doc_fallback_filenames { - let candidate = candidate.as_str(); - if candidate.is_empty() { - continue; - } - if !names.contains(&candidate) { - names.push(candidate); - } - } - names -} - -#[cfg(test)] -#[path = "project_doc_tests.rs"] -mod tests; diff --git a/codex-rs/core/src/prompt_debug.rs b/codex-rs/core/src/prompt_debug.rs index 789aac5806..fb64481523 100644 --- a/codex-rs/core/src/prompt_debug.rs +++ b/codex-rs/core/src/prompt_debug.rs @@ -106,8 +106,9 @@ mod tests { async fn build_prompt_input_includes_context_and_user_message() { let codex_home = tempfile::tempdir().expect("create codex home"); let cwd = tempfile::tempdir().expect("create cwd"); - let mut config = test_config(); - config.codex_home = codex_home.path().to_path_buf(); + let mut config = test_config().await; + config.codex_home = + AbsolutePathBuf::from_absolute_path(codex_home.path()).expect("codex home is absolute"); config.cwd = AbsolutePathBuf::try_from(cwd.path().to_path_buf()).expect("absolute cwd"); config.user_instructions = Some("Project-specific test instructions".to_string()); diff --git a/codex-rs/core/src/realtime_context.rs b/codex-rs/core/src/realtime_context.rs index fbe48a3310..129a7de642 100644 --- a/codex-rs/core/src/realtime_context.rs +++ 
b/codex-rs/core/src/realtime_context.rs @@ -2,10 +2,14 @@ use crate::codex::Session; use crate::compact::content_items_to_text; use crate::event_mapping::is_contextual_user_message_content; use chrono::Utc; +use codex_exec_server::LOCAL_FS; use codex_git_utils::resolve_root_git_project_for_trust; use codex_protocol::models::ResponseItem; -use codex_state::SortKey; -use codex_state::ThreadMetadata; +use codex_thread_store::ListThreadsParams; +use codex_thread_store::StoredThread; +use codex_thread_store::ThreadSortKey; +use codex_thread_store::ThreadStore; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_output_truncation::TruncationPolicy; use codex_utils_output_truncation::truncate_text; use dirs::home_dir; @@ -23,11 +27,13 @@ use tracing::info; use tracing::warn; const STARTUP_CONTEXT_HEADER: &str = "Startup context from Codex.\nThis is background context about recent work and machine/workspace layout. It may be incomplete or stale. Use it to inform responses, and do not repeat it back unless relevant."; +const STARTUP_CONTEXT_OPEN_TAG: &str = ""; +const STARTUP_CONTEXT_CLOSE_TAG: &str = ""; const CURRENT_THREAD_SECTION_TOKEN_BUDGET: usize = 1_200; const RECENT_WORK_SECTION_TOKEN_BUDGET: usize = 2_200; const WORKSPACE_SECTION_TOKEN_BUDGET: usize = 1_600; const NOTES_SECTION_TOKEN_BUDGET: usize = 300; -const CURRENT_THREAD_TURN_TOKEN_BUDGET: usize = 300; +pub(crate) const REALTIME_TURN_TOKEN_BUDGET: usize = 300; const MAX_RECENT_THREADS: usize = 40; const MAX_RECENT_WORK_GROUPS: usize = 8; const MAX_CURRENT_CWD_ASKS: usize = 8; @@ -58,8 +64,8 @@ pub(crate) async fn build_realtime_startup_context( let history = sess.clone_history().await; let current_thread_section = build_current_thread_section(history.raw_items()); let recent_threads = load_recent_threads(sess).await; - let recent_work_section = build_recent_work_section(&cwd, &recent_threads); - let workspace_section = build_workspace_section_with_user_root(&cwd, home_dir()); + let 
recent_work_section = build_recent_work_section(&cwd, &recent_threads).await; + let workspace_section = build_workspace_section_with_user_root(&cwd, home_dir()).await; if current_thread_section.is_none() && recent_work_section.is_none() @@ -98,15 +104,16 @@ pub(crate) async fn build_realtime_startup_context( } if let Some(section) = format_section( "Notes", - Some("Built at realtime startup from the current thread history, persisted thread metadata in the state DB, and a bounded local workspace scan. This excludes repo memory instructions, AGENTS files, project-doc prompt blends, and memory summaries.".to_string()), + Some("Built at realtime startup from the current thread history, local thread metadata, and a bounded local workspace scan. This excludes repo memory instructions, AGENTS files, project-doc prompt blends, and memory summaries.".to_string()), NOTES_SECTION_TOKEN_BUDGET, ) { parts.push(section); } - let context = truncate_text(&parts.join("\n\n"), TruncationPolicy::Tokens(budget_tokens)); + let context = format_startup_context_blob(&parts.join("\n\n")); debug!( approx_tokens = approx_token_count(&context), + requested_budget_tokens = budget_tokens, bytes = context.len(), has_current_thread_section, has_recent_work_section, @@ -117,41 +124,49 @@ pub(crate) async fn build_realtime_startup_context( Some(context) } -async fn load_recent_threads(sess: &Session) -> Vec { - let Some(state_db) = sess.services.state_db.as_ref() else { - return Vec::new(); - }; - - match state_db - .list_threads( - MAX_RECENT_THREADS, - /*anchor*/ None, - SortKey::UpdatedAt, - &[], - /*model_providers*/ None, - /*archived_only*/ false, - /*search_term*/ None, - ) +async fn load_recent_threads(sess: &Session) -> Vec { + match sess + .services + .thread_store + .list_threads(ListThreadsParams { + page_size: MAX_RECENT_THREADS, + cursor: None, + sort_key: ThreadSortKey::UpdatedAt, + allowed_sources: Vec::new(), + model_providers: None, + archived: false, + search_term: None, + }) 
.await { Ok(page) => page.items, Err(err) => { - warn!("failed to load realtime startup threads from state db: {err}"); + warn!("failed to load realtime startup threads from thread store: {err}"); Vec::new() } } } -fn build_recent_work_section(cwd: &Path, recent_threads: &[ThreadMetadata]) -> Option { - let mut groups: HashMap> = HashMap::new(); +async fn build_recent_work_section( + cwd: &AbsolutePathBuf, + recent_threads: &[StoredThread], +) -> Option { + let mut groups: HashMap> = HashMap::new(); for entry in recent_threads { - let group = - resolve_root_git_project_for_trust(&entry.cwd).unwrap_or_else(|| entry.cwd.clone()); + let group = match AbsolutePathBuf::from_absolute_path(entry.cwd.as_path()) { + Ok(entry_cwd) => resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &entry_cwd) + .await + .map(AbsolutePathBuf::into_path_buf) + .unwrap_or_else(|| entry.cwd.clone()), + Err(_) => entry.cwd.clone(), + }; groups.entry(group).or_default().push(entry); } - let current_group = - resolve_root_git_project_for_trust(cwd).unwrap_or_else(|| cwd.to_path_buf()); + let current_group = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), cwd) + .await + .map(AbsolutePathBuf::into_path_buf) + .unwrap_or_else(|| cwd.clone().into_path_buf()); let mut groups = groups.into_iter().collect::>(); groups.sort_by(|(left_group, left_entries), (right_group, right_entries)| { let left_latest = left_entries @@ -176,14 +191,13 @@ fn build_recent_work_section(cwd: &Path, recent_threads: &[ThreadMetadata]) -> O )) }); - let sections = groups - .into_iter() - .take(MAX_RECENT_WORK_GROUPS) - .filter_map(|(group, mut entries)| { - entries.sort_by_key(|entry| Reverse(entry.updated_at)); - format_thread_group(¤t_group, &group, entries) - }) - .collect::>(); + let mut sections = Vec::new(); + for (group, mut entries) in groups.into_iter().take(MAX_RECENT_WORK_GROUPS) { + entries.sort_by_key(|entry| Reverse(entry.updated_at)); + if let Some(section) = format_thread_group(¤t_group, &group, 
entries).await { + sections.push(section); + } + } (!sections.is_empty()).then(|| sections.join("\n\n")) } @@ -262,30 +276,9 @@ fn build_current_thread_section(items: &[ResponseItem]) -> Option { turn_lines.push(assistant_messages.join("\n\n")); } - let turn_budget = CURRENT_THREAD_TURN_TOKEN_BUDGET.min(remaining_budget); + let turn_budget = REALTIME_TURN_TOKEN_BUDGET.min(remaining_budget); let turn_text = turn_lines.join("\n"); - let mut truncation_budget = turn_budget; - let turn_text = loop { - let candidate = truncate_text(&turn_text, TruncationPolicy::Tokens(truncation_budget)); - let candidate_tokens = approx_token_count(&candidate); - if candidate_tokens <= turn_budget { - break candidate; - } - - // The shared truncator adds its marker after choosing preserved - // content, so tighten the content budget until the rendered turn - // itself fits the per-turn cap. - let excess_tokens = candidate_tokens.saturating_sub(turn_budget); - let next_budget = truncation_budget.saturating_sub(excess_tokens.max(1)); - if next_budget == 0 { - let candidate = truncate_text(&turn_text, TruncationPolicy::Tokens(0)); - if approx_token_count(&candidate) <= turn_budget { - break candidate; - } - break String::new(); - } - truncation_budget = next_budget; - }; + let turn_text = truncate_realtime_text_to_token_budget(&turn_text, turn_budget); let turn_tokens = approx_token_count(&turn_text); if turn_tokens == 0 { continue; @@ -300,19 +293,45 @@ fn build_current_thread_section(items: &[ResponseItem]) -> Option { (retained_turn_count > 0).then(|| lines.join("\n")) } -fn build_workspace_section_with_user_root( - cwd: &Path, +pub(crate) fn truncate_realtime_text_to_token_budget(text: &str, budget_tokens: usize) -> String { + let mut truncation_budget = budget_tokens; + loop { + let candidate = truncate_text(text, TruncationPolicy::Tokens(truncation_budget)); + let candidate_tokens = approx_token_count(&candidate); + if candidate_tokens <= budget_tokens { + break candidate; + } + + // 
The shared truncator adds its marker after choosing preserved + // content, so tighten the content budget until the rendered turn + // itself fits the per-turn cap. + let excess_tokens = candidate_tokens.saturating_sub(budget_tokens); + let next_budget = truncation_budget.saturating_sub(excess_tokens.max(1)); + if next_budget == 0 { + let candidate = truncate_text(text, TruncationPolicy::Tokens(0)); + if approx_token_count(&candidate) <= budget_tokens { + break candidate; + } + break String::new(); + } + truncation_budget = next_budget; + } +} + +async fn build_workspace_section_with_user_root( + cwd: &AbsolutePathBuf, user_root: Option, ) -> Option { - let git_root = resolve_root_git_project_for_trust(cwd); - let cwd_tree = render_tree(cwd); + let cwd_path = cwd.as_path(); + let git_root = resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), cwd).await; + let cwd_tree = render_tree(cwd_path); let git_root_tree = git_root .as_ref() - .filter(|git_root| git_root.as_path() != cwd) - .and_then(|git_root| render_tree(git_root)); + .filter(|git_root| git_root.as_path() != cwd_path) + .and_then(|git_root| render_tree(git_root.as_path())); let user_root_tree = user_root .as_ref() - .filter(|user_root| user_root.as_path() != cwd) + .filter(|user_root| user_root.as_path() != cwd_path) .filter(|user_root| { git_root .as_ref() @@ -325,8 +344,8 @@ fn build_workspace_section_with_user_root( } let mut lines = vec![ - format!("Current working directory: {}", cwd.display()), - format!("Working directory name: {}", file_name_string(cwd)), + format!("Current working directory: {}", cwd_path.display()), + format!("Working directory name: {}", file_name_string(cwd_path)), ]; if let Some(git_root) = &git_root { @@ -433,23 +452,43 @@ fn format_section(title: &str, body: Option, budget_tokens: usize) -> Op return None; } - Some(format!( - "## {title}\n{}", - truncate_text(body, TruncationPolicy::Tokens(budget_tokens)) - )) + let heading = format!("## {title}\n"); + let body_budget = 
budget_tokens.saturating_sub(approx_token_count(&heading)); + if body_budget == 0 { + return None; + } + + let body = truncate_realtime_text_to_token_budget(body, body_budget); + if body.is_empty() { + return None; + } + + Some(format!("{heading}{body}")) } -fn format_thread_group( +fn format_startup_context_blob(body: &str) -> String { + format!("{STARTUP_CONTEXT_OPEN_TAG}\n{body}\n{STARTUP_CONTEXT_CLOSE_TAG}") +} + +async fn format_thread_group( current_group: &Path, group: &Path, - entries: Vec<&ThreadMetadata>, + entries: Vec<&StoredThread>, ) -> Option { let latest = entries.first()?; - let group_label = if resolve_root_git_project_for_trust(latest.cwd.as_path()).is_some() { - format!("### Git repo: {}", group.display()) - } else { - format!("### Directory: {}", group.display()) - }; + let group_label = + if let Ok(latest_cwd) = AbsolutePathBuf::from_absolute_path(latest.cwd.as_path()) { + if resolve_root_git_project_for_trust(LOCAL_FS.as_ref(), &latest_cwd) + .await + .is_some() + { + format!("### Git repo: {}", group.display()) + } else { + format!("### Directory: {}", group.display()) + } + } else { + format!("### Directory: {}", group.display()) + }; let mut lines = vec![ group_label, format!("Recent sessions: {}", entries.len()), @@ -457,8 +496,9 @@ fn format_thread_group( ]; if let Some(git_branch) = latest - .git_branch - .as_deref() + .git_info + .as_ref() + .and_then(|git| git.branch.as_deref()) .filter(|git_branch| !git_branch.is_empty()) { lines.push(format!("Latest branch: {git_branch}")); diff --git a/codex-rs/core/src/realtime_context_tests.rs b/codex-rs/core/src/realtime_context_tests.rs index 7495161d46..4096a1692a 100644 --- a/codex-rs/core/src/realtime_context_tests.rs +++ b/codex-rs/core/src/realtime_context_tests.rs @@ -1,22 +1,42 @@ +use super::CURRENT_THREAD_SECTION_TOKEN_BUDGET; +use super::NOTES_SECTION_TOKEN_BUDGET; +use super::RECENT_WORK_SECTION_TOKEN_BUDGET; +use super::STARTUP_CONTEXT_HEADER; +use 
super::WORKSPACE_SECTION_TOKEN_BUDGET; use super::build_current_thread_section; use super::build_recent_work_section; use super::build_workspace_section_with_user_root; +use super::format_section; +use super::format_startup_context_blob; use chrono::TimeZone; use chrono::Utc; +use codex_git_utils::GitSha; use codex_protocol::ThreadId; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; -use codex_state::ThreadMetadata; +use codex_protocol::protocol::AskForApproval; +use codex_protocol::protocol::GitInfo; +use codex_protocol::protocol::SandboxPolicy; +use codex_protocol::protocol::SessionSource; +use codex_thread_store::StoredThread; +use core_test_support::PathBufExt; +use core_test_support::PathExt; use pretty_assertions::assert_eq; use std::fs; use std::path::PathBuf; use std::process::Command; use tempfile::TempDir; -fn thread_metadata(cwd: &str, title: &str, first_user_message: &str) -> ThreadMetadata { - ThreadMetadata { - id: ThreadId::new(), - rollout_path: PathBuf::from("/tmp/rollout.jsonl"), +fn stored_thread(cwd: &str, title: &str, first_user_message: &str) -> StoredThread { + StoredThread { + thread_id: ThreadId::new(), + rollout_path: Some(PathBuf::from("/tmp/rollout.jsonl")), + forked_from_id: None, + preview: first_user_message.to_string(), + name: (!title.is_empty()).then(|| title.to_string()), + model_provider: "test-provider".to_string(), + model: Some("gpt-5".to_string()), + reasoning_effort: None, created_at: Utc .timestamp_opt(1_709_251_100, 0) .single() @@ -25,24 +45,23 @@ fn thread_metadata(cwd: &str, title: &str, first_user_message: &str) -> ThreadMe .timestamp_opt(1_709_251_200, 0) .single() .expect("valid timestamp"), - source: "cli".to_string(), - agent_path: None, - agent_nickname: None, - agent_role: None, - model_provider: "test-provider".to_string(), - model: Some("gpt-5".to_string()), - reasoning_effort: None, + archived_at: None, cwd: PathBuf::from(cwd), cli_version: "test".to_string(), - title: 
title.to_string(), - sandbox_policy: "workspace-write".to_string(), - approval_mode: "never".to_string(), - tokens_used: 0, + source: SessionSource::Cli, + agent_nickname: None, + agent_role: None, + agent_path: None, + git_info: Some(GitInfo { + commit_hash: Some(GitSha::new("abcdef")), + branch: Some("main".to_string()), + repository_url: None, + }), + approval_mode: AskForApproval::Never, + sandbox_policy: SandboxPolicy::new_read_only_policy(), + token_usage: None, first_user_message: Some(first_user_message.to_string()), - archived_at: None, - git_sha: None, - git_branch: Some("main".to_string()), - git_origin_url: None, + history: None, } } @@ -161,29 +180,84 @@ fn current_thread_section_keeps_latest_turns_when_history_exceeds_budget() { } #[test] -fn workspace_section_requires_meaningful_structure() { - let cwd = TempDir::new().expect("tempdir"); +fn startup_context_blob_is_wrapped_in_tags_without_final_truncation() { + let body = "Startup context from Codex.\n## Current Thread\nhello"; + let wrapped = format_startup_context_blob(body); + assert_eq!( - build_workspace_section_with_user_root(cwd.path(), /*user_root*/ None), - None + wrapped, + "\nStartup context from Codex.\n## Current Thread\nhello\n" ); } #[test] -fn workspace_section_includes_tree_when_entries_exist() { +fn fixed_section_budgets_apply_per_section_without_total_blob_truncation() { + let body = [ + STARTUP_CONTEXT_HEADER.to_string(), + format_section( + "Current Thread", + Some("current thread ".repeat(2_000)), + CURRENT_THREAD_SECTION_TOKEN_BUDGET, + ) + .expect("current thread section"), + format_section( + "Recent Work", + Some("recent work ".repeat(3_000)), + RECENT_WORK_SECTION_TOKEN_BUDGET, + ) + .expect("recent work section"), + format_section( + "Machine / Workspace Map", + Some("workspace map ".repeat(2_500)), + WORKSPACE_SECTION_TOKEN_BUDGET, + ) + .expect("workspace section"), + format_section( + "Notes", + Some("notes ".repeat(500)), + NOTES_SECTION_TOKEN_BUDGET, + ) + 
.expect("notes section"), + ] + .join("\n\n"); + + let wrapped = format_startup_context_blob(&body); + + assert!(wrapped.starts_with("\n")); + assert!(wrapped.ends_with("\n")); + assert!(wrapped.contains("tokens truncated")); + assert!(wrapped.contains("## Current Thread")); + assert!(wrapped.contains("## Recent Work")); + assert!(wrapped.contains("## Machine / Workspace Map")); + assert!(wrapped.contains("## Notes")); +} + +#[tokio::test] +async fn workspace_section_requires_meaningful_structure() { + let cwd = TempDir::new().expect("tempdir"); + assert_eq!( + build_workspace_section_with_user_root(&cwd.path().abs(), /*user_root*/ None).await, + None + ); +} + +#[tokio::test] +async fn workspace_section_includes_tree_when_entries_exist() { let cwd = TempDir::new().expect("tempdir"); fs::create_dir(cwd.path().join("docs")).expect("create docs dir"); fs::write(cwd.path().join("README.md"), "hello").expect("write readme"); - let section = build_workspace_section_with_user_root(cwd.path(), /*user_root*/ None) - .expect("workspace section"); + let section = + build_workspace_section_with_user_root(&cwd.path().abs(), /*user_root*/ None) + .await + .expect("workspace section"); assert!(section.contains("Working directory tree:")); assert!(section.contains("- docs/")); assert!(section.contains("- README.md")); } -#[test] -fn workspace_section_includes_user_root_tree_when_distinct() { +#[tokio::test] +async fn workspace_section_includes_user_root_tree_when_distinct() { let root = TempDir::new().expect("tempdir"); let cwd = root.path().join("cwd"); let git_root = root.path().join("git"); @@ -196,15 +270,16 @@ fn workspace_section_includes_user_root_tree_when_distinct() { fs::create_dir_all(user_root.join("code")).expect("create user root child"); fs::write(user_root.join(".zshrc"), "export TEST=1").expect("write home file"); - let section = build_workspace_section_with_user_root(cwd.as_path(), Some(user_root)) + let section = 
build_workspace_section_with_user_root(&cwd.abs(), Some(user_root)) + .await .expect("workspace section"); assert!(section.contains("User root tree:")); assert!(section.contains("- code/")); assert!(!section.contains("- .zshrc")); } -#[test] -fn recent_work_section_groups_threads_by_cwd() { +#[tokio::test] +async fn recent_work_section_groups_threads_by_cwd() { let root = TempDir::new().expect("tempdir"); let repo = root.path().join("repo"); let workspace_a = repo.join("workspace-a"); @@ -224,22 +299,23 @@ fn recent_work_section_groups_threads_by_cwd() { fs::create_dir_all(&outside).expect("create outside dir"); let recent_threads = vec![ - thread_metadata( + stored_thread( workspace_a.to_string_lossy().as_ref(), "Investigate realtime startup context", "Log the startup context before sending it", ), - thread_metadata( + stored_thread( workspace_b.to_string_lossy().as_ref(), "Trim websocket startup payload", "Remove memories from the realtime startup context", ), - thread_metadata(outside.to_string_lossy().as_ref(), "", "Inspect flaky test"), + stored_thread(outside.to_string_lossy().as_ref(), "", "Inspect flaky test"), ]; let current_cwd = workspace_a; - let repo = fs::canonicalize(repo).expect("canonicalize repo"); + let repo = repo.abs(); - let section = build_recent_work_section(current_cwd.as_path(), &recent_threads) + let section = build_recent_work_section(¤t_cwd.abs(), &recent_threads) + .await .expect("recent work section"); assert!(section.contains(&format!("### Git repo: {}", repo.display()))); assert!(section.contains("Recent sessions: 2")); diff --git a/codex-rs/core/src/realtime_conversation.rs b/codex-rs/core/src/realtime_conversation.rs index 8200ba4908..234dd78f38 100644 --- a/codex-rs/core/src/realtime_conversation.rs +++ b/codex-rs/core/src/realtime_conversation.rs @@ -42,6 +42,7 @@ use codex_protocol::protocol::RealtimeConversationRealtimeEvent; use codex_protocol::protocol::RealtimeConversationSdpEvent; use 
codex_protocol::protocol::RealtimeConversationStartedEvent; use codex_protocol::protocol::RealtimeHandoffRequested; +use codex_protocol::protocol::RealtimeOutputModality; use codex_protocol::protocol::RealtimeVoice; use codex_protocol::protocol::RealtimeVoicesList; use http::HeaderMap; @@ -62,10 +63,12 @@ const AUDIO_IN_QUEUE_CAPACITY: usize = 256; const USER_TEXT_IN_QUEUE_CAPACITY: usize = 64; const HANDOFF_OUT_QUEUE_CAPACITY: usize = 64; const OUTPUT_EVENTS_QUEUE_CAPACITY: usize = 256; -const REALTIME_STARTUP_CONTEXT_TOKEN_BUDGET: usize = 5_000; +const REALTIME_STARTUP_CONTEXT_TOKEN_BUDGET: usize = 5_300; const DEFAULT_REALTIME_MODEL: &str = "gpt-realtime-1.5"; -const REALTIME_V2_PROGRESS_UPDATE_SUFFIX: &str = - "\n\nUpdate from background agent (task hasn't finished yet):"; +pub(crate) const REALTIME_USER_TEXT_PREFIX: &str = "[USER] "; +pub(crate) const REALTIME_BACKEND_TEXT_PREFIX: &str = "[BACKEND] "; +const REALTIME_V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT: &str = + "Background agent finished. 
Use the preceding [BACKEND] messages as the result."; const REALTIME_V2_STEER_ACKNOWLEDGEMENT: &str = "This was sent to steer the previous background agent task."; const REALTIME_ACTIVE_RESPONSE_ERROR_PREFIX: &str = @@ -208,6 +211,7 @@ impl RealtimeHandoffState { struct ConversationState { audio_tx: Sender, user_text_tx: Sender, + session_kind: RealtimeSessionKind, writer: RealtimeWebsocketWriter, handoff: RealtimeHandoffState, input_task: JoinHandle<()>, @@ -244,6 +248,16 @@ impl RealtimeConversationManager { .and_then(|state| state.realtime_active.load(Ordering::Relaxed).then_some(())) } + pub(crate) async fn is_running_v2(&self) -> bool { + let state = self.state.lock().await; + matches!( + state.as_ref(), + Some(state) + if state.realtime_active.load(Ordering::Relaxed) + && state.session_kind == RealtimeSessionKind::V2 + ) + } + async fn start(&self, start: RealtimeStart) -> CodexResult { let previous_state = { let mut guard = self.state.lock().await; @@ -330,6 +344,7 @@ impl RealtimeConversationManager { *guard = Some(ConversationState { audio_tx, user_text_tx, + session_kind, writer, handoff, input_task: task, @@ -405,15 +420,18 @@ impl RealtimeConversationManager { pub(crate) async fn text_in(&self, text: String) -> CodexResult<()> { let sender = { let guard = self.state.lock().await; - guard.as_ref().map(|state| state.user_text_tx.clone()) + guard + .as_ref() + .map(|state| (state.user_text_tx.clone(), state.session_kind)) }; - let Some(sender) = sender else { + let Some((sender, session_kind)) = sender else { return Err(CodexErr::InvalidRequest( "conversation is not running".to_string(), )); }; + let text = prefix_realtime_text(text, REALTIME_USER_TEXT_PREFIX, session_kind); sender .send(text) .await @@ -436,6 +454,11 @@ impl RealtimeConversationManager { return Ok(()); }; + let output_text = prefix_realtime_text( + output_text, + REALTIME_BACKEND_TEXT_PREFIX, + handoff.session_kind, + ); *handoff.last_output_text.lock().await = Some(output_text.clone()); 
handoff .output_tx @@ -593,8 +616,14 @@ async fn prepare_realtime_start( api_provider.base_url = realtime_ws_base_url.clone(); } let version = config.realtime.version; - let session_config = - build_realtime_session_config(sess, params.prompt, params.session_id, params.voice).await?; + let session_config = build_realtime_session_config( + sess, + params.prompt, + params.session_id, + params.output_modality, + params.voice, + ) + .await?; let requested_session_id = session_config.session_id.clone(); let extra_headers = match transport { ConversationStartTransport::Websocket => { @@ -622,6 +651,7 @@ pub(crate) async fn build_realtime_session_config( sess: &Arc, prompt: Option>, session_id: Option, + output_modality: RealtimeOutputModality, voice: Option, ) -> CodexResult { let config = sess.get_config().await; @@ -653,6 +683,13 @@ pub(crate) async fn build_realtime_session_config( RealtimeWsVersion::V1 => RealtimeEventParser::V1, RealtimeWsVersion::V2 => RealtimeEventParser::RealtimeV2, }; + if config.realtime.version == RealtimeWsVersion::V1 + && matches!(output_modality, RealtimeOutputModality::Text) + { + return Err(CodexErr::InvalidRequest( + "text realtime output modality requires realtime v2".to_string(), + )); + } let session_mode = match config.realtime.session_type { RealtimeWsMode::Conversational => RealtimeSessionMode::Conversational, RealtimeWsMode::Transcription => RealtimeSessionMode::Transcription, @@ -667,6 +704,7 @@ pub(crate) async fn build_realtime_session_config( session_id: Some(session_id.unwrap_or_else(|| sess.conversation_id.to_string())), event_parser, session_mode, + output_modality, voice, }) } @@ -679,6 +717,17 @@ fn default_realtime_voice(version: RealtimeWsVersion) -> RealtimeVoice { } } +fn prefix_realtime_text(text: String, prefix: &str, session_kind: RealtimeSessionKind) -> String { + if session_kind != RealtimeSessionKind::V2 || text.is_empty() || text.starts_with(prefix) { + return text; + } + format!("{prefix}{text}") +} + 
+pub(crate) fn prefix_realtime_v2_text(text: String, prefix: &str) -> String { + prefix_realtime_text(text, prefix, RealtimeSessionKind::V2) +} + fn validate_realtime_voice(version: RealtimeWsVersion, voice: RealtimeVoice) -> CodexResult<()> { let voices = RealtimeVoicesList::builtin(); let allowed = match version { @@ -789,7 +838,9 @@ async fn handle_start_inner( if let Some(text) = maybe_routed_text { debug!(text = %text, "[realtime-text] realtime conversation text output"); let sess_for_routed_text = Arc::clone(&sess_clone); - sess_for_routed_text.route_realtime_text_input(text).await; + sess_for_routed_text + .route_realtime_text_input(wrap_realtime_delegation_input(&text)) + .await; } if !fanout_realtime_active.load(Ordering::Relaxed) { break; @@ -851,6 +902,20 @@ fn realtime_text_from_handoff_request(handoff: &RealtimeHandoffRequested) -> Opt .or((!handoff.input_transcript.is_empty()).then_some(handoff.input_transcript.clone())) } +fn wrap_realtime_delegation_input(input: &str) -> String { + format!( + "\n {}\n", + escape_xml_text(input) + ) +} + +fn escape_xml_text(input: &str) -> String { + input + .replace('&', "&") + .replace('<', "<") + .replace('>', ">") +} + fn realtime_api_key(auth: Option<&CodexAuth>, provider: &ModelProviderInfo) -> CodexResult { if let Some(api_key) = provider.api_key()? 
{ return Ok(api_key); @@ -1042,18 +1107,17 @@ async fn handle_handoff_output( return Ok(()); } } - writer - .send_conversation_item_create(format!( - "{output_text}{REALTIME_V2_PROGRESS_UPDATE_SUFFIX}" - )) - .await + writer.send_conversation_item_create(output_text).await } HandoffOutput::FinalUpdate { handoff_id, - output_text, + output_text: _, } => { if let Err(err) = writer - .send_conversation_handoff_append(handoff_id, output_text) + .send_conversation_handoff_append( + handoff_id, + REALTIME_V2_HANDOFF_COMPLETE_ACKNOWLEDGEMENT.to_string(), + ) .await { Err(err) @@ -1216,7 +1280,9 @@ async fn handle_realtime_server_event( RealtimeEvent::Error(_) => true, RealtimeEvent::SessionUpdated { .. } | RealtimeEvent::InputTranscriptDelta(_) + | RealtimeEvent::InputTranscriptDone(_) | RealtimeEvent::OutputTranscriptDelta(_) + | RealtimeEvent::OutputTranscriptDone(_) | RealtimeEvent::ConversationItemAdded(_) | RealtimeEvent::ConversationItemDone { .. } => false, }; diff --git a/codex-rs/core/src/realtime_conversation_tests.rs b/codex-rs/core/src/realtime_conversation_tests.rs index 0a32d063c0..cda130e182 100644 --- a/codex-rs/core/src/realtime_conversation_tests.rs +++ b/codex-rs/core/src/realtime_conversation_tests.rs @@ -1,6 +1,7 @@ use super::RealtimeHandoffState; use super::RealtimeSessionKind; use super::realtime_text_from_handoff_request; +use super::wrap_realtime_delegation_input; use async_channel::bounded; use codex_protocol::protocol::RealtimeHandoffRequested; use codex_protocol::protocol::RealtimeTranscriptEntry; @@ -54,6 +55,22 @@ fn ignores_empty_handoff_request_input_transcript() { assert_eq!(realtime_text_from_handoff_request(&handoff), None); } +#[test] +fn wraps_realtime_delegation_input() { + assert_eq!( + wrap_realtime_delegation_input("hello"), + "\n hello\n" + ); +} + +#[test] +fn wraps_realtime_delegation_input_with_xml_escaping() { + assert_eq!( + wrap_realtime_delegation_input("use a < b && c > d"), + "\n use a < b && c > d\n" + ); +} + 
#[tokio::test] async fn clears_active_handoff_explicitly() { let (tx, _rx) = bounded(1); diff --git a/codex-rs/core/src/realtime_prompt.rs b/codex-rs/core/src/realtime_prompt.rs index 27005c630d..eb429bf566 100644 --- a/codex-rs/core/src/realtime_prompt.rs +++ b/codex-rs/core/src/realtime_prompt.rs @@ -74,7 +74,8 @@ mod tests { let prompt = prepare_realtime_backend_prompt(/*prompt*/ None, /*config_prompt*/ None); - assert!(prompt.starts_with("You are Codex, an OpenAI Coding Agent")); + assert!(prompt.starts_with("## Identity, tone, and role")); + assert!(prompt.contains("You are Codex, an OpenAI general-purpose agentic assistant")); assert!(prompt.contains("The user's name is ")); assert!(!prompt.contains("{{ user_first_name }}")); } diff --git a/codex-rs/core/src/review_prompts.rs b/codex-rs/core/src/review_prompts.rs index 988ceff821..12a5eb4a52 100644 --- a/codex-rs/core/src/review_prompts.rs +++ b/codex-rs/core/src/review_prompts.rs @@ -1,8 +1,8 @@ use codex_git_utils::merge_base_with_head; use codex_protocol::protocol::ReviewRequest; use codex_protocol::protocol::ReviewTarget; +use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_template::Template; -use std::path::Path; use std::sync::LazyLock; #[derive(Clone, Debug, PartialEq)] @@ -38,7 +38,7 @@ static COMMIT_PROMPT_TEMPLATE: LazyLock