Compare commits

..

6 Commits

Author              SHA1        Message                                   Date
Charles Cunningham  649a456fb0  Fix test                                  2026-01-16 10:17:15 -08:00
Charles Cunningham  70424769d1  Reduce diff                               2026-01-16 10:17:15 -08:00
Charles Cunningham  a0e778795c  Remove diff                               2026-01-16 10:17:15 -08:00
Charles Cunningham  ef828ac65a  Small tweaks                              2026-01-16 10:17:15 -08:00
Charles Cunningham  2821aef611  Remove unnecessary diff                   2026-01-16 10:17:15 -08:00
Charles Cunningham  e50f924e4f  Wire text elements through TUI and TUI2   2026-01-16 10:17:14 -08:00
1206 changed files with 84603 additions and 37169 deletions

View File

@@ -4,7 +4,6 @@ common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
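
For context, the cache settings in this hunk are ordinary Bazel options, so the same caches can be exercised ad hoc from the command line. A minimal sketch, assuming the cache paths from the hunk above and a placeholder target label:

# Sketch only: //some:target is a placeholder, not a label in this repo.
bazel build //some:target \
  --disk_cache=~/.cache/bazel-disk-cache \
  --repository_cache=~/.cache/bazel-repo-cache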

View File

@@ -1 +0,0 @@
9.0.0

View File

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm
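
To exercise this config locally, codespell reads .codespellrc from the working directory per the linked docs. A minimal sketch; the install method is an assumption:

# Sketch: run the spell check with the repo's skip/ignore settings in effect.
pip install codespell   # assumption: any install method that puts codespell on PATH works
codespell .             # picks up skip, ignore-regex and ignore-words-list from .codespellrc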

View File

@@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
: "${TARGET:?TARGET environment variable is required}"
: "${GITHUB_ENV:?GITHUB_ENV environment variable is required}"
apt_update_args=()
if [[ -n "${APT_UPDATE_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_update_args=(${APT_UPDATE_ARGS})
fi
apt_install_args=()
if [[ -n "${APT_INSTALL_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_install_args=(${APT_INSTALL_ARGS})
fi
sudo apt-get update "${apt_update_args[@]}"
sudo apt-get install -y "${apt_install_args[@]}" musl-tools pkg-config g++ clang libc++-dev libc++abi-dev lld
case "${TARGET}" in
x86_64-unknown-linux-musl)
arch="x86_64"
;;
aarch64-unknown-linux-musl)
arch="aarch64"
;;
*)
echo "Unexpected musl target: ${TARGET}" >&2
exit 1
;;
esac
# Use the musl toolchain as the Rust linker to avoid Zig injecting its own CRT.
if command -v "${arch}-linux-musl-gcc" >/dev/null; then
musl_linker="$(command -v "${arch}-linux-musl-gcc")"
elif command -v musl-gcc >/dev/null; then
musl_linker="$(command -v musl-gcc)"
else
echo "musl gcc not found after install; arch=${arch}" >&2
exit 1
fi
zig_target="${TARGET/-unknown-linux-musl/-linux-musl}"
runner_temp="${RUNNER_TEMP:-/tmp}"
tool_root="${runner_temp}/codex-musl-tools-${TARGET}"
mkdir -p "${tool_root}"
sysroot=""
if command -v zig >/dev/null; then
zig_bin="$(command -v zig)"
cc="${tool_root}/zigcc"
cxx="${tool_root}/zigcxx"
cat >"${cc}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
# Drop any explicit --target/-target flags. Zig expects -target and
# rejects Rust triples like *-unknown-linux-musl.
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" cc -target "${zig_target}" "\${args[@]}"
EOF
cat >"${cxx}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" c++ -target "${zig_target}" "\${args[@]}"
EOF
chmod +x "${cc}" "${cxx}"
sysroot="$("${zig_bin}" cc -target "${zig_target}" -print-sysroot 2>/dev/null || true)"
else
cc="${musl_linker}"
if command -v "${arch}-linux-musl-g++" >/dev/null; then
cxx="$(command -v "${arch}-linux-musl-g++")"
elif command -v musl-g++ >/dev/null; then
cxx="$(command -v musl-g++)"
else
cxx="${cc}"
fi
fi
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
echo "BORING_BSSL_SYSROOT=${sysroot}" >> "$GITHUB_ENV"
boring_sysroot_var="BORING_BSSL_SYSROOT_${TARGET}"
boring_sysroot_var="${boring_sysroot_var//-/_}"
echo "${boring_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi
cflags="-pthread"
cxxflags="-pthread"
if [[ "${TARGET}" == "aarch64-unknown-linux-musl" ]]; then
# BoringSSL enables -Wframe-larger-than=25344 under clang and treats warnings as errors.
cflags="${cflags} -Wno-error=frame-larger-than"
cxxflags="${cxxflags} -Wno-error=frame-larger-than"
fi
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
echo "CC=${cc}" >> "$GITHUB_ENV"
echo "TARGET_CC=${cc}" >> "$GITHUB_ENV"
target_cc_var="CC_${TARGET}"
target_cc_var="${target_cc_var//-/_}"
echo "${target_cc_var}=${cc}" >> "$GITHUB_ENV"
echo "CXX=${cxx}" >> "$GITHUB_ENV"
echo "TARGET_CXX=${cxx}" >> "$GITHUB_ENV"
target_cxx_var="CXX_${TARGET}"
target_cxx_var="${target_cxx_var//-/_}"
echo "${target_cxx_var}=${cxx}" >> "$GITHUB_ENV"
cargo_linker_var="CARGO_TARGET_${TARGET^^}_LINKER"
cargo_linker_var="${cargo_linker_var//-/_}"
echo "${cargo_linker_var}=${musl_linker}" >> "$GITHUB_ENV"
echo "CMAKE_C_COMPILER=${cc}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=${cxx}" >> "$GITHUB_ENV"
echo "CMAKE_ARGS=-DCMAKE_HAVE_THREADS_LIBRARY=1 -DCMAKE_USE_PTHREADS_INIT=1 -DCMAKE_THREAD_LIBS_INIT=-pthread -DTHREADS_PREFER_PTHREAD_FLAG=ON" >> "$GITHUB_ENV"

View File

@@ -81,7 +81,7 @@ jobs:
# previously built artifacts to minimize build time. The more precise you are with
# hashFiles sources the less work bazel will have to do.
# - name: Mount bazel caches
# uses: actions/cache@v5
# uses: actions/cache@v4
# with:
# path: |
# ~/.cache/bazel-repo-cache

View File

@@ -59,7 +59,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
components: rustfmt
- name: cargo fmt
@@ -77,7 +77,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
@@ -177,7 +177,7 @@ jobs:
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
components: clippy
@@ -261,21 +261,15 @@ jobs:
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
set -euo pipefail
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
@@ -422,7 +416,7 @@ jobs:
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
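
The workflow change above replaces the shared install script with a plain apt-get install of musl-tools and pkg-config. A hedged local equivalent for building a musl target on an Ubuntu host (the workspace location and rustup availability are assumptions):

# Sketch: reproduce the simplified CI setup locally.
sudo apt-get update -o Acquire::Retries=3
sudo apt-get install -y --no-install-recommends musl-tools pkg-config
rustup target add x86_64-unknown-linux-musl
cargo build --target x86_64-unknown-linux-musl   # run from codex-rs (assumption)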

View File

@@ -20,7 +20,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
@@ -46,15 +45,6 @@ jobs:
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
- name: Verify config schema fixture
shell: bash
working-directory: codex-rs
run: |
set -euo pipefail
echo "If this fails, run: just write-config-schema to overwrite fixture with intentional changes."
cargo run -p codex-core --bin codex-write-config-schema
git diff --exit-code core/config.schema.json
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
@@ -90,7 +80,7 @@ jobs:
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
@@ -104,17 +94,11 @@ jobs:
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Cargo build
shell: bash
@@ -252,7 +236,6 @@ jobs:
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
@@ -292,30 +275,7 @@ jobs:
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
@@ -398,10 +358,6 @@ jobs:
ls -R dist/
- name: Add config schema release asset
run: |
cp codex-rs/core/config.schema.json dist/config-schema.json
- name: Define release name
id: release_name
run: |
@@ -472,19 +428,6 @@ jobs:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema json file.
if: ${{ !contains(steps.release_name.outputs.name, '-') }}
continue-on-error: true
env:
DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL: ${{ secrets.DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL }}
run: |
if ! curl -sS -f -o /dev/null -X POST "$DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL"; then
echo "::warning title=developers.openai.com deploy hook failed::Vercel deploy hook POST failed for ${GITHUB_REF_NAME}"
exit 1
fi
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers
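
The release workflow also drops the schema-fixture verification step quoted above. For anyone who still wants that check locally, here is a sketch assembled from the commands shown in this diff (and in the AGENTS.md hunk later in this compare):

# Sketch: regenerate the config schema and fail if the committed fixture drifted.
cd codex-rs
cargo run -p codex-core --bin codex-write-config-schema
git diff --exit-code core/config.schema.json   # non-zero exit means the fixture is stale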

View File

@@ -24,7 +24,7 @@ jobs:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
- name: build codex
run: cargo build --bin codex

View File

@@ -93,21 +93,15 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.install_musl }}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.install_musl }}
name: Install musl build dependencies
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
@@ -204,7 +198,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -246,7 +240,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b

View File

@@ -11,17 +11,15 @@ In the codex-rs folder where the rust code lives:
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspacewide Clippy builds; only run `just fix` without `-p` if you changed shared crates.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
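
A minimal sketch of the per-crate loop this guidance describes, assuming a change under codex-rs/tui (the crate name is the example used in the text above):

cd codex-rs
just fmt                    # run after Rust changes; no approval needed
just fix -p codex-tui       # scoped lint fixes before finalizing (ask the user when interactive)
cargo test -p codex-tui     # project-specific tests can run without asking
# Only for changes in common, core, or protocol, and with user approval:
# cargo test --all-features
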
## TUI style conventions

View File

@@ -2,9 +2,13 @@ bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.3.1")
archive_override(
module_name = "toolchains_llvm_bootstrapped",
integrity = "sha256-4/2h4tYSUSptxFVI9G50yJxWGOwHSeTeOGBlaLQBV8g=",
strip_prefix = "toolchains_llvm_bootstrapped-d20baf67e04d8e2887e3779022890d1dc5e6b948",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/d20baf67e04d8e2887e3779022890d1dc5e6b948.tar.gz"],
integrity = "sha256-9ks21bgEqbQWmwUIvqeLA64+Jk6o4ZVjC8KxjVa2Vw8=",
strip_prefix = "toolchains_llvm_bootstrapped-e3775e66a7b6d287c705ca0cd24497ef4a77c503",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/e3775e66a7b6d287c705ca0cd24497ef4a77c503/master.tar.gz"],
patch_strip = 1,
patches = [
"//patches:llvm_toolchain_archive_params.patch",
],
)
osx = use_extension("@toolchains_llvm_bootstrapped//toolchain/extension:osx.bzl", "osx")
@@ -90,7 +94,7 @@ crate.annotation(
crate = "windows-link",
patch_args = ["-p1"],
patches = [
"//patches:windows-link.patch",
"//patches:windows-link.patch"
],
)
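
The integrity field in the override above is an SRI digest of the downloaded archive. A hedged way to recompute it, using the pre-change URL from this hunk purely as an example input (the tooling choice is an assumption):

# Sketch: compute the "sha256-<base64>" integrity string Bazel expects.
url="https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/d20baf67e04d8e2887e3779022890d1dc5e6b948.tar.gz"
echo "sha256-$(curl -fsSL "$url" | openssl dgst -sha256 -binary | base64)"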

MODULE.bazel.lock (generated, 362 changed lines)
View File

@@ -1,5 +1,5 @@
{
"lockFileVersion": 26,
"lockFileVersion": 24,
"registryFileHashes": {
"https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
"https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
@@ -9,19 +9,11 @@
"https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16",
"https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.2/MODULE.bazel": "73939767a4686cd9a520d16af5ab440071ed75cec1a876bf2fcfaf1f71987a16",
"https://bcr.bazel.build/modules/abseil-cpp/20250127.1/MODULE.bazel": "c4a89e7ceb9bf1e25cf84a9f830ff6b817b72874088bf5141b314726e46a57c1",
"https://bcr.bazel.build/modules/abseil-cpp/20250512.1/MODULE.bazel": "d209fdb6f36ffaf61c509fcc81b19e81b411a999a934a032e10cd009a0226215",
"https://bcr.bazel.build/modules/abseil-cpp/20250814.1/MODULE.bazel": "51f2312901470cdab0dbdf3b88c40cd21c62a7ed58a3de45b365ddc5b11bcab2",
"https://bcr.bazel.build/modules/abseil-cpp/20250814.1/source.json": "cea3901d7e299da7320700abbaafe57a65d039f10d0d7ea601c4a66938ea4b0c",
"https://bcr.bazel.build/modules/apple_support/1.11.1/MODULE.bazel": "1843d7cd8a58369a444fc6000e7304425fba600ff641592161d9f15b179fb896",
"https://bcr.bazel.build/modules/apple_support/1.15.1/MODULE.bazel": "a0556fefca0b1bb2de8567b8827518f94db6a6e7e7d632b4c48dc5f865bc7c85",
"https://bcr.bazel.build/modules/apple_support/1.21.0/MODULE.bazel": "ac1824ed5edf17dee2fdd4927ada30c9f8c3b520be1b5fd02a5da15bc10bff3e",
"https://bcr.bazel.build/modules/apple_support/1.21.1/MODULE.bazel": "5809fa3efab15d1f3c3c635af6974044bac8a4919c62238cce06acee8a8c11f1",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da",
"https://bcr.bazel.build/modules/apple_support/1.23.0/MODULE.bazel": "317d47e3f65b580e7fb4221c160797fda48e32f07d2dfff63d754ef2316dcd25",
"https://bcr.bazel.build/modules/apple_support/1.23.1/MODULE.bazel": "53763fed456a968cf919b3240427cf3a9d5481ec5466abc9d5dc51bc70087442",
"https://bcr.bazel.build/modules/apple_support/1.24.1/MODULE.bazel": "f46e8ddad60aef170ee92b2f3d00ef66c147ceafea68b6877cb45bd91737f5f8",
"https://bcr.bazel.build/modules/apple_support/1.24.2/MODULE.bazel": "0e62471818affb9f0b26f128831d5c40b074d32e6dda5a0d3852847215a41ca4",
"https://bcr.bazel.build/modules/apple_support/1.24.2/source.json": "2c22c9827093250406c5568da6c54e6fdf0ef06238def3d99c71b12feb057a8d",
"https://bcr.bazel.build/modules/apple_support/1.24.1/source.json": "cf725267cbacc5f028ef13bb77e7f2c2e0066923a4dab1025e4a0511b1ed258a",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/MODULE.bazel": "2b31ffcc9bdc8295b2167e07a757dbbc9ac8906e7028e5170a3708cecaac119f",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.19.3/MODULE.bazel": "253d739ba126f62a5767d832765b12b59e9f8d2bc88cc1572f4a73e46eb298ca",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.19.3/source.json": "ffab9254c65ba945f8369297ad97ca0dec213d3adc6e07877e23a48624a8b456",
@@ -29,21 +21,16 @@
"https://bcr.bazel.build/modules/aspect_tools_telemetry/0.3.2/MODULE.bazel": "598e7fe3b54f5fa64fdbeead1027653963a359cc23561d43680006f3b463d5a4",
"https://bcr.bazel.build/modules/aspect_tools_telemetry/0.3.2/source.json": "c6f5c39e6f32eb395f8fdaea63031a233bbe96d49a3bfb9f75f6fce9b74bec6c",
"https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
"https://bcr.bazel.build/modules/bazel_features/1.10.0/MODULE.bazel": "f75e8807570484a99be90abcd52b5e1f390362c258bcb73106f4544957a48101",
"https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
"https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d",
"https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d",
"https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a",
"https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58",
"https://bcr.bazel.build/modules/bazel_features/1.21.0/MODULE.bazel": "675642261665d8eea09989aa3b8afb5c37627f1be178382c320d1b46afba5e3b",
"https://bcr.bazel.build/modules/bazel_features/1.23.0/MODULE.bazel": "fd1ac84bc4e97a5a0816b7fd7d4d4f6d837b0047cf4cbd81652d616af3a6591a",
"https://bcr.bazel.build/modules/bazel_features/1.24.0/MODULE.bazel": "4796b4c25b47053e9bbffa792b3792d07e228ff66cd0405faef56a978708acd4",
"https://bcr.bazel.build/modules/bazel_features/1.27.0/MODULE.bazel": "621eeee06c4458a9121d1f104efb80f39d34deff4984e778359c60eaf1a8cb65",
"https://bcr.bazel.build/modules/bazel_features/1.28.0/MODULE.bazel": "4b4200e6cbf8fa335b2c3f43e1d6ef3e240319c33d43d60cc0fbd4b87ece299d",
"https://bcr.bazel.build/modules/bazel_features/1.3.0/MODULE.bazel": "cdcafe83ec318cda34e02948e81d790aab8df7a929cec6f6969f13a489ccecd9",
"https://bcr.bazel.build/modules/bazel_features/1.30.0/MODULE.bazel": "a14b62d05969a293b80257e72e597c2da7f717e1e69fa8b339703ed6731bec87",
"https://bcr.bazel.build/modules/bazel_features/1.32.0/MODULE.bazel": "095d67022a58cb20f7e20e1aefecfa65257a222c18a938e2914fd257b5f1ccdc",
"https://bcr.bazel.build/modules/bazel_features/1.33.0/MODULE.bazel": "8b8dc9d2a4c88609409c3191165bccec0e4cb044cd7a72ccbe826583303459f6",
"https://bcr.bazel.build/modules/bazel_features/1.34.0/MODULE.bazel": "e8475ad7c8965542e0c7aac8af68eb48c4af904be3d614b6aa6274c092c2ea1e",
"https://bcr.bazel.build/modules/bazel_features/1.34.0/source.json": "dfa5c4b01110313153b484a735764d247fee5624bbab63d25289e43b151a657a",
"https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
@@ -65,25 +52,20 @@
"https://bcr.bazel.build/modules/bazel_skylib/1.8.1/MODULE.bazel": "88ade7293becda963e0e3ea33e7d54d3425127e0a326e0d17da085a5f1f03ff6",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.2/MODULE.bazel": "69ad6927098316848b34a9142bcc975e018ba27f08c4ff403f50c1b6e646ca67",
"https://bcr.bazel.build/modules/bazel_skylib/1.8.2/source.json": "34a3c8bcf233b835eb74be9d628899bb32999d3e0eadef1947a0a562a2b16ffb",
"https://bcr.bazel.build/modules/buildozer/8.2.1/MODULE.bazel": "61e9433c574c2bd9519cad7fa66b9c1d2b8e8d5f3ae5d6528a2c2d26e68d874d",
"https://bcr.bazel.build/modules/buildozer/8.2.1/source.json": "7c33f6a26ee0216f85544b4bca5e9044579e0219b6898dd653f5fb449cf2e484",
"https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
"https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
"https://bcr.bazel.build/modules/gawk/5.3.2.bcr.1/MODULE.bazel": "cdf8cbe5ee750db04b78878c9633cc76e80dcf4416cbe982ac3a9222f80713c8",
"https://bcr.bazel.build/modules/gawk/5.3.2.bcr.1/source.json": "fa7b512dfcb5eafd90ce3959cf42a2a6fe96144ebbb4b3b3928054895f2afac2",
"https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
"https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4",
"https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": "22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6",
"https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4",
"https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f",
"https://bcr.bazel.build/modules/googletest/1.15.2/MODULE.bazel": "6de1edc1d26cafb0ea1a6ab3f4d4192d91a312fd2d360b63adaa213cd00b2108",
"https://bcr.bazel.build/modules/googletest/1.17.0/MODULE.bazel": "dbec758171594a705933a29fcf69293d2468c49ec1f2ebca65c36f504d72df46",
"https://bcr.bazel.build/modules/googletest/1.17.0/source.json": "38e4454b25fc30f15439c0378e57909ab1fd0a443158aa35aec685da727cd713",
"https://bcr.bazel.build/modules/jq.bzl/0.1.0/MODULE.bazel": "2ce69b1af49952cd4121a9c3055faa679e748ce774c7f1fda9657f936cae902f",
"https://bcr.bazel.build/modules/jq.bzl/0.1.0/source.json": "746bf13cac0860f091df5e4911d0c593971cd8796b5ad4e809b2f8e133eee3d5",
"https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075",
"https://bcr.bazel.build/modules/jsoncpp/1.9.6/MODULE.bazel": "2f8d20d3b7d54143213c4dfc3d98225c42de7d666011528dc8fe91591e2e17b0",
"https://bcr.bazel.build/modules/jsoncpp/1.9.6/source.json": "a04756d367a2126c3541682864ecec52f92cdee80a35735a3cb249ce015ca000",
"https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d",
"https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902",
"https://bcr.bazel.build/modules/nlohmann_json/3.6.1/MODULE.bazel": "6f7b417dcc794d9add9e556673ad25cb3ba835224290f4f848f8e2db1e1fca74",
"https://bcr.bazel.build/modules/nlohmann_json/3.6.1/source.json": "f448c6e8963fdfa7eb831457df83ad63d3d6355018f6574fb017e8169deb43a9",
"https://bcr.bazel.build/modules/openssl/3.5.4.bcr.0/MODULE.bazel": "0f6b8f20b192b9ff0781406256150bcd46f19e66d807dcb0c540548439d6fc35",
"https://bcr.bazel.build/modules/openssl/3.5.4.bcr.0/source.json": "543ed7627cc18e6460b9c1ae4a1b6b1debc5a5e0aca878b00f7531c7186b73da",
"https://bcr.bazel.build/modules/package_metadata/0.0.2/MODULE.bazel": "fb8d25550742674d63d7b250063d4580ca530499f045d70748b1b142081ebb92",
@@ -101,28 +83,21 @@
"https://bcr.bazel.build/modules/platforms/1.0.0/source.json": "f4ff1fd412e0246fd38c82328eb209130ead81d62dcd5a9e40910f867f733d96",
"https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
"https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c",
"https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d",
"https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df",
"https://bcr.bazel.build/modules/protobuf/29.0-rc3/MODULE.bazel": "33c2dfa286578573afc55a7acaea3cada4122b9631007c594bf0729f41c8de92",
"https://bcr.bazel.build/modules/protobuf/29.1/MODULE.bazel": "557c3457560ff49e122ed76c0bc3397a64af9574691cb8201b4e46d4ab2ecb95",
"https://bcr.bazel.build/modules/protobuf/29.0/MODULE.bazel": "319dc8bf4c679ff87e71b1ccfb5a6e90a6dbc4693501d471f48662ac46d04e4e",
"https://bcr.bazel.build/modules/protobuf/29.0/source.json": "b857f93c796750eef95f0d61ee378f3420d00ee1dd38627b27193aa482f4f981",
"https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0",
"https://bcr.bazel.build/modules/protobuf/32.1/MODULE.bazel": "89cd2866a9cb07fee9ff74c41ceace11554f32e0d849de4e23ac55515cfada4d",
"https://bcr.bazel.build/modules/protobuf/33.4/MODULE.bazel": "114775b816b38b6d0ca620450d6b02550c60ceedfdc8d9a229833b34a223dc42",
"https://bcr.bazel.build/modules/protobuf/33.4/source.json": "555f8686b4c7d6b5ba731fbea13bf656b4bfd9a7ff629c1d9d3f6e1d6155de79",
"https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e",
"https://bcr.bazel.build/modules/pybind11_bazel/2.12.0/MODULE.bazel": "e6f4c20442eaa7c90d7190d8dc539d0ab422f95c65a57cc59562170c58ae3d34",
"https://bcr.bazel.build/modules/pybind11_bazel/2.12.0/source.json": "6900fdc8a9e95866b8c0d4ad4aba4d4236317b5c1cd04c502df3f0d33afed680",
"https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022",
"https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206",
"https://bcr.bazel.build/modules/re2/2024-07-02.bcr.1/MODULE.bazel": "b4963dda9b31080be1905ef085ecd7dd6cd47c05c79b9cdf83ade83ab2ab271a",
"https://bcr.bazel.build/modules/re2/2024-07-02.bcr.1/source.json": "2ff292be6ef3340325ce8a045ecc326e92cbfab47c7cbab4bd85d28971b97ac4",
"https://bcr.bazel.build/modules/re2/2024-07-02/MODULE.bazel": "0eadc4395959969297cbcf31a249ff457f2f1d456228c67719480205aa306daa",
"https://bcr.bazel.build/modules/re2/2023-09-01/source.json": "e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4",
"https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8",
"https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e",
"https://bcr.bazel.build/modules/rules_apple/3.16.0/MODULE.bazel": "0d1caf0b8375942ce98ea944be754a18874041e4e0459401d925577624d3a54a",
"https://bcr.bazel.build/modules/rules_apple/4.1.0/MODULE.bazel": "76e10fd4a48038d3fc7c5dc6e63b7063bbf5304a2e3bd42edda6ec660eebea68",
"https://bcr.bazel.build/modules/rules_apple/4.1.0/source.json": "8ee81e1708756f81b343a5eb2b2f0b953f1d25c4ab3d4a68dc02754872e80715",
"https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647",
"https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002",
"https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191",
"https://bcr.bazel.build/modules/rules_cc/0.0.14/MODULE.bazel": "5e343a3aac88b8d7af3b1b6d2093b55c347b8eefc2e7d1442f7a02dc8fea48ac",
"https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc",
"https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87",
"https://bcr.bazel.build/modules/rules_cc/0.0.17/MODULE.bazel": "2ae1d8f4238ec67d7185d8861cb0a2cdf4bc608697c331b95bf990e69b62e64a",
@@ -131,37 +106,35 @@
"https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
"https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
"https://bcr.bazel.build/modules/rules_cc/0.1.1/MODULE.bazel": "2f0222a6f229f0bf44cd711dc13c858dad98c62d52bd51d8fc3a764a83125513",
"https://bcr.bazel.build/modules/rules_cc/0.1.2/MODULE.bazel": "557ddc3a96858ec0d465a87c0a931054d7dcfd6583af2c7ed3baf494407fd8d0",
"https://bcr.bazel.build/modules/rules_cc/0.1.5/MODULE.bazel": "88dfc9361e8b5ae1008ac38f7cdfd45ad738e4fa676a3ad67d19204f045a1fd8",
"https://bcr.bazel.build/modules/rules_cc/0.2.0/MODULE.bazel": "b5c17f90458caae90d2ccd114c81970062946f49f355610ed89bebf954f5783c",
"https://bcr.bazel.build/modules/rules_cc/0.2.13/MODULE.bazel": "eecdd666eda6be16a8d9dc15e44b5c75133405e820f620a234acc4b1fdc5aa37",
"https://bcr.bazel.build/modules/rules_cc/0.2.14/MODULE.bazel": "353c99ed148887ee89c54a17d4100ae7e7e436593d104b668476019023b58df8",
"https://bcr.bazel.build/modules/rules_cc/0.2.16/MODULE.bazel": "9242fa89f950c6ef7702801ab53922e99c69b02310c39fb6e62b2bd30df2a1d4",
"https://bcr.bazel.build/modules/rules_cc/0.2.16/source.json": "d03d5cde49376d87e14ec14b666c56075e5e3926930327fd5d0484a1ff2ac1cc",
"https://bcr.bazel.build/modules/rules_cc/0.2.4/MODULE.bazel": "1ff1223dfd24f3ecf8f028446d4a27608aa43c3f41e346d22838a4223980b8cc",
"https://bcr.bazel.build/modules/rules_cc/0.2.8/MODULE.bazel": "f1df20f0bf22c28192a794f29b501ee2018fa37a3862a1a2132ae2940a23a642",
"https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e",
"https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74",
"https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86",
"https://bcr.bazel.build/modules/rules_java/6.0.0/MODULE.bazel": "8a43b7df601a7ec1af61d79345c17b31ea1fedc6711fd4abfd013ea612978e39",
"https://bcr.bazel.build/modules/rules_java/6.3.0/MODULE.bazel": "a97c7678c19f236a956ad260d59c86e10a463badb7eb2eda787490f4c969b963",
"https://bcr.bazel.build/modules/rules_java/6.4.0/MODULE.bazel": "e986a9fe25aeaa84ac17ca093ef13a4637f6107375f64667a15999f77db6c8f6",
"https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": "1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31",
"https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a",
"https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6",
"https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab",
"https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2",
"https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
"https://bcr.bazel.build/modules/rules_java/8.3.2/MODULE.bazel": "7336d5511ad5af0b8615fdc7477535a2e4e723a357b6713af439fe8cf0195017",
"https://bcr.bazel.build/modules/rules_java/8.5.1/MODULE.bazel": "d8a9e38cc5228881f7055a6079f6f7821a073df3744d441978e7a43e20226939",
"https://bcr.bazel.build/modules/rules_java/8.14.0/MODULE.bazel": "717717ed40cc69994596a45aec6ea78135ea434b8402fb91b009b9151dd65615",
"https://bcr.bazel.build/modules/rules_java/8.14.0/source.json": "8a88c4ca9e8759da53cddc88123880565c520503321e2566b4e33d0287a3d4bc",
"https://bcr.bazel.build/modules/rules_java/8.6.0/MODULE.bazel": "9c064c434606d75a086f15ade5edb514308cccd1544c2b2a89bbac4310e41c71",
"https://bcr.bazel.build/modules/rules_java/8.6.1/MODULE.bazel": "f4808e2ab5b0197f094cabce9f4b006a27766beb6a9975931da07099560ca9c2",
"https://bcr.bazel.build/modules/rules_java/9.0.3/MODULE.bazel": "1f98ed015f7e744a745e0df6e898a7c5e83562d6b759dfd475c76456dda5ccea",
"https://bcr.bazel.build/modules/rules_java/9.0.3/source.json": "b038c0c07e12e658135bbc32cc1a2ded6e33785105c9d41958014c592de4593e",
"https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
"https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909",
"https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036",
"https://bcr.bazel.build/modules/rules_jvm_external/5.3/MODULE.bazel": "bf93870767689637164657731849fb887ad086739bd5d360d90007a581d5527d",
"https://bcr.bazel.build/modules/rules_jvm_external/6.1/MODULE.bazel": "75b5fec090dbd46cf9b7d8ea08cf84a0472d92ba3585b476f44c326eda8059c4",
"https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0",
"https://bcr.bazel.build/modules/rules_jvm_external/6.7/MODULE.bazel": "e717beabc4d091ecb2c803c2d341b88590e9116b8bf7947915eeb33aab4f96dd",
"https://bcr.bazel.build/modules/rules_jvm_external/6.7/source.json": "5426f412d0a7fc6b611643376c7e4a82dec991491b9ce5cb1cfdd25fe2e92be4",
"https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": "6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.0/MODULE.bazel": "ef85697305025e5a61f395d4eaede272a5393cee479ace6686dba707de804d59",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5",
"https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0",
@@ -177,47 +150,34 @@
"https://bcr.bazel.build/modules/rules_platform/0.1.0/source.json": "98becf9569572719b65f639133510633eb3527fb37d347d7ef08447f3ebcf1c9",
"https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
"https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
"https://bcr.bazel.build/modules/rules_proto/6.0.0-rc1/MODULE.bazel": "1e5b502e2e1a9e825eef74476a5a1ee524a92297085015a052510b09a1a09483",
"https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
"https://bcr.bazel.build/modules/rules_proto/7.1.0/MODULE.bazel": "002d62d9108f75bb807cd56245d45648f38275cb3a99dcd45dfb864c5d74cb96",
"https://bcr.bazel.build/modules/rules_proto/7.1.0/source.json": "39f89066c12c24097854e8f57ab8558929f9c8d474d34b2c00ac04630ad8940e",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",
"https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f",
"https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300",
"https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382",
"https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed",
"https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58",
"https://bcr.bazel.build/modules/rules_python/0.33.2/MODULE.bazel": "3e036c4ad8d804a4dad897d333d8dce200d943df4827cb849840055be8d2e937",
"https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c",
"https://bcr.bazel.build/modules/rules_python/1.3.0/MODULE.bazel": "8361d57eafb67c09b75bf4bbe6be360e1b8f4f18118ab48037f2bd50aa2ccb13",
"https://bcr.bazel.build/modules/rules_python/1.4.1/MODULE.bazel": "8991ad45bdc25018301d6b7e1d3626afc3c8af8aaf4bc04f23d0b99c938b73a6",
"https://bcr.bazel.build/modules/rules_python/1.6.0/MODULE.bazel": "7e04ad8f8d5bea40451cf80b1bd8262552aa73f841415d20db96b7241bd027d8",
"https://bcr.bazel.build/modules/rules_python/1.7.0/MODULE.bazel": "d01f995ecd137abf30238ad9ce97f8fc3ac57289c8b24bd0bf53324d937a14f8",
"https://bcr.bazel.build/modules/rules_python/1.7.0/source.json": "028a084b65dcf8f4dc4f82f8778dbe65df133f234b316828a82e060d81bdce32",
"https://bcr.bazel.build/modules/rules_python/0.40.0/MODULE.bazel": "9d1a3cd88ed7d8e39583d9ffe56ae8a244f67783ae89b60caafc9f5cf318ada7",
"https://bcr.bazel.build/modules/rules_python/0.40.0/source.json": "939d4bd2e3110f27bfb360292986bb79fd8dcefb874358ccd6cdaa7bda029320",
"https://bcr.bazel.build/modules/rules_rs/0.0.23/MODULE.bazel": "2e7ae2044105b1873a451c628713329d6746493f677b371f9d8063fd06a00937",
"https://bcr.bazel.build/modules/rules_rs/0.0.23/source.json": "1149e7f599f2e41e9e9de457f9c4deb3d219a4fec967cea30557d02ede88037e",
"https://bcr.bazel.build/modules/rules_rust/0.66.0/MODULE.bazel": "86ef763a582f4739a27029bdcc6c562258ed0ea6f8d58294b049e215ceb251b3",
"https://bcr.bazel.build/modules/rules_rust/0.68.1/MODULE.bazel": "8d3332ef4079673385eb81f8bd68b012decc04ac00c9d5a01a40eff90301732c",
"https://bcr.bazel.build/modules/rules_rust/0.68.1/source.json": "3378e746f81b62457fdfd37391244fa8ff075ba85c05931ee4f3a20ac1efe963",
"https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c",
"https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b",
"https://bcr.bazel.build/modules/rules_shell/0.4.0/MODULE.bazel": "0f8f11bb3cd11755f0b48c1de0bbcf62b4b34421023aa41a2fc74ef68d9584f0",
"https://bcr.bazel.build/modules/rules_shell/0.4.1/MODULE.bazel": "00e501db01bbf4e3e1dd1595959092c2fadf2087b2852d3f553b5370f5633592",
"https://bcr.bazel.build/modules/rules_shell/0.6.1/MODULE.bazel": "72e76b0eea4e81611ef5452aa82b3da34caca0c8b7b5c0c9584338aa93bae26b",
"https://bcr.bazel.build/modules/rules_shell/0.6.1/source.json": "20ec05cd5e592055e214b2da8ccb283c7f2a421ea0dc2acbf1aa792e11c03d0c",
"https://bcr.bazel.build/modules/rules_swift/1.16.0/MODULE.bazel": "4a09f199545a60d09895e8281362b1ff3bb08bbde69c6fc87aff5b92fcc916ca",
"https://bcr.bazel.build/modules/rules_swift/2.1.1/MODULE.bazel": "494900a80f944fc7aa61500c2073d9729dff0b764f0e89b824eb746959bc1046",
"https://bcr.bazel.build/modules/rules_swift/2.4.0/MODULE.bazel": "1639617eb1ede28d774d967a738b4a68b0accb40650beadb57c21846beab5efd",
"https://bcr.bazel.build/modules/rules_swift/3.1.2/MODULE.bazel": "72c8f5cf9d26427cee6c76c8e3853eb46ce6b0412a081b2b6db6e8ad56267400",
"https://bcr.bazel.build/modules/rules_swift/3.1.2/source.json": "e85761f3098a6faf40b8187695e3de6d97944e98abd0d8ce579cb2daf6319a66",
"https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
"https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
"https://bcr.bazel.build/modules/stardoc/0.5.6/MODULE.bazel": "c43dabc564990eeab55e25ed61c07a1aadafe9ece96a4efabb3f8bf9063b71ef",
"https://bcr.bazel.build/modules/stardoc/0.6.2/MODULE.bazel": "7060193196395f5dd668eda046ccbeacebfd98efc77fed418dbe2b82ffaa39fd",
"https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c",
"https://bcr.bazel.build/modules/stardoc/0.7.2/MODULE.bazel": "fc152419aa2ea0f51c29583fab1e8c99ddefd5b3778421845606ee628629e0e5",
"https://bcr.bazel.build/modules/stardoc/0.7.2/source.json": "58b029e5e901d6802967754adf0a9056747e8176f017cfe3607c0851f4d42216",
"https://bcr.bazel.build/modules/swift_argument_parser/1.3.1.1/MODULE.bazel": "5e463fbfba7b1701d957555ed45097d7f984211330106ccd1352c6e0af0dcf91",
"https://bcr.bazel.build/modules/swift_argument_parser/1.3.1.2/MODULE.bazel": "75aab2373a4bbe2a1260b9bf2a1ebbdbf872d3bd36f80bff058dccd82e89422f",
"https://bcr.bazel.build/modules/swift_argument_parser/1.3.1.2/source.json": "5fba48bbe0ba48761f9e9f75f92876cafb5d07c0ce059cc7a8027416de94a05b",
"https://bcr.bazel.build/modules/stardoc/0.7.1/MODULE.bazel": "3548faea4ee5dda5580f9af150e79d0f6aea934fc60c1cc50f4efdd9420759e7",
"https://bcr.bazel.build/modules/stardoc/0.7.1/source.json": "b6500ffcd7b48cd72c29bb67bcac781e12701cc0d6d55d266a652583cfcdab01",
"https://bcr.bazel.build/modules/tar.bzl/0.2.1/MODULE.bazel": "52d1c00a80a8cc67acbd01649e83d8dd6a9dc426a6c0b754a04fe8c219c76468",
"https://bcr.bazel.build/modules/tar.bzl/0.6.0/MODULE.bazel": "a3584b4edcfafcabd9b0ef9819808f05b372957bbdff41601429d5fd0aac2e7c",
"https://bcr.bazel.build/modules/tar.bzl/0.6.0/source.json": "4a620381df075a16cb3a7ed57bd1d05f7480222394c64a20fa51bdb636fda658",
@@ -237,10 +197,9 @@
"general": {
"bzlTransitiveDigest": "dnnhvKMf9MIXMulhbhHBblZdDAfAkiSVjApIXpUz9Y8=",
"usagesDigest": "dPuxg6asjUidjHZi+xFfMiW+r9RawVYGjTZnOeP+fLI=",
"recordedInputs": [
"REPO_MAPPING:aspect_tools_telemetry+,bazel_lib bazel_lib+",
"REPO_MAPPING:aspect_tools_telemetry+,bazel_skylib bazel_skylib+"
],
"recordedFileInputs": {},
"recordedDirentsInputs": {},
"envVariables": {},
"generatedRepoSpecs": {
"aspect_tools_telemetry_report": {
"repoRuleId": "@@aspect_tools_telemetry+//:extension.bzl%tel_repository",
@@ -287,16 +246,28 @@
}
}
}
}
},
"recordedRepoMappingEntries": [
[
"aspect_tools_telemetry+",
"bazel_lib",
"bazel_lib+"
],
[
"aspect_tools_telemetry+",
"bazel_skylib",
"bazel_skylib+"
]
]
}
},
"@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": {
"general": {
"bzlTransitiveDigest": "ABI1D/sbS1ovwaW/kHDoj8nnXjQ0oKU9fzmzEG4iT8o=",
"bzlTransitiveDigest": "rL/34P1aFDq2GqVC2zCFgQ8nTuOC6ziogocpvG50Qz8=",
"usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=",
"recordedInputs": [
"REPO_MAPPING:rules_kotlin+,bazel_tools bazel_tools"
],
"recordedFileInputs": {},
"recordedDirentsInputs": {},
"envVariables": {},
"generatedRepoSpecs": {
"com_github_jetbrains_kotlin_git": {
"repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_compiler_git_repository",
@@ -344,205 +315,14 @@
]
}
}
}
}
},
"@@rules_python+//python/extensions:config.bzl%config": {
"general": {
"bzlTransitiveDigest": "2hLgIvNVTLgxus0ZuXtleBe70intCfo0cHs8qvt6cdM=",
"usagesDigest": "ZVSXMAGpD+xzVNPuvF1IoLBkty7TROO0+akMapt1pAg=",
"recordedInputs": [
"REPO_MAPPING:rules_python+,bazel_tools bazel_tools",
"REPO_MAPPING:rules_python+,pypi__build rules_python++config+pypi__build",
"REPO_MAPPING:rules_python+,pypi__click rules_python++config+pypi__click",
"REPO_MAPPING:rules_python+,pypi__colorama rules_python++config+pypi__colorama",
"REPO_MAPPING:rules_python+,pypi__importlib_metadata rules_python++config+pypi__importlib_metadata",
"REPO_MAPPING:rules_python+,pypi__installer rules_python++config+pypi__installer",
"REPO_MAPPING:rules_python+,pypi__more_itertools rules_python++config+pypi__more_itertools",
"REPO_MAPPING:rules_python+,pypi__packaging rules_python++config+pypi__packaging",
"REPO_MAPPING:rules_python+,pypi__pep517 rules_python++config+pypi__pep517",
"REPO_MAPPING:rules_python+,pypi__pip rules_python++config+pypi__pip",
"REPO_MAPPING:rules_python+,pypi__pip_tools rules_python++config+pypi__pip_tools",
"REPO_MAPPING:rules_python+,pypi__pyproject_hooks rules_python++config+pypi__pyproject_hooks",
"REPO_MAPPING:rules_python+,pypi__setuptools rules_python++config+pypi__setuptools",
"REPO_MAPPING:rules_python+,pypi__tomli rules_python++config+pypi__tomli",
"REPO_MAPPING:rules_python+,pypi__wheel rules_python++config+pypi__wheel",
"REPO_MAPPING:rules_python+,pypi__zipp rules_python++config+pypi__zipp"
],
"generatedRepoSpecs": {
"rules_python_internal": {
"repoRuleId": "@@rules_python+//python/private:internal_config_repo.bzl%internal_config_repo",
"attributes": {
"transition_setting_generators": {},
"transition_settings": []
}
},
"pypi__build": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/e2/03/f3c8ba0a6b6e30d7d18c40faab90807c9bb5e9a1e3b2fe2008af624a9c97/build-1.2.1-py3-none-any.whl",
"sha256": "75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__click": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl",
"sha256": "ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__colorama": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl",
"sha256": "4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__importlib_metadata": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/2d/0a/679461c511447ffaf176567d5c496d1de27cbe34a87df6677d7171b2fbd4/importlib_metadata-7.1.0-py3-none-any.whl",
"sha256": "30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__installer": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/e5/ca/1172b6638d52f2d6caa2dd262ec4c811ba59eee96d54a7701930726bce18/installer-0.7.0-py3-none-any.whl",
"sha256": "05d1933f0a5ba7d8d6296bb6d5018e7c94fa473ceb10cf198a92ccea19c27b53",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__more_itertools": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/50/e2/8e10e465ee3987bb7c9ab69efb91d867d93959095f4807db102d07995d94/more_itertools-10.2.0-py3-none-any.whl",
"sha256": "686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__packaging": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/49/df/1fceb2f8900f8639e278b056416d49134fb8d84c5942ffaa01ad34782422/packaging-24.0-py3-none-any.whl",
"sha256": "2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__pep517": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/25/6e/ca4a5434eb0e502210f591b97537d322546e4833dcb4d470a48c375c5540/pep517-0.13.1-py3-none-any.whl",
"sha256": "31b206f67165b3536dd577c5c3f1518e8fbaf38cbc57efff8369a392feff1721",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__pip": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/8a/6a/19e9fe04fca059ccf770861c7d5721ab4c2aebc539889e97c7977528a53b/pip-24.0-py3-none-any.whl",
"sha256": "ba0d021a166865d2265246961bec0152ff124de910c5cc39f1156ce3fa7c69dc",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__pip_tools": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/0d/dc/38f4ce065e92c66f058ea7a368a9c5de4e702272b479c0992059f7693941/pip_tools-7.4.1-py3-none-any.whl",
"sha256": "4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__pyproject_hooks": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/ae/f3/431b9d5fe7d14af7a32340792ef43b8a714e7726f1d7b69cc4e8e7a3f1d7/pyproject_hooks-1.1.0-py3-none-any.whl",
"sha256": "7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__setuptools": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/90/99/158ad0609729111163fc1f674a5a42f2605371a4cf036d0441070e2f7455/setuptools-78.1.1-py3-none-any.whl",
"sha256": "c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__tomli": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl",
"sha256": "939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__wheel": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/7d/cd/d7460c9a869b16c3dd4e1e403cce337df165368c71d6af229a74699622ce/wheel-0.43.0-py3-none-any.whl",
"sha256": "55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
},
"pypi__zipp": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"url": "https://files.pythonhosted.org/packages/da/55/a03fd7240714916507e1fcf7ae355bd9d9ed2e6db492595f1a67f61681be/zipp-3.18.2-py3-none-any.whl",
"sha256": "dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e",
"type": "zip",
"build_file_content": "package(default_visibility = [\"//visibility:public\"])\n\nload(\"@rules_python//python:py_library.bzl\", \"py_library\")\n\npy_library(\n name = \"lib\",\n srcs = glob([\"**/*.py\"]),\n data = glob([\"**/*\"], exclude=[\n # These entries include those put into user-installed dependencies by\n # data_exclude to avoid non-determinism.\n \"**/*.py\",\n \"**/*.pyc\",\n \"**/*.pyc.*\", # During pyc creation, temp files named *.pyc.NNN are created\n \"**/*.dist-info/RECORD\",\n \"BUILD\",\n \"WORKSPACE\",\n ]),\n # This makes this directory a top-level in the python import\n # search path for anything that depends on this.\n imports = [\".\"],\n)\n"
}
}
}
}
},
"@@rules_python+//python/uv:uv.bzl%uv": {
"general": {
"bzlTransitiveDigest": "ijW9KS7qsIY+yBVvJ+Nr1mzwQox09j13DnE3iIwaeTM=",
"usagesDigest": "H8dQoNZcoqP+Mu0tHZTi4KHATzvNkM5ePuEqoQdklIU=",
"recordedInputs": [
"REPO_MAPPING:rules_python+,bazel_tools bazel_tools",
"REPO_MAPPING:rules_python+,platforms platforms"
],
"generatedRepoSpecs": {
"uv": {
"repoRuleId": "@@rules_python+//python/uv/private:uv_toolchains_repo.bzl%uv_toolchains_repo",
"attributes": {
"toolchain_type": "'@@rules_python+//python/uv:uv_toolchain_type'",
"toolchain_names": [
"none"
],
"toolchain_implementations": {
"none": "'@@rules_python+//python:none'"
},
"toolchain_compatible_with": {
"none": [
"@platforms//:incompatible"
]
},
"toolchain_target_settings": {}
}
}
}
},
"recordedRepoMappingEntries": [
[
"rules_kotlin+",
"bazel_tools",
"bazel_tools"
]
]
}
}
},
@@ -575,7 +355,7 @@
"anstyle_1.0.11": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"lexopt\",\"req\":\"^0.3.0\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"anyhow_1.0.100": "{\"dependencies\":[{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.51\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.6\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"syn\",\"req\":\"^2.0\"},{\"kind\":\"dev\",\"name\":\"thiserror\",\"req\":\"^2\"},{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.108\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"arboard_3.6.1": "{\"dependencies\":[{\"features\":[\"std\"],\"name\":\"clipboard-win\",\"req\":\"^5.3.1\",\"target\":\"cfg(windows)\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.10.2\"},{\"default_features\":false,\"features\":[\"png\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"default_features\":false,\"features\":[\"tiff\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"png\",\"bmp\"],\"name\":\"image\",\"optional\":true,\"req\":\"^0.25\",\"target\":\"cfg(windows)\"},{\"name\":\"log\",\"req\":\"^0.4\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"log\",\"req\":\"^0.4\",\"target\":\"cfg(windows)\"},{\"name\":\"objc2\",\"req\":\"^0.6.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"objc2-core-graphics\",\"NSPasteboard\",\"NSPasteboardItem\",\"NSImage\"],\"name\":\"objc2-app-kit\",\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"CFCGTypes\"],\"name\":\"objc2-core-foundation\",\"optional\":true,\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"CGImage\",\"CGColorSpace\",\"CGDataProvider\"],\"name\":\"objc2-core-graphics\",\"optional\":true,\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"default_features\":false,\"features\":[\"std\",\"NSArray\",\"NSString\",\"NSEnumerator\",\"NSGeometry\",\"NSValue\"],\"name\":\"objc2-foundation\",\"req\":\"^0.3.0\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"name\":\"parking_lot\",\"req\":\"^0.12\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"percent-encoding\",\"req\":\"^2.3.1\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"features\":[\"Win32_Foundation\",\"Win32_Storage_FileSystem\",\"Win32_System_DataExchange\",\"Win32_System_Memory\",\"Win32_System_Ole\",\"Win32_UI_Shell\"],\"name\":\"windows-sys\",\"req\":\">=0.52.0, <0.61.0\",\"target\":\"cfg(windows)\"},{\"name\":\"wl-clipboard-rs\",\"optional\":true,\"req\":\"^0.9.0\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"},{\"name\":\"x11rb\",\"req\":\"^0.13\",\"target\":\"cfg(all(unix, not(any(target_os=\\\"macos\\\", target_os=\\\"android\\\", target_os=\\\"emscripten\\\"))))\"}],\"features\":{\"core-graphics\":[\"dep:objc2-core-graphics\"],\"default\":[\"image-data\"],\"image\":[\"dep:image\"],\"image-data\":[\"dep:objc2-core-graphics\",\"dep:objc2-core-foundation\",\"image\",\"windows-sys\",\"core-graphics\"],\"wayland-data-control\":[\"wl-clipboard-rs\"],\"windows-sys\":[\"windows-sys/Win32_Graphics_Gdi\"],\"wl-clipboard-rs\":[\"dep:wl-clipboard-rs\"]}}",
"arc-swap_1.8.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"adaptive-barrier\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"~0.7\"},{\"kind\":\"dev\",\"name\":\"crossbeam-utils\",\"req\":\"~0.8\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.14\"},{\"kind\":\"dev\",\"name\":\"num_cpus\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"~0.12\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"name\":\"rustversion\",\"req\":\"^1\"},{\"features\":[\"rc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0.130\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.177\"}],\"features\":{\"experimental-strategies\":[],\"experimental-thread-local\":[],\"internal-test-strategies\":[],\"weak\":[]}}",
"arc-swap_1.7.1": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"adaptive-barrier\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"~0.5\"},{\"kind\":\"dev\",\"name\":\"crossbeam-utils\",\"req\":\"~0.8\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.12\"},{\"kind\":\"dev\",\"name\":\"num_cpus\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"~1\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"~0.12\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"features\":[\"rc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0.130\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0.130\"}],\"features\":{\"experimental-strategies\":[],\"experimental-thread-local\":[],\"internal-test-strategies\":[],\"weak\":[]}}",
"arrayvec_0.7.6": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.4\"},{\"default_features\":false,\"name\":\"borsh\",\"optional\":true,\"req\":\"^1.2.0\"},{\"kind\":\"dev\",\"name\":\"matches\",\"req\":\"^0.1\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.4\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"ascii-canvas_3.0.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"diff\",\"req\":\"^0.1\"},{\"name\":\"term\",\"req\":\"^0.7\"}],\"features\":{}}",
"ascii_1.1.0": "{\"dependencies\":[{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.25\"},{\"name\":\"serde_test\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"alloc\":[],\"default\":[\"std\"],\"std\":[\"alloc\"]}}",
@@ -626,7 +406,7 @@
"cfg_aliases_0.1.1": "{\"dependencies\":[],\"features\":{}}",
"cfg_aliases_0.2.1": "{\"dependencies\":[],\"features\":{}}",
"chardetng_0.1.17": "{\"dependencies\":[{\"name\":\"arrayvec\",\"optional\":true,\"req\":\"^0.5.1\"},{\"name\":\"cfg-if\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"detone\",\"req\":\"^1.0.0\"},{\"default_features\":false,\"name\":\"encoding_rs\",\"req\":\"^0.8.29\"},{\"default_features\":false,\"name\":\"memchr\",\"req\":\"^2.2.0\"},{\"name\":\"rayon\",\"optional\":true,\"req\":\"^1.3.0\"}],\"features\":{\"multithreading\":[\"rayon\",\"arrayvec\"],\"testing-only-no-semver-guarantees-do-not-use\":[]}}",
"chrono_0.4.43": "{\"dependencies\":[{\"features\":[\"derive\"],\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.3.0\"},{\"name\":\"defmt\",\"optional\":true,\"req\":\"^1.0.1\"},{\"features\":[\"fallback\"],\"name\":\"iana-time-zone\",\"optional\":true,\"req\":\"^0.1.45\",\"target\":\"cfg(unix)\"},{\"name\":\"js-sys\",\"optional\":true,\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"default_features\":false,\"name\":\"num-traits\",\"req\":\"^0.2\"},{\"name\":\"pure-rust-locales\",\"optional\":true,\"req\":\"^0.8.2\"},{\"default_features\":false,\"name\":\"rkyv\",\"optional\":true,\"req\":\"^0.7.43\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.99\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"similar-asserts\",\"req\":\"^1.6.1\"},{\"name\":\"wasm-bindgen\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"windows-bindgen\",\"req\":\"^0.66\"},{\"name\":\"windows-link\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(windows)\"}],\"features\":{\"__internal_bench\":[],\"alloc\":[],\"clock\":[\"winapi\",\"iana-time-zone\",\"now\"],\"core-error\":[],\"default\":[\"clock\",\"std\",\"oldtime\",\"wasmbind\"],\"defmt\":[\"dep:defmt\",\"pure-rust-locales?/defmt\"],\"libc\":[],\"now\":[\"std\"],\"oldtime\":[],\"rkyv\":[\"dep:rkyv\",\"rkyv/size_32\"],\"rkyv-16\":[\"dep:rkyv\",\"rkyv?/size_16\"],\"rkyv-32\":[\"dep:rkyv\",\"rkyv?/size_32\"],\"rkyv-64\":[\"dep:rkyv\",\"rkyv?/size_64\"],\"rkyv-validation\":[\"rkyv?/validation\"],\"std\":[\"alloc\"],\"unstable-locales\":[\"pure-rust-locales\"],\"wasmbind\":[\"wasm-bindgen\",\"js-sys\"],\"winapi\":[\"windows-link\"]}}",
"chrono_0.4.42": "{\"dependencies\":[{\"features\":[\"derive\"],\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.3.0\"},{\"features\":[\"fallback\"],\"name\":\"iana-time-zone\",\"optional\":true,\"req\":\"^0.1.45\",\"target\":\"cfg(unix)\"},{\"name\":\"js-sys\",\"optional\":true,\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"default_features\":false,\"name\":\"num-traits\",\"req\":\"^0.2\"},{\"name\":\"pure-rust-locales\",\"optional\":true,\"req\":\"^0.8\"},{\"default_features\":false,\"name\":\"rkyv\",\"optional\":true,\"req\":\"^0.7.43\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.99\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"similar-asserts\",\"req\":\"^1.6.1\"},{\"name\":\"wasm-bindgen\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"windows-bindgen\",\"req\":\"^0.63\",\"target\":\"cfg(windows)\"},{\"name\":\"windows-link\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(windows)\"}],\"features\":{\"__internal_bench\":[],\"alloc\":[],\"clock\":[\"winapi\",\"iana-time-zone\",\"now\"],\"core-error\":[],\"default\":[\"clock\",\"std\",\"oldtime\",\"wasmbind\"],\"libc\":[],\"now\":[\"std\"],\"oldtime\":[],\"rkyv\":[\"dep:rkyv\",\"rkyv/size_32\"],\"rkyv-16\":[\"dep:rkyv\",\"rkyv?/size_16\"],\"rkyv-32\":[\"dep:rkyv\",\"rkyv?/size_32\"],\"rkyv-64\":[\"dep:rkyv\",\"rkyv?/size_64\"],\"rkyv-validation\":[\"rkyv?/validation\"],\"std\":[\"alloc\"],\"unstable-locales\":[\"pure-rust-locales\"],\"wasmbind\":[\"wasm-bindgen\",\"js-sys\"],\"winapi\":[\"windows-link\"]}}",
"chunked_transfer_1.5.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3\"}],\"features\":{}}",
"cipher_0.4.4": "{\"dependencies\":[{\"name\":\"blobby\",\"optional\":true,\"req\":\"^0.3\"},{\"name\":\"crypto-common\",\"req\":\"^0.1.6\"},{\"name\":\"inout\",\"req\":\"^0.1\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.5\"}],\"features\":{\"alloc\":[],\"block-padding\":[\"inout/block-padding\"],\"dev\":[\"blobby\"],\"rand_core\":[\"crypto-common/rand_core\"],\"std\":[\"alloc\",\"crypto-common/std\",\"inout/std\"]}}",
"clap_4.5.54": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"automod\",\"req\":\"^1.0.14\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"clap-cargo\",\"req\":\"^0.15.0\"},{\"default_features\":false,\"name\":\"clap_builder\",\"req\":\"=4.5.54\"},{\"name\":\"clap_derive\",\"optional\":true,\"req\":\"=4.5.49\"},{\"kind\":\"dev\",\"name\":\"jiff\",\"req\":\"^0.2.3\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.15\"},{\"kind\":\"dev\",\"name\":\"semver\",\"req\":\"^1.0.26\"},{\"kind\":\"dev\",\"name\":\"shlex\",\"req\":\"^1.3.0\"},{\"features\":[\"term-svg\"],\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.16\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.91\"},{\"default_features\":false,\"features\":[\"color-auto\",\"diff\",\"examples\"],\"kind\":\"dev\",\"name\":\"trycmd\",\"req\":\"^0.15.3\"}],\"features\":{\"cargo\":[\"clap_builder/cargo\"],\"color\":[\"clap_builder/color\"],\"debug\":[\"clap_builder/debug\",\"clap_derive?/debug\"],\"default\":[\"std\",\"color\",\"help\",\"usage\",\"error-context\",\"suggestions\"],\"deprecated\":[\"clap_builder/deprecated\",\"clap_derive?/deprecated\"],\"derive\":[\"dep:clap_derive\"],\"env\":[\"clap_builder/env\"],\"error-context\":[\"clap_builder/error-context\"],\"help\":[\"clap_builder/help\"],\"std\":[\"clap_builder/std\"],\"string\":[\"clap_builder/string\"],\"suggestions\":[\"clap_builder/suggestions\"],\"unicode\":[\"clap_builder/unicode\"],\"unstable-derive-ui-tests\":[],\"unstable-doc\":[\"clap_builder/unstable-doc\",\"derive\"],\"unstable-ext\":[\"clap_builder/unstable-ext\"],\"unstable-markdown\":[\"clap_derive/unstable-markdown\"],\"unstable-styles\":[\"clap_builder/unstable-styles\"],\"unstable-v5\":[\"clap_builder/unstable-v5\",\"clap_derive?/unstable-v5\",\"deprecated\"],\"usage\":[\"clap_builder/usage\"],\"wrap_help\":[\"clap_builder/wrap_help\"]}}",
@@ -641,6 +421,7 @@
"colorchoice_1.0.4": "{\"dependencies\":[],\"features\":{}}",
"combine_4.6.7": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-std\",\"req\":\"^1\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"bytes\",\"req\":\"^1\"},{\"name\":\"bytes_05\",\"optional\":true,\"package\":\"bytes\",\"req\":\"^0.5\"},{\"kind\":\"dev\",\"name\":\"bytes_05\",\"package\":\"bytes\",\"req\":\"^0.5\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3\"},{\"kind\":\"dev\",\"name\":\"futures-03-dep\",\"package\":\"futures\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"name\":\"futures-core-03\",\"optional\":true,\"package\":\"futures-core\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"name\":\"futures-io-03\",\"optional\":true,\"package\":\"futures-io\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"name\":\"memchr\",\"req\":\"^2.3\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1.0\"},{\"features\":[\"tokio\",\"quickcheck\"],\"kind\":\"dev\",\"name\":\"partial-io\",\"req\":\"^0.3\"},{\"name\":\"pin-project-lite\",\"optional\":true,\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"quick-error\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^0.6\"},{\"name\":\"regex\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"io-util\"],\"name\":\"tokio-02-dep\",\"optional\":true,\"package\":\"tokio\",\"req\":\"^0.2.3\"},{\"features\":[\"fs\",\"io-driver\",\"io-util\",\"macros\"],\"kind\":\"dev\",\"name\":\"tokio-02-dep\",\"package\":\"tokio\",\"req\":\"^0.2\"},{\"default_features\":false,\"name\":\"tokio-03-dep\",\"optional\":true,\"package\":\"tokio\",\"req\":\"^0.3\"},{\"features\":[\"fs\",\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio-03-dep\",\"package\":\"tokio\",\"req\":\"^0.3\"},{\"default_features\":false,\"name\":\"tokio-dep\",\"optional\":true,\"package\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"fs\",\"macros\",\"rt\",\"rt-multi-thread\",\"io-util\"],\"kind\":\"dev\",\"name\":\"tokio-dep\",\"package\":\"tokio\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"codec\"],\"name\":\"tokio-util\",\"optional\":true,\"req\":\"^0.7\"}],\"features\":{\"alloc\":[],\"default\":[\"std\"],\"futures-03\":[\"pin-project\",\"std\",\"futures-core-03\",\"futures-io-03\",\"pin-project-lite\"],\"mp4\":[],\"pin-project\":[\"pin-project-lite\"],\"std\":[\"memchr/std\",\"bytes\",\"alloc\"],\"tokio\":[\"tokio-dep\",\"tokio-util/io\",\"futures-core-03\",\"pin-project-lite\"],\"tokio-02\":[\"pin-project\",\"std\",\"tokio-02-dep\",\"futures-core-03\",\"pin-project-lite\",\"bytes_05\"],\"tokio-03\":[\"pin-project\",\"std\",\"tokio-03-dep\",\"futures-core-03\",\"pin-project-lite\"]}}",
"compact_str_0.8.1": "{\"dependencies\":[{\"default_features\":false,\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"borsh\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"castaway\",\"req\":\"^0.2.3\"},{\"name\":\"cfg-if\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"cfg-if\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"diesel\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"itoa\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"markup\",\"optional\":true,\"req\":\"^0.13\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"proptest\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"std\"],\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"quickcheck\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"quickcheck_macros\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"rayon\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"size_32\"],\"name\":\"rkyv\",\"optional\":true,\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"alloc\",\"size_32\"],\"kind\":\"dev\",\"name\":\"rkyv\",\"req\":\"^0.7\"},{\"name\":\"rustversion\",\"req\":\"^1\"},{\"name\":\"ryu\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"derive\",\"alloc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"features\":[\"union\"],\"name\":\"smallvec\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"name\":\"sqlx\",\"optional\":true,\"req\":\"^0.7\"},{\"name\":\"static_assertions\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"test-case\",\"req\":\"^3\"},{\"kind\":\"dev\",\"name\":\"test-strategy\",\"req\":\"^0.3\"}],\"features\":{\"arbitrary\":[\"dep:arbitrary\"],\"borsh\":[\"dep:borsh\"],\"bytes\":[\"dep:bytes\"],\"default\":[\"std\"],\"diesel\":[\"dep:diesel\"],\"markup\":[\"dep:markup\"],\"proptest\":[\"dep:proptest\"],\"quickcheck\":[\"dep:quickcheck\"],\"rkyv\":[\"dep:rkyv\"],\"serde\":[\"dep:serde\"],\"smallvec\":[\"dep:smallvec\"],\"sqlx\":[\"dep:sqlx\",\"std\"],\"sqlx-mysql\":[\"sqlx\",\"sqlx/mysql\"],\"sqlx-postgres\":[\"sqlx\",\"sqlx/postgres\"],\"sqlx-sqlite\":[\"sqlx\",\"sqlx/sqlite\"],\"std\":[]}}",
"compact_str_0.9.0": "{\"dependencies\":[{\"default_features\":false,\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"borsh\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"castaway\",\"req\":\"^0.2.3\"},{\"name\":\"cfg-if\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"cfg-if\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"diesel\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"itoa\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"markup\",\"optional\":true,\"req\":\"^0.15\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"proptest\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"std\"],\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"quickcheck\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"quickcheck_macros\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"rayon\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"rkyv\",\"optional\":true,\"req\":\"^0.8\"},{\"kind\":\"dev\",\"name\":\"rkyv\",\"req\":\"^0.8.8\"},{\"name\":\"rustversion\",\"req\":\"^1\"},{\"name\":\"ryu\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"derive\",\"alloc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"features\":[\"union\"],\"name\":\"smallvec\",\"optional\":true,\"req\":\"^1\"},{\"default_features\":false,\"name\":\"sqlx\",\"optional\":true,\"req\":\"^0.8\"},{\"name\":\"static_assertions\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"test-case\",\"req\":\"^3\"},{\"kind\":\"dev\",\"name\":\"test-strategy\",\"req\":\"^0.3\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"arbitrary\":[\"dep:arbitrary\"],\"borsh\":[\"dep:borsh\"],\"bytes\":[\"dep:bytes\"],\"default\":[\"std\"],\"diesel\":[\"dep:diesel\"],\"markup\":[\"dep:markup\"],\"proptest\":[\"dep:proptest\"],\"quickcheck\":[\"dep:quickcheck\"],\"rkyv\":[\"dep:rkyv\"],\"serde\":[\"dep:serde\"],\"smallvec\":[\"dep:smallvec\"],\"sqlx\":[\"dep:sqlx\",\"std\"],\"sqlx-mysql\":[\"sqlx\",\"sqlx/mysql\"],\"sqlx-postgres\":[\"sqlx\",\"sqlx/postgres\"],\"sqlx-sqlite\":[\"sqlx\",\"sqlx/sqlite\"],\"std\":[],\"zeroize\":[\"dep:zeroize\"]}}",
"concurrent-queue_2.5.0": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"cargo_bench_support\"],\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"default_features\":false,\"name\":\"crossbeam-utils\",\"req\":\"^0.8.11\"},{\"kind\":\"dev\",\"name\":\"easy-parallel\",\"req\":\"^3.1.0\"},{\"kind\":\"dev\",\"name\":\"fastrand\",\"req\":\"^2.0.0\"},{\"name\":\"loom\",\"optional\":true,\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"default_features\":false,\"name\":\"portable-atomic\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"console_0.15.11": "{\"dependencies\":[{\"name\":\"encode_unicode\",\"req\":\"^1\",\"target\":\"cfg(windows)\"},{\"name\":\"libc\",\"req\":\"^0.2.99\"},{\"name\":\"once_cell\",\"req\":\"^1.8\"},{\"default_features\":false,\"features\":[\"std\",\"bit-set\",\"break-dead-code\"],\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"regex\",\"req\":\"^1.4.2\"},{\"name\":\"unicode-width\",\"optional\":true,\"req\":\"^0.2\"},{\"features\":[\"Win32_Foundation\",\"Win32_System_Console\",\"Win32_Storage_FileSystem\",\"Win32_UI_Input_KeyboardAndMouse\"],\"name\":\"windows-sys\",\"req\":\"^0.59\",\"target\":\"cfg(windows)\"}],\"features\":{\"ansi-parsing\":[],\"default\":[\"unicode-width\",\"ansi-parsing\"],\"windows-console-colors\":[\"ansi-parsing\"]}}",
"const-hex_1.17.0": "{\"dependencies\":[{\"name\":\"cfg-if\",\"req\":\"^1\"},{\"name\":\"cpufeatures\",\"req\":\"^0.2\",\"target\":\"cfg(any(target_arch = \\\"x86\\\", target_arch = \\\"x86_64\\\"))\"},{\"kind\":\"dev\",\"name\":\"divan\",\"package\":\"codspeed-divan-compat\",\"req\":\"^3\"},{\"default_features\":false,\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"faster-hex\",\"req\":\"^0.10.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"~0.4.2\"},{\"default_features\":false,\"name\":\"proptest\",\"optional\":true,\"req\":\"^1.4\"},{\"kind\":\"dev\",\"name\":\"rustc-hex\",\"req\":\"^2.1\"},{\"default_features\":false,\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{\"__fuzzing\":[\"dep:proptest\",\"std\"],\"alloc\":[\"serde_core?/alloc\",\"proptest?/alloc\"],\"core-error\":[],\"default\":[\"std\"],\"force-generic\":[],\"hex\":[],\"nightly\":[],\"portable-simd\":[],\"serde\":[\"dep:serde_core\"],\"std\":[\"serde_core?/std\",\"proptest?/std\",\"alloc\"]}}",
@@ -658,9 +439,9 @@
"crossterm_winapi_0.9.1": "{\"dependencies\":[{\"features\":[\"winbase\",\"consoleapi\",\"processenv\",\"handleapi\",\"synchapi\",\"impl-default\"],\"name\":\"winapi\",\"req\":\"^0.3.8\",\"target\":\"cfg(windows)\"}],\"features\":{}}",
"crunchy_0.2.4": "{\"dependencies\":[],\"features\":{\"default\":[\"limit_128\"],\"limit_1024\":[],\"limit_128\":[],\"limit_2048\":[],\"limit_256\":[],\"limit_512\":[],\"limit_64\":[],\"std\":[]}}",
"crypto-common_0.1.6": "{\"dependencies\":[{\"features\":[\"more_lengths\"],\"name\":\"generic-array\",\"req\":\"^0.14.4\"},{\"name\":\"rand_core\",\"optional\":true,\"req\":\"^0.6\"},{\"name\":\"typenum\",\"req\":\"^1.14\"}],\"features\":{\"getrandom\":[\"rand_core/getrandom\"],\"std\":[]}}",
"ctor-proc-macro_0.0.7": "{\"dependencies\":[],\"features\":{\"default\":[]}}",
"ctor-proc-macro_0.0.6": "{\"dependencies\":[],\"features\":{\"default\":[]}}",
"ctor_0.1.26": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"libc-print\",\"req\":\"^0.1.20\"},{\"name\":\"quote\",\"req\":\"^1.0.20\"},{\"default_features\":false,\"features\":[\"full\",\"parsing\",\"printing\",\"proc-macro\"],\"name\":\"syn\",\"req\":\"^1.0.98\"}],\"features\":{}}",
"ctor_0.6.3": "{\"dependencies\":[{\"name\":\"ctor-proc-macro\",\"optional\":true,\"req\":\"=0.0.7\"},{\"default_features\":false,\"name\":\"dtor\",\"optional\":true,\"req\":\"^0.1.0\"},{\"kind\":\"dev\",\"name\":\"libc-print\",\"req\":\"^0.1.20\"}],\"features\":{\"__no_warn_on_missing_unsafe\":[\"dtor?/__no_warn_on_missing_unsafe\"],\"default\":[\"dtor\",\"proc_macro\",\"__no_warn_on_missing_unsafe\"],\"dtor\":[\"dep:dtor\"],\"proc_macro\":[\"dep:ctor-proc-macro\",\"dtor?/proc_macro\"],\"used_linker\":[\"dtor?/used_linker\"]}}",
"ctor_0.5.0": "{\"dependencies\":[{\"name\":\"ctor-proc-macro\",\"optional\":true,\"req\":\"=0.0.6\"},{\"default_features\":false,\"name\":\"dtor\",\"optional\":true,\"req\":\"^0.1.0\"},{\"kind\":\"dev\",\"name\":\"libc-print\",\"req\":\"^0.1.20\"}],\"features\":{\"__no_warn_on_missing_unsafe\":[\"dtor?/__no_warn_on_missing_unsafe\"],\"default\":[\"dtor\",\"proc_macro\",\"__no_warn_on_missing_unsafe\"],\"dtor\":[\"dep:dtor\"],\"proc_macro\":[\"dep:ctor-proc-macro\",\"dtor?/proc_macro\"],\"used_linker\":[\"dtor?/used_linker\"]}}",
"darling_0.20.11": "{\"dependencies\":[{\"name\":\"darling_core\",\"req\":\"=0.20.11\"},{\"name\":\"darling_macro\",\"req\":\"=0.20.11\"},{\"kind\":\"dev\",\"name\":\"proc-macro2\",\"req\":\"^1.0.86\"},{\"kind\":\"dev\",\"name\":\"quote\",\"req\":\"^1.0.18\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.9\",\"target\":\"cfg(compiletests)\"},{\"kind\":\"dev\",\"name\":\"syn\",\"req\":\"^2.0.15\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.89\",\"target\":\"cfg(compiletests)\"}],\"features\":{\"default\":[\"suggestions\"],\"diagnostics\":[\"darling_core/diagnostics\"],\"suggestions\":[\"darling_core/suggestions\"]}}",
"darling_0.21.3": "{\"dependencies\":[{\"name\":\"darling_core\",\"req\":\"=0.21.3\"},{\"name\":\"darling_macro\",\"req\":\"=0.21.3\"},{\"kind\":\"dev\",\"name\":\"proc-macro2\",\"req\":\"^1.0.86\"},{\"kind\":\"dev\",\"name\":\"quote\",\"req\":\"^1.0.18\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.9\",\"target\":\"cfg(compiletests)\"},{\"kind\":\"dev\",\"name\":\"syn\",\"req\":\"^2.0.15\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.89\",\"target\":\"cfg(compiletests)\"}],\"features\":{\"default\":[\"suggestions\"],\"diagnostics\":[\"darling_core/diagnostics\"],\"serde\":[\"darling_core/serde\"],\"suggestions\":[\"darling_core/suggestions\"]}}",
"darling_0.23.0": "{\"dependencies\":[{\"name\":\"darling_core\",\"req\":\"=0.23.0\"},{\"name\":\"darling_macro\",\"req\":\"=0.23.0\"},{\"kind\":\"dev\",\"name\":\"proc-macro2\",\"req\":\"^1.0.86\"},{\"kind\":\"dev\",\"name\":\"quote\",\"req\":\"^1.0.18\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.9\",\"target\":\"cfg(compiletests)\"},{\"kind\":\"dev\",\"name\":\"syn\",\"req\":\"^2.0.15\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.89\",\"target\":\"cfg(compiletests)\"}],\"features\":{\"default\":[\"suggestions\"],\"diagnostics\":[\"darling_core/diagnostics\"],\"serde\":[\"darling_core/serde\"],\"suggestions\":[\"darling_core/suggestions\"]}}",
@@ -696,6 +477,7 @@
"display_container_0.9.0": "{\"dependencies\":[{\"name\":\"either\",\"req\":\"^1.8\"},{\"name\":\"indenter\",\"req\":\"^0.3.3\"}],\"features\":{}}",
"displaydoc_0.2.5": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2\"},{\"kind\":\"dev\",\"name\":\"pretty_assertions\",\"req\":\"^0.6.1\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0\"},{\"name\":\"quote\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1\"},{\"name\":\"syn\",\"req\":\"^2.0\"},{\"kind\":\"dev\",\"name\":\"thiserror\",\"req\":\"^1.0.24\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"doc-comment_0.3.3": "{\"dependencies\":[],\"features\":{\"no_core\":[],\"old_macros\":[]}}",
"document-features_0.2.12": "{\"dependencies\":[{\"name\":\"litrs\",\"req\":\"^1.0.0\"}],\"features\":{\"default\":[],\"self-test\":[]}}",
"dotenvy_0.15.7": "{\"dependencies\":[{\"name\":\"clap\",\"optional\":true,\"req\":\"^3.2\"},{\"kind\":\"dev\",\"name\":\"once_cell\",\"req\":\"^1.16.0\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.3.0\"}],\"features\":{\"cli\":[\"clap\"]}}",
"downcast-rs_1.2.1": "{\"dependencies\":[],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"dtor-proc-macro_0.0.6": "{\"dependencies\":[],\"features\":{\"default\":[]}}",
@@ -759,8 +541,6 @@
"getrandom_0.2.16": "{\"dependencies\":[{\"name\":\"cfg-if\",\"req\":\"^1\"},{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0\"},{\"name\":\"js-sys\",\"optional\":true,\"req\":\"^0.3\",\"target\":\"cfg(all(any(target_arch = \\\"wasm32\\\", target_arch = \\\"wasm64\\\"), target_os = \\\"unknown\\\"))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(unix)\"},{\"default_features\":false,\"name\":\"wasi\",\"req\":\"^0.11\",\"target\":\"cfg(target_os = \\\"wasi\\\")\"},{\"default_features\":false,\"name\":\"wasm-bindgen\",\"optional\":true,\"req\":\"^0.2.62\",\"target\":\"cfg(all(any(target_arch = \\\"wasm32\\\", target_arch = \\\"wasm64\\\"), target_os = \\\"unknown\\\"))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3.18\",\"target\":\"cfg(all(any(target_arch = \\\"wasm32\\\", target_arch = \\\"wasm64\\\"), target_os = \\\"unknown\\\"))\"}],\"features\":{\"custom\":[],\"js\":[\"wasm-bindgen\",\"js-sys\"],\"linux_disable_fallback\":[],\"rdrand\":[],\"rustc-dep-of-std\":[\"compiler_builtins\",\"core\",\"libc/rustc-dep-of-std\",\"wasi/rustc-dep-of-std\"],\"std\":[],\"test-in-browser\":[]}}",
"getrandom_0.3.3": "{\"dependencies\":[{\"name\":\"cfg-if\",\"req\":\"^1\"},{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"js-sys\",\"optional\":true,\"req\":\"^0.3.77\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", any(target_os = \\\"unknown\\\", target_os = \\\"none\\\"), target_feature = \\\"atomics\\\"))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(all(any(target_os = \\\"linux\\\", target_os = \\\"android\\\"), not(any(all(target_os = \\\"linux\\\", target_env = \\\"\\\"), getrandom_backend = \\\"custom\\\", getrandom_backend = \\\"linux_raw\\\", getrandom_backend = \\\"rdrand\\\", getrandom_backend = \\\"rndr\\\"))))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(any(target_os = \\\"dragonfly\\\", target_os = \\\"freebsd\\\", target_os = \\\"hurd\\\", target_os = \\\"illumos\\\", target_os = \\\"cygwin\\\", all(target_os = \\\"horizon\\\", target_arch = \\\"arm\\\")))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(any(target_os = \\\"haiku\\\", target_os = \\\"redox\\\", target_os = \\\"nto\\\", target_os = \\\"aix\\\"))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(any(target_os = \\\"ios\\\", target_os = \\\"visionos\\\", target_os = \\\"watchos\\\", target_os = \\\"tvos\\\"))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(any(target_os = \\\"macos\\\", target_os = \\\"openbsd\\\", target_os = \\\"vita\\\", target_os = \\\"emscripten\\\"))\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(target_os = \\\"netbsd\\\")\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(target_os = \\\"solaris\\\")\"},{\"default_features\":false,\"name\":\"libc\",\"req\":\"^0.2.154\",\"target\":\"cfg(target_os = \\\"vxworks\\\")\"},{\"default_features\":false,\"name\":\"r-efi\",\"req\":\"^5.1\",\"target\":\"cfg(all(target_os = \\\"uefi\\\", getrandom_backend = \\\"efi_rng\\\"))\"},{\"default_features\":false,\"name\":\"wasi\",\"req\":\"^0.14\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", target_os = \\\"wasi\\\", target_env = \\\"p2\\\"))\"},{\"default_features\":false,\"name\":\"wasm-bindgen\",\"optional\":true,\"req\":\"^0.2.98\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", any(target_os = \\\"unknown\\\", target_os = \\\"none\\\")))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", any(target_os = \\\"unknown\\\", target_os = \\\"none\\\")))\"}],\"features\":{\"rustc-dep-of-std\":[\"dep:compiler_builtins\",\"dep:core\"],\"std\":[],\"wasm_js\":[\"dep:wasm-bindgen\",\"dep:js-sys\"]}}",
"gimli_0.31.1": "{\"dependencies\":[{\"name\":\"alloc\",\"optional\":true,\"package\":\"rustc-std-workspace-alloc\",\"req\":\"^1.0.0\"},{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1.2\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"default_features\":false,\"name\":\"fallible-iterator\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2.0.0\"},{\"default_features\":false,\"name\":\"stable_deref_trait\",\"optional\":true,\"req\":\"^1.1.0\"},{\"kind\":\"dev\",\"name\":\"test-assembler\",\"req\":\"^0.1.3\"}],\"features\":{\"default\":[\"read-all\",\"write\"],\"endian-reader\":[\"read\",\"dep:stable_deref_trait\"],\"fallible-iterator\":[\"dep:fallible-iterator\"],\"read\":[\"read-core\"],\"read-all\":[\"read\",\"std\",\"fallible-iterator\",\"endian-reader\"],\"read-core\":[],\"rustc-dep-of-std\":[\"dep:core\",\"dep:alloc\",\"dep:compiler_builtins\"],\"std\":[\"fallible-iterator?/std\",\"stable_deref_trait?/std\"],\"write\":[\"dep:indexmap\"]}}",
"git+https://github.com/JakkuSakura/tokio-tungstenite?rev=2ae536b0de793f3ddf31fc2f22d445bf1ef2023d#2ae536b0de793f3ddf31fc2f22d445bf1ef2023d_tokio-tungstenite": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"sink\",\"std\"],\"name\":\"futures-util\",\"optional\":false},{\"name\":\"log\"},{\"default_features\":true,\"features\":[],\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\"},{\"default_features\":false,\"features\":[],\"name\":\"rustls\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-native-certs\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-pki-types\",\"optional\":true},{\"default_features\":false,\"features\":[\"io-util\"],\"name\":\"tokio\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"tokio-native-tls\",\"optional\":true},{\"default_features\":false,\"features\":[],\"name\":\"tokio-rustls\",\"optional\":true},{\"default_features\":false,\"features\":[],\"name\":\"tungstenite\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"webpki-roots\",\"optional\":true}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\",\"tokio-rustls\",\"stream\",\"tungstenite/__rustls-tls\",\"handshake\"],\"connect\":[\"stream\",\"tokio/net\",\"handshake\"],\"default\":[\"connect\",\"handshake\"],\"handshake\":[\"tungstenite/handshake\"],\"native-tls\":[\"native-tls-crate\",\"tokio-native-tls\",\"stream\",\"tungstenite/native-tls\",\"handshake\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\",\"tungstenite/native-tls-vendored\"],\"proxy\":[\"tungstenite/proxy\",\"tokio/net\",\"handshake\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"],\"stream\":[],\"url\":[\"tungstenite/url\"]},\"strip_prefix\":\"\"}",
"git+https://github.com/JakkuSakura/tungstenite-rs?rev=f514de8644821113e5d18a027d6d28a5c8cc0a6e#f514de8644821113e5d18a027d6d28a5c8cc0a6e_tungstenite": "{\"dependencies\":[{\"name\":\"bytes\"},{\"default_features\":true,\"features\":[],\"name\":\"data-encoding\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"http\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"httparse\",\"optional\":true},{\"name\":\"log\"},{\"default_features\":true,\"features\":[],\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\"},{\"name\":\"rand\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"rustls\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-native-certs\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"rustls-pki-types\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"sha1\",\"optional\":true},{\"name\":\"thiserror\"},{\"default_features\":true,\"features\":[],\"name\":\"url\",\"optional\":true},{\"name\":\"utf-8\"},{\"default_features\":true,\"features\":[],\"name\":\"webpki-roots\",\"optional\":true}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\"],\"default\":[\"handshake\"],\"handshake\":[\"data-encoding\",\"http\",\"httparse\",\"sha1\"],\"native-tls\":[\"native-tls-crate\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\"],\"proxy\":[\"handshake\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"],\"url\":[\"dep:url\"]},\"strip_prefix\":\"\"}",
"git+https://github.com/nornagon/crossterm?branch=nornagon%2Fcolor-query#87db8bfa6dc99427fd3b071681b07fc31c6ce995_crossterm": "{\"dependencies\":[{\"default_features\":true,\"features\":[],\"name\":\"bitflags\",\"optional\":false},{\"default_features\":false,\"features\":[],\"name\":\"futures-core\",\"optional\":true},{\"name\":\"parking_lot\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"filedescriptor\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[],\"name\":\"libc\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"os-poll\"],\"name\":\"mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":false,\"features\":[\"std\",\"stdio\",\"termios\"],\"name\":\"rustix\",\"optional\":false,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"signal-hook\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[\"support-v1_0\"],\"name\":\"signal-hook-mio\",\"optional\":true,\"target\":\"cfg(unix)\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm_winapi\",\"optional\":true,\"target\":\"cfg(windows)\"},{\"default_features\":true,\"features\":[\"winuser\",\"winerror\"],\"name\":\"winapi\",\"optional\":true,\"target\":\"cfg(windows)\"}],\"features\":{\"bracketed-paste\":[],\"default\":[\"bracketed-paste\",\"windows\",\"events\"],\"event-stream\":[\"dep:futures-core\",\"events\"],\"events\":[\"dep:mio\",\"dep:signal-hook\",\"dep:signal-hook-mio\"],\"serde\":[\"dep:serde\",\"bitflags/serde\"],\"use-dev-tty\":[\"filedescriptor\",\"rustix/process\"],\"windows\":[\"dep:winapi\",\"dep:crossterm_winapi\"]},\"strip_prefix\":\"\"}",
"git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#9b2ad1298408c45918ee9f8241a6f95498cdbed2_ratatui": "{\"dependencies\":[{\"name\":\"bitflags\"},{\"name\":\"cassowary\"},{\"name\":\"compact_str\"},{\"default_features\":true,\"features\":[],\"name\":\"crossterm\",\"optional\":true},{\"default_features\":true,\"features\":[],\"name\":\"document-features\",\"optional\":true},{\"name\":\"indoc\"},{\"name\":\"instability\"},{\"name\":\"itertools\"},{\"name\":\"lru\"},{\"default_features\":true,\"features\":[],\"name\":\"palette\",\"optional\":true},{\"name\":\"paste\"},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true},{\"default_features\":true,\"features\":[\"derive\"],\"name\":\"strum\",\"optional\":false},{\"default_features\":true,\"features\":[],\"name\":\"termwiz\",\"optional\":true},{\"default_features\":true,\"features\":[\"local-offset\"],\"name\":\"time\",\"optional\":true},{\"name\":\"unicode-segmentation\"},{\"name\":\"unicode-truncate\"},{\"name\":\"unicode-width\"},{\"default_features\":true,\"features\":[],\"name\":\"termion\",\"optional\":true,\"target\":\"cfg(not(windows))\"}],\"features\":{\"all-widgets\":[\"widget-calendar\"],\"crossterm\":[\"dep:crossterm\"],\"default\":[\"crossterm\",\"underline-color\"],\"macros\":[],\"palette\":[\"dep:palette\"],\"scrolling-regions\":[],\"serde\":[\"dep:serde\",\"bitflags/serde\",\"compact_str/serde\"],\"termion\":[\"dep:termion\"],\"termwiz\":[\"dep:termwiz\"],\"underline-color\":[\"dep:crossterm\"],\"unstable\":[\"unstable-rendered-line-info\",\"unstable-widget-ref\",\"unstable-backend-writer\"],\"unstable-backend-writer\":[],\"unstable-rendered-line-info\":[],\"unstable-widget-ref\":[],\"widget-calendar\":[\"dep:time\"]},\"strip_prefix\":\"\"}",
"globset_0.4.16": "{\"dependencies\":[{\"name\":\"aho-corasick\",\"req\":\"^1.1.1\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"bstr\",\"req\":\"^1.6.2\"},{\"kind\":\"dev\",\"name\":\"glob\",\"req\":\"^0.3.1\"},{\"name\":\"log\",\"optional\":true,\"req\":\"^0.4.20\"},{\"default_features\":false,\"features\":[\"std\",\"perf\",\"syntax\",\"meta\",\"nfa\",\"hybrid\"],\"name\":\"regex-automata\",\"req\":\"^0.4.0\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"regex-syntax\",\"req\":\"^0.8.0\"},{\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.188\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.107\"}],\"features\":{\"default\":[\"log\"],\"serde1\":[\"serde\"],\"simd-accel\":[]}}",
@@ -834,6 +614,7 @@
"jni_0.21.1": "{\"dependencies\":[{\"name\":\"cesu8\",\"req\":\"^1.1.0\"},{\"name\":\"cfg-if\",\"req\":\"^1.0.0\"},{\"name\":\"combine\",\"req\":\"^4.1.0\"},{\"name\":\"java-locator\",\"optional\":true,\"req\":\"^0.1\"},{\"name\":\"jni-sys\",\"req\":\"^0.3.0\"},{\"name\":\"libloading\",\"optional\":true,\"req\":\"^0.7\"},{\"name\":\"log\",\"req\":\"^0.4.4\"},{\"name\":\"thiserror\",\"req\":\"^1.0.20\"},{\"kind\":\"dev\",\"name\":\"assert_matches\",\"req\":\"^1.5.0\"},{\"kind\":\"dev\",\"name\":\"lazy_static\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"rusty-fork\",\"req\":\"^0.3.0\"},{\"kind\":\"build\",\"name\":\"walkdir\",\"req\":\"^2\"},{\"features\":[\"Win32_Globalization\"],\"name\":\"windows-sys\",\"req\":\"^0.45.0\",\"target\":\"cfg(windows)\"},{\"kind\":\"dev\",\"name\":\"bytemuck\",\"req\":\"^1.13.0\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[],\"invocation\":[\"java-locator\",\"libloading\"]}}",
"jobserver_0.1.34": "{\"dependencies\":[{\"features\":[\"std\"],\"name\":\"getrandom\",\"req\":\"^0.3.2\",\"target\":\"cfg(windows)\"},{\"name\":\"libc\",\"req\":\"^0.2.171\",\"target\":\"cfg(unix)\"},{\"features\":[\"fs\"],\"kind\":\"dev\",\"name\":\"nix\",\"req\":\"^0.28.0\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.10.1\"}],\"features\":{}}",
"js-sys_0.3.77": "{\"dependencies\":[{\"default_features\":false,\"name\":\"once_cell\",\"req\":\"^1.12\"},{\"default_features\":false,\"name\":\"wasm-bindgen\",\"req\":\"=0.2.100\"}],\"features\":{\"default\":[\"std\"],\"std\":[\"wasm-bindgen/std\"]}}",
"kasuari_0.4.11": "{\"dependencies\":[{\"name\":\"document-features\",\"optional\":true,\"req\":\"^0.2\"},{\"name\":\"hashbrown\",\"req\":\"^0.16\"},{\"default_features\":false,\"features\":[\"require-cas\"],\"name\":\"portable-atomic\",\"optional\":true,\"req\":\"^1.11\"},{\"features\":[\"alloc\"],\"name\":\"portable-atomic-util\",\"optional\":true,\"req\":\"^0.2.4\"},{\"kind\":\"dev\",\"name\":\"rstest\",\"req\":\"^0.26\"},{\"default_features\":false,\"name\":\"thiserror\",\"req\":\"^2.0\"}],\"features\":{\"default\":[\"std\"],\"document-features\":[\"dep:document-features\"],\"portable-atomic\":[\"dep:portable-atomic\",\"dep:portable-atomic-util\"],\"std\":[\"thiserror/std\",\"portable-atomic?/std\"]}}",
"keyring_3.6.3": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"base64\",\"req\":\"^0.22\"},{\"name\":\"byteorder\",\"optional\":true,\"req\":\"^1.2\",\"target\":\"cfg(target_os = \\\"windows\\\")\"},{\"features\":[\"derive\",\"wrap_help\"],\"kind\":\"dev\",\"name\":\"clap\",\"req\":\"^4\"},{\"name\":\"dbus-secret-service\",\"optional\":true,\"req\":\"^4.0.0-rc.1\",\"target\":\"cfg(target_os = \\\"openbsd\\\")\"},{\"name\":\"dbus-secret-service\",\"optional\":true,\"req\":\"^4.0.0-rc.2\",\"target\":\"cfg(target_os = \\\"linux\\\")\"},{\"name\":\"dbus-secret-service\",\"optional\":true,\"req\":\"^4.0.1\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"kind\":\"dev\",\"name\":\"doc-comment\",\"req\":\"^0.3\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.11.5\"},{\"kind\":\"dev\",\"name\":\"fastrand\",\"req\":\"^2\"},{\"features\":[\"std\"],\"name\":\"linux-keyutils\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(target_os = \\\"linux\\\")\"},{\"name\":\"log\",\"req\":\"^0.4.22\"},{\"name\":\"openssl\",\"optional\":true,\"req\":\"^0.10.66\"},{\"kind\":\"dev\",\"name\":\"rpassword\",\"req\":\"^7\"},{\"kind\":\"dev\",\"name\":\"rprompt\",\"req\":\"^2\"},{\"name\":\"secret-service\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"name\":\"secret-service\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"linux\\\")\"},{\"name\":\"secret-service\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"openbsd\\\")\"},{\"name\":\"security-framework\",\"optional\":true,\"req\":\"^2\",\"target\":\"cfg(target_os = \\\"ios\\\")\"},{\"name\":\"security-framework\",\"optional\":true,\"req\":\"^3\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"kind\":\"dev\",\"name\":\"whoami\",\"req\":\"^1.5\"},{\"features\":[\"Win32_Foundation\",\"Win32_Security_Credentials\"],\"name\":\"windows-sys\",\"optional\":true,\"req\":\"^0.60\",\"target\":\"cfg(target_os = \\\"windows\\\")\"},{\"name\":\"zbus\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"name\":\"zbus\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"linux\\\")\"},{\"name\":\"zbus\",\"optional\":true,\"req\":\"^4\",\"target\":\"cfg(target_os = \\\"openbsd\\\")\"},{\"name\":\"zeroize\",\"req\":\"^1.8.1\",\"target\":\"cfg(target_os = \\\"windows\\\")\"}],\"features\":{\"apple-native\":[\"dep:security-framework\"],\"async-io\":[\"zbus?/async-io\"],\"async-secret-service\":[\"dep:secret-service\",\"dep:zbus\"],\"crypto-openssl\":[\"dbus-secret-service?/crypto-openssl\",\"secret-service?/crypto-openssl\"],\"crypto-rust\":[\"dbus-secret-service?/crypto-rust\",\"secret-service?/crypto-rust\"],\"linux-native\":[\"dep:linux-keyutils\"],\"linux-native-async-persistent\":[\"linux-native\",\"async-secret-service\"],\"linux-native-sync-persistent\":[\"linux-native\",\"sync-secret-service\"],\"sync-secret-service\":[\"dep:dbus-secret-service\"],\"tokio\":[\"zbus?/tokio\"],\"vendored\":[\"dbus-secret-service?/vendored\",\"openssl?/vendored\"],\"windows-native\":[\"dep:windows-sys\",\"dep:byteorder\"]}}",
"kqueue-sys_1.0.4": "{\"dependencies\":[{\"name\":\"bitflags\",\"req\":\"^1.2.1\"},{\"name\":\"libc\",\"req\":\"^0.2.74\"}],\"features\":{}}",
"kqueue_1.1.1": "{\"dependencies\":[{\"features\":[\"html_reports\"],\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"kind\":\"dev\",\"name\":\"dhat\",\"req\":\"^0.3.2\"},{\"name\":\"kqueue-sys\",\"req\":\"^1.0.4\"},{\"name\":\"libc\",\"req\":\"^0.2.17\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\"}],\"features\":{}}",
@@ -849,9 +630,10 @@
"linux-raw-sys_0.4.15": "{\"dependencies\":[{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1.49\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.100\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1.0\"}],\"features\":{\"bootparam\":[],\"btrfs\":[],\"default\":[\"std\",\"general\",\"errno\"],\"elf\":[],\"elf_uapi\":[],\"errno\":[],\"general\":[],\"if_arp\":[],\"if_ether\":[],\"if_packet\":[],\"io_uring\":[],\"ioctl\":[],\"landlock\":[],\"loop_device\":[],\"mempolicy\":[],\"net\":[],\"netlink\":[],\"no_std\":[],\"prctl\":[],\"ptrace\":[],\"rustc-dep-of-std\":[\"core\",\"compiler_builtins\",\"no_std\"],\"std\":[],\"system\":[],\"xdp\":[]}}",
"linux-raw-sys_0.9.4": "{\"dependencies\":[{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1.49\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.100\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1.0\"}],\"features\":{\"bootparam\":[],\"btrfs\":[],\"default\":[\"std\",\"general\",\"errno\"],\"elf\":[],\"elf_uapi\":[],\"errno\":[],\"general\":[],\"if_arp\":[],\"if_ether\":[],\"if_packet\":[],\"image\":[],\"io_uring\":[],\"ioctl\":[],\"landlock\":[],\"loop_device\":[],\"mempolicy\":[],\"net\":[],\"netlink\":[],\"no_std\":[],\"prctl\":[],\"ptrace\":[],\"rustc-dep-of-std\":[\"core\",\"compiler_builtins\",\"no_std\"],\"std\":[],\"system\":[],\"xdp\":[]}}",
"litemap_0.8.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.3.1\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5.0\",\"target\":\"cfg(not(target_arch = \\\"wasm32\\\"))\"},{\"default_features\":false,\"name\":\"databake\",\"optional\":true,\"req\":\"^0.2.0\"},{\"default_features\":false,\"features\":[\"use-std\"],\"kind\":\"dev\",\"name\":\"postcard\",\"req\":\"^1.0.3\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.9\"},{\"features\":[\"validation\"],\"kind\":\"dev\",\"name\":\"rkyv\",\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.110\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0.110\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.45\"},{\"default_features\":false,\"features\":[\"derive\"],\"name\":\"yoke\",\"optional\":true,\"req\":\"^0.8.0\"}],\"features\":{\"alloc\":[],\"databake\":[\"dep:databake\"],\"default\":[\"alloc\"],\"serde\":[\"dep:serde\",\"alloc\"],\"testing\":[\"alloc\"],\"yoke\":[\"dep:yoke\"]}}",
"litrs_1.0.0": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"optional\":true,\"req\":\"^1.0.63\"},{\"name\":\"unicode-xid\",\"optional\":true,\"req\":\"^0.2.4\"}],\"features\":{\"check_suffix\":[\"unicode-xid\"]}}",
"local-waker_0.1.4": "{\"dependencies\":[],\"features\":{}}",
"lock_api_0.4.13": "{\"dependencies\":[{\"kind\":\"build\",\"name\":\"autocfg\",\"req\":\"^1.1.0\"},{\"name\":\"owning_ref\",\"optional\":true,\"req\":\"^0.4.1\"},{\"default_features\":false,\"name\":\"scopeguard\",\"req\":\"^1.1.0\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.126\"}],\"features\":{\"arc_lock\":[],\"atomic_usize\":[],\"default\":[\"atomic_usize\"],\"nightly\":[]}}",
"log_0.4.29": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"proc-macro2\",\"req\":\"^1.0.63\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"sval\",\"optional\":true,\"req\":\"^2.16\"},{\"kind\":\"dev\",\"name\":\"sval\",\"req\":\"^2.16\"},{\"kind\":\"dev\",\"name\":\"sval_derive\",\"req\":\"^2.16\"},{\"default_features\":false,\"name\":\"sval_ref\",\"optional\":true,\"req\":\"^2.16\"},{\"default_features\":false,\"features\":[\"inline-i128\"],\"name\":\"value-bag\",\"optional\":true,\"req\":\"^1.12\"},{\"features\":[\"test\"],\"kind\":\"dev\",\"name\":\"value-bag\",\"req\":\"^1.12\"}],\"features\":{\"kv\":[],\"kv_serde\":[\"kv_std\",\"value-bag/serde\",\"serde\"],\"kv_std\":[\"std\",\"kv\",\"value-bag/error\"],\"kv_sval\":[\"kv\",\"value-bag/sval\",\"sval\",\"sval_ref\"],\"kv_unstable\":[\"kv\",\"value-bag\"],\"kv_unstable_serde\":[\"kv_serde\",\"kv_unstable_std\"],\"kv_unstable_std\":[\"kv_std\",\"kv_unstable\"],\"kv_unstable_sval\":[\"kv_sval\",\"kv_unstable\"],\"max_level_debug\":[],\"max_level_error\":[],\"max_level_info\":[],\"max_level_off\":[],\"max_level_trace\":[],\"max_level_warn\":[],\"release_max_level_debug\":[],\"release_max_level_error\":[],\"release_max_level_info\":[],\"release_max_level_off\":[],\"release_max_level_trace\":[],\"release_max_level_warn\":[],\"serde\":[\"serde_core\"],\"std\":[]}}",
"log_0.4.28": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"proc-macro2\",\"req\":\"^1.0.63\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_test\",\"req\":\"^1.0\"},{\"default_features\":false,\"name\":\"sval\",\"optional\":true,\"req\":\"^2.14.1\"},{\"kind\":\"dev\",\"name\":\"sval\",\"req\":\"^2.1\"},{\"kind\":\"dev\",\"name\":\"sval_derive\",\"req\":\"^2.1\"},{\"default_features\":false,\"name\":\"sval_ref\",\"optional\":true,\"req\":\"^2.1\"},{\"default_features\":false,\"features\":[\"inline-i128\"],\"name\":\"value-bag\",\"optional\":true,\"req\":\"^1.7\"},{\"features\":[\"test\"],\"kind\":\"dev\",\"name\":\"value-bag\",\"req\":\"^1.7\"}],\"features\":{\"kv\":[],\"kv_serde\":[\"kv_std\",\"value-bag/serde\",\"serde\"],\"kv_std\":[\"std\",\"kv\",\"value-bag/error\"],\"kv_sval\":[\"kv\",\"value-bag/sval\",\"sval\",\"sval_ref\"],\"kv_unstable\":[\"kv\",\"value-bag\"],\"kv_unstable_serde\":[\"kv_serde\",\"kv_unstable_std\"],\"kv_unstable_std\":[\"kv_std\",\"kv_unstable\"],\"kv_unstable_sval\":[\"kv_sval\",\"kv_unstable\"],\"max_level_debug\":[],\"max_level_error\":[],\"max_level_info\":[],\"max_level_off\":[],\"max_level_trace\":[],\"max_level_warn\":[],\"release_max_level_debug\":[],\"release_max_level_error\":[],\"release_max_level_info\":[],\"release_max_level_off\":[],\"release_max_level_trace\":[],\"release_max_level_warn\":[],\"std\":[]}}",
"logos-derive_0.12.1": "{\"dependencies\":[{\"name\":\"beef\",\"req\":\"^0.5.0\"},{\"name\":\"fnv\",\"req\":\"^1.0.6\"},{\"kind\":\"dev\",\"name\":\"pretty_assertions\",\"req\":\"^0.6.1\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0.9\"},{\"name\":\"quote\",\"req\":\"^1.0.3\"},{\"name\":\"regex-syntax\",\"req\":\"^0.6\"},{\"features\":[\"full\"],\"name\":\"syn\",\"req\":\"^1.0.17\"}],\"features\":{}}",
"logos_0.12.1": "{\"dependencies\":[{\"name\":\"logos-derive\",\"optional\":true,\"req\":\"^0.12.1\"}],\"features\":{\"default\":[\"export_derive\",\"std\"],\"export_derive\":[\"logos-derive\"],\"std\":[]}}",
"lru-slab_0.1.2": "{\"dependencies\":[],\"features\":{}}",
@@ -979,6 +761,7 @@
"rand_core_0.6.4": "{\"dependencies\":[{\"name\":\"getrandom\",\"optional\":true,\"req\":\"^0.2\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"alloc\":[],\"serde1\":[\"serde\"],\"std\":[\"alloc\",\"getrandom\",\"getrandom/std\"]}}",
"rand_core_0.9.3": "{\"dependencies\":[{\"name\":\"getrandom\",\"optional\":true,\"req\":\"^0.3.0\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"os_rng\":[\"dep:getrandom\"],\"serde\":[\"dep:serde\"],\"std\":[\"getrandom?/std\"]}}",
"rand_xorshift_0.4.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1\"},{\"name\":\"rand_core\",\"req\":\"^0.9.0\"},{\"default_features\":false,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.118\"}],\"features\":{\"serde\":[\"dep:serde\"]}}",
"ratatui-core_0.1.0": "{\"dependencies\":[{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"bitflags\",\"req\":\"^2.10\"},{\"default_features\":false,\"name\":\"compact_str\",\"req\":\"^0.9\"},{\"name\":\"document-features\",\"optional\":true,\"req\":\"^0.2\"},{\"name\":\"hashbrown\",\"req\":\"^0.16\"},{\"name\":\"indoc\",\"req\":\"^2\"},{\"default_features\":false,\"features\":[\"use_alloc\"],\"name\":\"itertools\",\"req\":\"^0.14\"},{\"default_features\":false,\"name\":\"kasuari\",\"req\":\"^0.4\"},{\"name\":\"lru\",\"req\":\"^0.16\"},{\"name\":\"palette\",\"optional\":true,\"req\":\"^0.7\"},{\"kind\":\"dev\",\"name\":\"pretty_assertions\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"rstest\",\"req\":\"^0.26\"},{\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"derive\"],\"name\":\"strum\",\"req\":\"^0.27\"},{\"default_features\":false,\"name\":\"thiserror\",\"req\":\"^2\"},{\"name\":\"unicode-segmentation\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"unicode-truncate\",\"req\":\"^2\"},{\"name\":\"unicode-width\",\"req\":\">=0.2.0, <=0.2.2\"}],\"features\":{\"anstyle\":[\"dep:anstyle\"],\"default\":[],\"layout-cache\":[\"std\"],\"palette\":[\"std\",\"dep:palette\"],\"portable-atomic\":[\"kasuari/portable-atomic\"],\"scrolling-regions\":[],\"serde\":[\"std\",\"dep:serde\",\"bitflags/serde\",\"compact_str/serde\"],\"std\":[\"itertools/use_std\",\"thiserror/std\",\"kasuari/std\",\"compact_str/std\",\"unicode-truncate/std\",\"strum/std\"],\"underline-color\":[]}}",
"ratatui-macros_0.6.0": "{\"dependencies\":[{\"features\":[\"user-hooks\"],\"kind\":\"dev\",\"name\":\"cargo-husky\",\"req\":\"^1.5.0\"},{\"name\":\"ratatui\",\"req\":\"^0.29.0\"},{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.101\"}],\"features\":{}}",
"redox_syscall_0.5.15": "{\"dependencies\":[{\"name\":\"bitflags\",\"req\":\"^2.4\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"}],\"features\":{\"default\":[\"userspace\"],\"rustc-dep-of-std\":[\"core\",\"bitflags/rustc-dep-of-std\"],\"std\":[],\"userspace\":[]}}",
"redox_users_0.4.6": "{\"dependencies\":[{\"features\":[\"std\"],\"name\":\"getrandom\",\"req\":\"^0.2\"},{\"default_features\":false,\"features\":[\"std\",\"call\"],\"name\":\"libredox\",\"req\":\"^0.1.3\"},{\"name\":\"rust-argon2\",\"optional\":true,\"req\":\"^0.8\"},{\"name\":\"thiserror\",\"req\":\"^1.0\"},{\"features\":[\"zeroize_derive\"],\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.4\"}],\"features\":{\"auth\":[\"rust-argon2\",\"zeroize\"],\"default\":[\"auth\"]}}",
@@ -1124,8 +907,9 @@
"tokio-rustls_0.26.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"argh\",\"req\":\"^0.1.1\"},{\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3.1\"},{\"kind\":\"dev\",\"name\":\"lazy_static\",\"req\":\"^1.1\"},{\"features\":[\"pem\"],\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.13\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"rustls\",\"req\":\"^0.23.22\"},{\"name\":\"tokio\",\"req\":\"^1.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"webpki-roots\",\"req\":\"^0.26\"}],\"features\":{\"aws-lc-rs\":[\"aws_lc_rs\"],\"aws_lc_rs\":[\"rustls/aws_lc_rs\"],\"default\":[\"logging\",\"tls12\",\"aws_lc_rs\"],\"early-data\":[],\"fips\":[\"rustls/fips\"],\"logging\":[\"rustls/logging\"],\"ring\":[\"rustls/ring\"],\"tls12\":[\"rustls/tls12\"]}}",
"tokio-stream_0.1.18": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"req\":\"^1.15.0\"},{\"features\":[\"full\",\"test-util\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4\"},{\"name\":\"tokio-util\",\"optional\":true,\"req\":\"^0.7.0\"}],\"features\":{\"default\":[\"time\"],\"fs\":[\"tokio/fs\"],\"full\":[\"time\",\"net\",\"io-util\",\"fs\",\"sync\",\"signal\"],\"io-util\":[\"tokio/io-util\"],\"net\":[\"tokio/net\"],\"signal\":[\"tokio/signal\"],\"sync\":[\"tokio/sync\",\"tokio-util\"],\"time\":[\"tokio/time\"]}}",
"tokio-test_0.4.4": "{\"dependencies\":[{\"name\":\"async-stream\",\"req\":\"^0.3.3\"},{\"name\":\"bytes\",\"req\":\"^1.0.0\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3.0\"},{\"features\":[\"rt\",\"sync\",\"time\",\"test-util\"],\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"name\":\"tokio-stream\",\"req\":\"^0.1.1\"}],\"features\":{}}",
"tokio-tungstenite_0.21.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.10.0\"},{\"kind\":\"dev\",\"name\":\"futures-channel\",\"req\":\"^0.3.28\"},{\"default_features\":false,\"features\":[\"sink\",\"std\"],\"name\":\"futures-util\",\"req\":\"^0.3.28\"},{\"default_features\":false,\"features\":[\"http1\",\"server\",\"tcp\"],\"kind\":\"dev\",\"name\":\"hyper\",\"req\":\"^0.14.25\"},{\"name\":\"log\",\"req\":\"^0.4.17\"},{\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\",\"req\":\"^0.2.11\"},{\"name\":\"rustls\",\"optional\":true,\"req\":\"^0.22.0\"},{\"name\":\"rustls-native-certs\",\"optional\":true,\"req\":\"^0.7.0\"},{\"name\":\"rustls-pki-types\",\"optional\":true,\"req\":\"^1.0\"},{\"default_features\":false,\"features\":[\"io-util\"],\"name\":\"tokio\",\"req\":\"^1.0.0\"},{\"default_features\":false,\"features\":[\"io-std\",\"macros\",\"net\",\"rt-multi-thread\",\"time\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.27.0\"},{\"name\":\"tokio-native-tls\",\"optional\":true,\"req\":\"^0.3.1\"},{\"name\":\"tokio-rustls\",\"optional\":true,\"req\":\"^0.25.0\"},{\"default_features\":false,\"name\":\"tungstenite\",\"req\":\"^0.21.0\"},{\"kind\":\"dev\",\"name\":\"url\",\"req\":\"^2.3.1\"},{\"name\":\"webpki-roots\",\"optional\":true,\"req\":\"^0.26.0\"}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\",\"tokio-rustls\",\"stream\",\"tungstenite/__rustls-tls\",\"handshake\"],\"connect\":[\"stream\",\"tokio/net\",\"handshake\"],\"default\":[\"connect\",\"handshake\"],\"handshake\":[\"tungstenite/handshake\"],\"native-tls\":[\"native-tls-crate\",\"tokio-native-tls\",\"stream\",\"tungstenite/native-tls\",\"handshake\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\",\"tungstenite/native-tls-vendored\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"],\"stream\":[]}}",
"tokio-util_0.7.18": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3.0\"},{\"name\":\"bytes\",\"req\":\"^1.5.0\"},{\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"name\":\"futures-io\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"futures-sink\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-test\",\"req\":\"^0.3.5\"},{\"name\":\"futures-util\",\"optional\":true,\"req\":\"^0.3.0\"},{\"default_features\":false,\"name\":\"hashbrown\",\"optional\":true,\"req\":\"^0.15.0\"},{\"features\":[\"futures\",\"checkpoint\"],\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.4\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"req\":\"^1.44.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\"}],\"features\":{\"__docs_rs\":[\"futures-util\"],\"codec\":[],\"compat\":[\"futures-io\"],\"default\":[],\"full\":[\"codec\",\"compat\",\"io-util\",\"time\",\"net\",\"rt\",\"join-map\"],\"io\":[],\"io-util\":[\"io\",\"tokio/rt\",\"tokio/io-util\"],\"join-map\":[\"rt\",\"hashbrown\"],\"net\":[\"tokio/net\"],\"rt\":[\"tokio/rt\",\"tokio/sync\",\"futures-util\"],\"time\":[\"tokio/time\",\"slab\"]}}",
"tokio_1.49.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3\"},{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.58\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1.2.1\"},{\"features\":[\"async-await\"],\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-concurrency\",\"req\":\"^7.6.3\"},{\"kind\":\"dev\",\"name\":\"futures-test\",\"req\":\"^0.3.31\"},{\"default_features\":false,\"name\":\"io-uring\",\"optional\":true,\"req\":\"^0.7.6\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"features\":[\"futures\",\"checkpoint\"],\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"default_features\":false,\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\"},{\"default_features\":false,\"features\":[\"os-poll\",\"os-ext\"],\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"tokio\"],\"kind\":\"dev\",\"name\":\"mio-aio\",\"req\":\"^1\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"kind\":\"dev\",\"name\":\"mockall\",\"req\":\"^0.13.0\"},{\"default_features\":false,\"features\":[\"aio\",\"fs\",\"socket\"],\"kind\":\"dev\",\"name\":\"nix\",\"req\":\"^0.29.0\",\"target\":\"cfg(unix)\"},{\"name\":\"parking_lot\",\"optional\":true,\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.9\",\"target\":\"cfg(not(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\")))\"},{\"name\":\"signal-hook-registry\",\"optional\":true,\"req\":\"^1.1.1\",\"target\":\"cfg(unix)\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.9\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"all\"],\"name\":\"socket2\",\"optional\":true,\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"socket2\",\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"name\":\"tokio-macros\",\"optional\":true,\"req\":\"~2.6.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"features\":[\"rt\"],\"kind\":\"dev\",\"name\":\"tokio-util\",\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\",\"target\":\"cfg(tokio_unstable)\"},{\"kind\":\"dev\",\"name\":\"tracing-mock\",\"req\":\"=0.1.0-beta.1\",\"target\":\"cfg(all(tokio_unstable, target_has_atomic = \\\"64\\\"))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3.0\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", not(target_os = 
\\\"wasi\\\")))\"},{\"name\":\"windows-sys\",\"optional\":true,\"req\":\"^0.61\",\"target\":\"cfg(windows)\"},{\"features\":[\"Win32_Foundation\",\"Win32_Security_Authorization\"],\"kind\":\"dev\",\"name\":\"windows-sys\",\"req\":\"^0.61\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[],\"fs\":[],\"full\":[\"fs\",\"io-util\",\"io-std\",\"macros\",\"net\",\"parking_lot\",\"process\",\"rt\",\"rt-multi-thread\",\"signal\",\"sync\",\"time\"],\"io-std\":[],\"io-uring\":[\"dep:io-uring\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"dep:slab\"],\"io-util\":[\"bytes\"],\"macros\":[\"tokio-macros\"],\"net\":[\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"socket2\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_Security\",\"windows-sys/Win32_Storage_FileSystem\",\"windows-sys/Win32_System_Pipes\",\"windows-sys/Win32_System_SystemServices\"],\"process\":[\"bytes\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Threading\",\"windows-sys/Win32_System_WindowsProgramming\"],\"rt\":[],\"rt-multi-thread\":[\"rt\"],\"signal\":[\"libc\",\"mio/os-poll\",\"mio/net\",\"mio/os-ext\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Console\"],\"sync\":[],\"taskdump\":[\"dep:backtrace\"],\"test-util\":[\"rt\",\"sync\",\"time\"],\"time\":[]}}",
"tokio_1.48.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3\"},{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.58\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1.2.1\"},{\"features\":[\"async-await\"],\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-concurrency\",\"req\":\"^7.6.3\"},{\"default_features\":false,\"name\":\"io-uring\",\"optional\":true,\"req\":\"^0.7.6\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"features\":[\"futures\",\"checkpoint\"],\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"default_features\":false,\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\"},{\"default_features\":false,\"features\":[\"os-poll\",\"os-ext\"],\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"tokio\"],\"kind\":\"dev\",\"name\":\"mio-aio\",\"req\":\"^1\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"kind\":\"dev\",\"name\":\"mockall\",\"req\":\"^0.13.0\"},{\"default_features\":false,\"features\":[\"aio\",\"fs\",\"socket\"],\"kind\":\"dev\",\"name\":\"nix\",\"req\":\"^0.29.0\",\"target\":\"cfg(unix)\"},{\"name\":\"parking_lot\",\"optional\":true,\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.9\",\"target\":\"cfg(not(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\")))\"},{\"name\":\"signal-hook-registry\",\"optional\":true,\"req\":\"^1.1.1\",\"target\":\"cfg(unix)\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.9\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"all\"],\"name\":\"socket2\",\"optional\":true,\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"socket2\",\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"name\":\"tokio-macros\",\"optional\":true,\"req\":\"~2.6.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"features\":[\"rt\"],\"kind\":\"dev\",\"name\":\"tokio-util\",\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\",\"target\":\"cfg(tokio_unstable)\"},{\"kind\":\"dev\",\"name\":\"tracing-mock\",\"req\":\"=0.1.0-beta.1\",\"target\":\"cfg(all(tokio_unstable, target_has_atomic = \\\"64\\\"))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3.0\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", not(target_os = 
\\\"wasi\\\")))\"},{\"name\":\"windows-sys\",\"optional\":true,\"req\":\"^0.61\",\"target\":\"cfg(windows)\"},{\"features\":[\"Win32_Foundation\",\"Win32_Security_Authorization\"],\"kind\":\"dev\",\"name\":\"windows-sys\",\"req\":\"^0.61\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[],\"fs\":[],\"full\":[\"fs\",\"io-util\",\"io-std\",\"macros\",\"net\",\"parking_lot\",\"process\",\"rt\",\"rt-multi-thread\",\"signal\",\"sync\",\"time\"],\"io-std\":[],\"io-uring\":[\"dep:io-uring\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"dep:slab\"],\"io-util\":[\"bytes\"],\"macros\":[\"tokio-macros\"],\"net\":[\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"socket2\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_Security\",\"windows-sys/Win32_Storage_FileSystem\",\"windows-sys/Win32_System_Pipes\",\"windows-sys/Win32_System_SystemServices\"],\"process\":[\"bytes\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Threading\",\"windows-sys/Win32_System_WindowsProgramming\"],\"rt\":[],\"rt-multi-thread\":[\"rt\"],\"signal\":[\"libc\",\"mio/os-poll\",\"mio/net\",\"mio/os-ext\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Console\"],\"sync\":[],\"taskdump\":[\"dep:backtrace\"],\"test-util\":[\"rt\",\"sync\",\"time\"],\"time\":[]}}",
"toml_0.5.11": "{\"dependencies\":[{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"serde\",\"req\":\"^1.0.97\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{\"default\":[],\"preserve_order\":[\"indexmap\"]}}",
"toml_0.9.5": "{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.15\"},{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.8\"},{\"default_features\":false,\"name\":\"foldhash\",\"optional\":true,\"req\":\"^0.1.5\"},{\"default_features\":false,\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2.3.0\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.14.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.145\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0.199\"},{\"kind\":\"dev\",\"name\":\"serde-untagged\",\"req\":\"^0.1.7\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.116\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"serde_spanned\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.0\"},{\"kind\":\"dev\",\"name\":\"toml-test-data\",\"req\":\"^2.3.0\"},{\"features\":[\"snapshot\"],\"kind\":\"dev\",\"name\":\"toml-test-harness\",\"req\":\"^1.3.2\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_datetime\",\"req\":\"^0.7.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_parser\",\"optional\":true,\"req\":\"^1.0.2\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_writer\",\"optional\":true,\"req\":\"^1.0.2\"},{\"kind\":\"dev\",\"name\":\"walkdir\",\"req\":\"^2.5.0\"},{\"default_features\":false,\"name\":\"winnow\",\"optional\":true,\"req\":\"^0.7.10\"}],\"features\":{\"debug\":[\"std\",\"toml_parser?/debug\",\"dep:anstream\",\"dep:anstyle\"],\"default\":[\"std\",\"serde\",\"parse\",\"display\"],\"display\":[\"dep:toml_writer\"],\"fast_hash\":[\"preserve_order\",\"dep:foldhash\"],\"parse\":[\"dep:toml_parser\",\"dep:winnow\"],\"preserve_order\":[\"dep:indexmap\",\"std\"],\"serde\":[\"dep:serde\",\"toml_datetime/serde\",\"serde_spanned/serde\"],\"std\":[\"indexmap?/std\",\"serde?/std\",\"toml_parser?/std\",\"toml_writer?/std\",\"toml_datetime/std\",\"serde_spanned/std\"],\"unbounded\":[]}}",
"toml_datetime_0.7.5+spec-1.1.0": "{\"dependencies\":[{\"default_features\":false,\"name\":\"serde_core\",\"optional\":true,\"req\":\"^1.0.225\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.21\"}],\"features\":{\"alloc\":[\"serde_core?/alloc\"],\"default\":[\"std\"],\"serde\":[\"dep:serde_core\"],\"std\":[\"alloc\",\"serde_core?/std\"]}}",
@@ -1157,6 +941,8 @@
"try-lock_0.2.5": "{\"dependencies\":[],\"features\":{}}",
"ts-rs-macros_11.1.0": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"features\":[\"full\",\"extra-traits\"],\"name\":\"syn\",\"req\":\"^2.0.28\"},{\"name\":\"termcolor\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"no-serde-warnings\":[],\"serde-compat\":[\"termcolor\"]}}",
"ts-rs_11.1.0": "{\"dependencies\":[{\"features\":[\"serde\"],\"name\":\"bigdecimal\",\"optional\":true,\"req\":\">=0.0.13, <0.5\"},{\"name\":\"bson\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"chrono\",\"optional\":true,\"req\":\"^0.4\"},{\"features\":[\"serde\"],\"kind\":\"dev\",\"name\":\"chrono\",\"req\":\"^0.4\"},{\"name\":\"dprint-plugin-typescript\",\"optional\":true,\"req\":\"=0.95\"},{\"name\":\"heapless\",\"optional\":true,\"req\":\">=0.7, <0.9\"},{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"ordered-float\",\"optional\":true,\"req\":\">=3, <6\"},{\"name\":\"semver\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"name\":\"serde_json\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"name\":\"smol_str\",\"optional\":true,\"req\":\"^0.3\"},{\"name\":\"thiserror\",\"req\":\"^2\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"sync\",\"rt\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.40\"},{\"name\":\"ts-rs-macros\",\"req\":\"=11.1.0\"},{\"name\":\"url\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"uuid\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"bigdecimal-impl\":[\"bigdecimal\"],\"bson-uuid-impl\":[\"bson\"],\"bytes-impl\":[\"bytes\"],\"chrono-impl\":[\"chrono\"],\"default\":[\"serde-compat\"],\"format\":[\"dprint-plugin-typescript\"],\"heapless-impl\":[\"heapless\"],\"import-esm\":[],\"indexmap-impl\":[\"indexmap\"],\"no-serde-warnings\":[\"ts-rs-macros/no-serde-warnings\"],\"ordered-float-impl\":[\"ordered-float\"],\"semver-impl\":[\"semver\"],\"serde-compat\":[\"ts-rs-macros/serde-compat\"],\"serde-json-impl\":[\"serde_json\"],\"smol_str-impl\":[\"smol_str\"],\"tokio-impl\":[\"tokio\"],\"url-impl\":[\"url\"],\"uuid-impl\":[\"uuid\"]}}",
"tui-scrollbar_0.2.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"color-eyre\",\"req\":\"^0.6\"},{\"name\":\"crossterm_0_28\",\"optional\":true,\"package\":\"crossterm\",\"req\":\"^0.28\"},{\"name\":\"crossterm_0_29\",\"optional\":true,\"package\":\"crossterm\",\"req\":\"^0.29\"},{\"name\":\"document-features\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"ratatui\",\"req\":\"^0.30.0\"},{\"name\":\"ratatui-core\",\"req\":\"^0.1\"}],\"features\":{\"crossterm\":[\"crossterm_0_29\"],\"crossterm_0_28\":[\"dep:crossterm_0_28\"],\"crossterm_0_29\":[\"dep:crossterm_0_29\"],\"default\":[]}}",
"tungstenite_0.21.0": "{\"dependencies\":[{\"name\":\"byteorder\",\"req\":\"^1.3.2\"},{\"name\":\"bytes\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5.0\"},{\"name\":\"data-encoding\",\"optional\":true,\"req\":\"^2\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.10.0\"},{\"name\":\"http\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"httparse\",\"optional\":true,\"req\":\"^1.3.4\"},{\"kind\":\"dev\",\"name\":\"input_buffer\",\"req\":\"^0.5.0\"},{\"name\":\"log\",\"req\":\"^0.4.8\"},{\"name\":\"native-tls-crate\",\"optional\":true,\"package\":\"native-tls\",\"req\":\"^0.2.3\"},{\"name\":\"rand\",\"req\":\"^0.8.0\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8.4\"},{\"name\":\"rustls\",\"optional\":true,\"req\":\"^0.22.0\"},{\"name\":\"rustls-native-certs\",\"optional\":true,\"req\":\"^0.7.0\"},{\"name\":\"rustls-pki-types\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"sha1\",\"optional\":true,\"req\":\"^0.10\"},{\"kind\":\"dev\",\"name\":\"socket2\",\"req\":\"^0.5.5\"},{\"name\":\"thiserror\",\"req\":\"^1.0.23\"},{\"name\":\"url\",\"optional\":true,\"req\":\"^2.1.0\"},{\"name\":\"utf-8\",\"req\":\"^0.7.5\"},{\"name\":\"webpki-roots\",\"optional\":true,\"req\":\"^0.26\"}],\"features\":{\"__rustls-tls\":[\"rustls\",\"rustls-pki-types\"],\"default\":[\"handshake\"],\"handshake\":[\"data-encoding\",\"http\",\"httparse\",\"sha1\",\"url\"],\"native-tls\":[\"native-tls-crate\"],\"native-tls-vendored\":[\"native-tls\",\"native-tls-crate/vendored\"],\"rustls-tls-native-roots\":[\"__rustls-tls\",\"rustls-native-certs\"],\"rustls-tls-webpki-roots\":[\"__rustls-tls\",\"webpki-roots\"]}}",
"typenum_1.18.0": "{\"dependencies\":[{\"default_features\":false,\"name\":\"scale-info\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"const-generics\":[],\"force_unix_path_separator\":[],\"i128\":[],\"no_std\":[],\"scale_info\":[\"scale-info/derive\"],\"strict\":[]}}",
"uds_windows_1.1.0": "{\"dependencies\":[{\"name\":\"memoffset\",\"req\":\"^0.9.0\"},{\"name\":\"tempfile\",\"req\":\"^3\",\"target\":\"cfg(windows)\"},{\"features\":[\"winsock2\",\"ws2def\",\"minwinbase\",\"ntdef\",\"processthreadsapi\",\"handleapi\",\"ws2tcpip\",\"winbase\"],\"name\":\"winapi\",\"req\":\"^0.3.9\",\"target\":\"cfg(windows)\"}],\"features\":{}}",
"uname_0.1.1": "{\"dependencies\":[{\"name\":\"libc\",\"req\":\"^0.2\"}],\"features\":{}}",
@@ -1166,6 +952,7 @@
"unicode-linebreak_0.1.5": "{\"dependencies\":[],\"features\":{}}",
"unicode-segmentation_1.12.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"kind\":\"dev\",\"name\":\"quickcheck\",\"req\":\"^0.7\"}],\"features\":{\"no_std\":[]}}",
"unicode-truncate_1.1.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"default_features\":false,\"name\":\"itertools\",\"req\":\"^0.13\"},{\"default_features\":false,\"name\":\"unicode-segmentation\",\"req\":\"^1\"},{\"name\":\"unicode-width\",\"req\":\"^0.1\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"unicode-truncate_2.0.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5\"},{\"default_features\":false,\"name\":\"itertools\",\"req\":\"^0.13\"},{\"default_features\":false,\"name\":\"unicode-segmentation\",\"req\":\"^1\"},{\"name\":\"unicode-width\",\"req\":\"^0.2\"}],\"features\":{\"default\":[\"std\"],\"std\":[]}}",
"unicode-width_0.1.14": "{\"dependencies\":[{\"name\":\"compiler_builtins\",\"optional\":true,\"req\":\"^0.1\"},{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0\"},{\"name\":\"std\",\"optional\":true,\"package\":\"rustc-std-workspace-std\",\"req\":\"^1.0\"}],\"features\":{\"cjk\":[],\"default\":[\"cjk\"],\"no_std\":[],\"rustc-dep-of-std\":[\"std\",\"core\",\"compiler_builtins\"]}}",
"unicode-width_0.2.1": "{\"dependencies\":[{\"name\":\"core\",\"optional\":true,\"package\":\"rustc-std-workspace-core\",\"req\":\"^1.0\"},{\"name\":\"std\",\"optional\":true,\"package\":\"rustc-std-workspace-std\",\"req\":\"^1.0\"}],\"features\":{\"cjk\":[],\"default\":[\"cjk\"],\"no_std\":[],\"rustc-dep-of-std\":[\"std\",\"core\"]}}",
"unicode-xid_0.2.6": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3\"}],\"features\":{\"bench\":[],\"default\":[],\"no_std\":[]}}",
@@ -1206,7 +993,6 @@
"web-time_1.1.0": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"futures-channel\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_feature = \\\"atomics\\\"))\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_feature = \\\"atomics\\\"))\"},{\"features\":[\"js\"],\"kind\":\"dev\",\"name\":\"getrandom\",\"req\":\"^0.2\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"},{\"name\":\"js-sys\",\"req\":\"^0.3.20\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\"))\"},{\"features\":[\"macro\"],\"kind\":\"dev\",\"name\":\"pollster\",\"req\":\"^0.3\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"},{\"name\":\"serde\",\"optional\":true,\"req\":\"^1\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\"))\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1\"},{\"default_features\":false,\"name\":\"wasm-bindgen\",\"req\":\"^0.2.70\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\"))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-futures\",\"req\":\"^0.4\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"},{\"features\":[\"WorkerGlobalScope\"],\"kind\":\"dev\",\"name\":\"web-sys\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", target_feature = \\\"atomics\\\"))\"},{\"features\":[\"CssStyleDeclaration\",\"Document\",\"Element\",\"HtmlTableElement\",\"HtmlTableRowElement\",\"Performance\",\"Window\"],\"kind\":\"dev\",\"name\":\"web-sys\",\"req\":\"^0.3\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"}],\"features\":{\"serde\":[\"dep:serde\"]}}",
"webbrowser_1.0.6": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"actix-files\",\"req\":\"^0.6\"},{\"kind\":\"dev\",\"name\":\"actix-web\",\"req\":\"^4\"},{\"name\":\"core-foundation\",\"req\":\"^0.10\",\"target\":\"cfg(target_os = \\\"macos\\\")\"},{\"kind\":\"dev\",\"name\":\"crossbeam-channel\",\"req\":\"^0.5\"},{\"kind\":\"dev\",\"name\":\"env_logger\",\"req\":\"^0.9.0\"},{\"name\":\"jni\",\"req\":\"^0.21\",\"target\":\"cfg(target_os = \\\"android\\\")\"},{\"name\":\"log\",\"req\":\"^0.4\"},{\"name\":\"ndk-context\",\"req\":\"^0.1\",\"target\":\"cfg(target_os = \\\"android\\\")\"},{\"kind\":\"dev\",\"name\":\"ndk-glue\",\"req\":\">=0.3, <=0.7\",\"target\":\"cfg(target_os = \\\"android\\\")\"},{\"name\":\"objc2\",\"req\":\"^0.6\",\"target\":\"cfg(any(target_os = \\\"ios\\\", target_os = \\\"tvos\\\", target_os = \\\"visionos\\\"))\"},{\"default_features\":false,\"features\":[\"std\",\"NSDictionary\",\"NSString\",\"NSURL\"],\"name\":\"objc2-foundation\",\"req\":\"^0.3\",\"target\":\"cfg(any(target_os = \\\"ios\\\", target_os = \\\"tvos\\\", target_os = \\\"visionos\\\"))\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8\"},{\"kind\":\"dev\",\"name\":\"serial_test\",\"req\":\"^0.10\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"name\":\"url\",\"req\":\"^2\"},{\"kind\":\"dev\",\"name\":\"urlencoding\",\"req\":\"^2.1\"},{\"features\":[\"Window\"],\"name\":\"web-sys\",\"req\":\"^0.3\",\"target\":\"cfg(target_family = \\\"wasm\\\")\"}],\"features\":{\"disable-wsl\":[],\"hardened\":[],\"wasm-console\":[\"web-sys/console\"]}}",
"webpki-root-certs_1.0.4": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4.3\"},{\"kind\":\"dev\",\"name\":\"percent-encoding\",\"req\":\"^2.3\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.8\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17.0\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.103\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17.0\"}],\"features\":{}}",
"webpki-roots_0.26.11": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4.3\"},{\"name\":\"parent\",\"package\":\"webpki-roots\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"percent-encoding\",\"req\":\"^2.3\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.8\"},{\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.13\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"rustls\",\"req\":\"^0.23\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.102\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"yasna\",\"req\":\"^0.5.2\"}],\"features\":{}}",
"webpki-roots_1.0.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4.3\"},{\"kind\":\"dev\",\"name\":\"percent-encoding\",\"req\":\"^2.3\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.8\"},{\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.14\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"rustls\",\"req\":\"^0.23\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.103\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"yasna\",\"req\":\"^0.5.2\"}],\"features\":{}}",
"weezl_0.1.10": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"futures\",\"optional\":true,\"req\":\"^0.3.12\"},{\"default_features\":false,\"features\":[\"macros\",\"io-util\",\"net\",\"rt\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"compat\"],\"kind\":\"dev\",\"name\":\"tokio-util\",\"req\":\"^0.6.2\"}],\"features\":{\"alloc\":[],\"async\":[\"futures\",\"std\"],\"default\":[\"std\"],\"std\":[\"alloc\"]}}",
"which_8.0.0": "{\"dependencies\":[{\"name\":\"env_home\",\"optional\":true,\"req\":\"^0.1.0\",\"target\":\"cfg(any(windows, unix, target_os = \\\"redox\\\"))\"},{\"name\":\"regex\",\"optional\":true,\"req\":\"^1.10.2\"},{\"default_features\":false,\"features\":[\"fs\",\"std\"],\"name\":\"rustix\",\"optional\":true,\"req\":\"^1.0.5\",\"target\":\"cfg(any(unix, target_os = \\\"wasi\\\", target_os = \\\"redox\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.9.0\"},{\"default_features\":false,\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.40\"},{\"features\":[\"kernel\"],\"name\":\"winsafe\",\"optional\":true,\"req\":\"^0.0.19\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[\"real-sys\"],\"real-sys\":[\"dep:env_home\",\"dep:rustix\",\"dep:winsafe\"],\"regex\":[\"dep:regex\"],\"tracing\":[\"dep:tracing\"]}}",

View File

@@ -15,7 +15,7 @@ This project has been migrated from npm to pnpm to improve dependency management
```bash
# Global installation of pnpm
npm install -g pnpm@10.28.2
npm install -g pnpm@10.8.1
# Or with corepack (available with Node.js 22+)
corepack enable
@@ -59,12 +59,12 @@ codex/
## CI/CD
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.28.2 or higher.
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
## Known issues
If you encounter issues with pnpm, try the following solutions (see the sketch after this list):
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install`
2. Make sure you're using pnpm 10.28.2 or higher
2. Make sure you're using pnpm 10.8.1 or higher
3. Verify that Node.js 22 or higher is installed
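
For reference, a minimal sketch of the troubleshooting steps above as shell commands. This assumes a POSIX shell with pnpm and Node.js already on PATH; the version checks simply mirror the minimums called out in the list and are not part of the original README.

```bash
# Step 1: remove the existing install state, then reinstall
# (pnpm regenerates pnpm-lock.yaml on the next install)
rm -rf node_modules pnpm-lock.yaml
pnpm install

# Step 2: confirm the pnpm version meets the minimum noted above
pnpm --version

# Step 3: confirm Node.js 22 or higher is installed
node --version
```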

0
codex-cli/bin/codex.js Executable file → Normal file
View File

1137
codex-rs/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -27,7 +27,6 @@ members = [
"login",
"mcp-server",
"mcp-types",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
@@ -36,6 +35,7 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"tui2",
"utils/absolute-path",
"utils/cargo-bin",
"utils/git",
@@ -93,6 +93,7 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
@@ -121,12 +122,12 @@ axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"
chrono = "0.4.43"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
ctor = "0.6.3"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
@@ -137,7 +138,6 @@ env-flags = "0.1.1"
env_logger = "0.11.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
globset = "0.4"
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"
@@ -178,6 +178,7 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-core = "0.1.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.8"
@@ -188,7 +189,6 @@ seccompiler = "0.5.0"
sentry = "0.46.0"
serde = "1"
serde_json = "1"
serde_path_to_error = "0.1.20"
serde_with = "3.16"
serde_yaml = "0.9"
serial_test = "3.2.0"
@@ -212,11 +212,11 @@ tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-native-roots"] }
tokio-tungstenite = "0.21.0"
tokio-util = "0.7.18"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.44"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
@@ -225,6 +225,7 @@ tree-sitter-bash = "0.25"
zstd = "0.13"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tui-scrollbar = "0.2.2"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -302,10 +303,6 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
tokio-tungstenite = { git = "https://github.com/JakkuSakura/tokio-tungstenite", rev = "2ae536b0de793f3ddf31fc2f22d445bf1ef2023d" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
[patch."ssh://git@github.com/JakkuSakura/tungstenite-rs.git"]
tungstenite = { git = "https://github.com/JakkuSakura/tungstenite-rs", rev = "f514de8644821113e5d18a027d6d28a5c8cc0a6e" }

View File

@@ -117,10 +117,6 @@ client_request_definitions! {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
ThreadUnarchive => "thread/unarchive" {
params: v2::ThreadUnarchiveParams,
response: v2::ThreadUnarchiveResponse,
},
ThreadRollback => "thread/rollback" {
params: v2::ThreadRollbackParams,
response: v2::ThreadRollbackResponse,
@@ -133,22 +129,10 @@ client_request_definitions! {
params: v2::ThreadLoadedListParams,
response: v2::ThreadLoadedListResponse,
},
ThreadRead => "thread/read" {
params: v2::ThreadReadParams,
response: v2::ThreadReadResponse,
},
SkillsList => "skills/list" {
params: v2::SkillsListParams,
response: v2::SkillsListResponse,
},
AppsList => "app/list" {
params: v2::AppsListParams,
response: v2::AppsListResponse,
},
SkillsConfigWrite => "skills/config/write" {
params: v2::SkillsConfigWriteParams,
response: v2::SkillsConfigWriteResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
@@ -166,11 +150,6 @@ client_request_definitions! {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
/// EXPERIMENTAL - list collaboration mode presets.
CollaborationModeList => "collaborationMode/list" {
params: v2::CollaborationModeListParams,
response: v2::CollaborationModeListResponse,
},
McpServerOauthLogin => "mcpServer/oauth/login" {
params: v2::McpServerOauthLoginParams,
@@ -522,18 +501,6 @@ server_request_definitions! {
response: v2::FileChangeRequestApprovalResponse,
},
/// EXPERIMENTAL - Request input from the user for a tool call.
ToolRequestUserInput => "item/tool/requestUserInput" {
params: v2::ToolRequestUserInputParams,
response: v2::ToolRequestUserInputResponse,
},
/// Execute a dynamic tool call on the client.
DynamicToolCall => "item/tool/call" {
params: v2::DynamicToolCallParams,
response: v2::DynamicToolCallResponse,
},
/// DEPRECATED APIs below
/// Request to approve a patch.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
@@ -598,7 +565,6 @@ server_notification_definitions! {
ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
ContextCompactionStarted => "thread/compaction/started" (v2::ContextCompactionStartedNotification),
ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),
ConfigWarning => "configWarning" (v2::ConfigWarningNotification),
@@ -908,21 +874,4 @@ mod tests {
);
Ok(())
}
#[test]
fn serialize_list_collaboration_modes() -> Result<()> {
let request = ClientRequest::CollaborationModeList {
request_id: RequestId::Integer(7),
params: v2::CollaborationModeListParams::default(),
};
assert_eq!(
json!({
"method": "collaborationMode/list",
"id": 7,
"params": {}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}

View File

@@ -56,8 +56,6 @@ impl ThreadHistoryBuilder {
self.handle_agent_reasoning_raw_content(payload)
}
EventMsg::TokenCount(_) => {}
EventMsg::ContextCompactionStarted(_) => {}
EventMsg::ContextCompactionEnded(_) => {}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
EventMsg::ThreadRolledBack(payload) => self.handle_thread_rollback(payload),

View File

@@ -128,7 +128,6 @@ pub struct ConversationSummary {
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub updated_at: Option<String>,
pub model_provider: String,
pub cwd: PathBuf,
pub cli_version: String,
@@ -503,14 +502,17 @@ impl From<CoreTextElement> for V1TextElement {
fn from(value: CoreTextElement) -> Self {
Self {
byte_range: value.byte_range.into(),
placeholder: value._placeholder_for_conversion_only().map(str::to_string),
placeholder: value.placeholder,
}
}
}
impl From<V1TextElement> for CoreTextElement {
fn from(value: V1TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}

View File

@@ -4,10 +4,7 @@ use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
@@ -31,7 +28,6 @@ use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
use codex_protocol::protocol::SkillInterface as CoreSkillInterface;
use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
use codex_protocol::protocol::SkillScope as CoreSkillScope;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::ByteRange as CoreByteRange;
@@ -220,6 +216,7 @@ v2_enum_from_core!(
}
);
// TODO(mbolin): Support in-repo layer.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
@@ -325,15 +322,6 @@ pub struct ToolsV2 {
pub view_image: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolSpec {
pub name: String,
pub description: String,
pub input_schema: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
@@ -404,8 +392,6 @@ pub struct ConfigLayer {
pub name: ConfigLayerSource,
pub version: String,
pub config: JsonValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub disabled_reason: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
@@ -462,10 +448,6 @@ pub enum ConfigWriteErrorCode {
pub struct ConfigReadParams {
#[serde(default)]
pub include_layers: bool,
/// Optional working directory to resolve project config layers. If specified,
/// return the effective config as seen from that directory (i.e., including any
/// project layers between `cwd` and the project/repo root).
pub cwd: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -711,7 +693,6 @@ pub enum SessionSource {
VsCode,
Exec,
AppServer,
SubAgent(CoreSubAgentSource),
#[serde(other)]
Unknown,
}
@@ -723,7 +704,7 @@ impl From<CoreSessionSource> for SessionSource {
CoreSessionSource::VSCode => SessionSource::VsCode,
CoreSessionSource::Exec => SessionSource::Exec,
CoreSessionSource::Mcp => SessionSource::AppServer,
CoreSessionSource::SubAgent(sub) => SessionSource::SubAgent(sub),
CoreSessionSource::SubAgent(_) => SessionSource::Unknown,
CoreSessionSource::Unknown => SessionSource::Unknown,
}
}
@@ -736,7 +717,6 @@ impl From<SessionSource> for CoreSessionSource {
SessionSource::VsCode => CoreSessionSource::VSCode,
SessionSource::Exec => CoreSessionSource::Exec,
SessionSource::AppServer => CoreSessionSource::Mcp,
SessionSource::SubAgent(sub) => CoreSessionSource::SubAgent(sub),
SessionSource::Unknown => CoreSessionSource::Unknown,
}
}
@@ -914,8 +894,6 @@ pub struct Model {
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
#[serde(default)]
pub supports_personality: bool,
// Only one model should be marked as default.
pub is_default: bool,
}
@@ -938,20 +916,6 @@ pub struct ModelListResponse {
pub next_cursor: Option<String>,
}
/// EXPERIMENTAL - list collaboration mode presets.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListParams {}
/// EXPERIMENTAL - collaboration mode presets response.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListResponse {
pub data: Vec<CollaborationModeMask>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -983,39 +947,6 @@ pub struct ListMcpServerStatusResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppInfo {
pub id: String,
pub name: String,
pub description: Option<String>,
pub logo_url: Option<String>,
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListResponse {
pub data: Vec<AppInfo>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1097,9 +1028,6 @@ pub struct ThreadStartParams {
pub config: Option<HashMap<String, JsonValue>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
pub ephemeral: Option<bool>,
pub dynamic_tools: Option<Vec<DynamicToolSpec>>,
/// If true, opt into emitting raw response items on the event stream.
///
/// This is for internal use only (e.g. Codex Cloud).
@@ -1154,7 +1082,6 @@ pub struct ThreadResumeParams {
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1223,20 +1150,6 @@ pub struct ThreadArchiveParams {
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadUnarchiveParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadUnarchiveResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1269,43 +1182,9 @@ pub struct ThreadListParams {
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
/// Optional sort key; defaults to created_at.
pub sort_key: Option<ThreadSortKey>,
/// Optional provider filter; when set, only sessions recorded under these
/// providers are returned. When present but empty, includes all providers.
pub model_providers: Option<Vec<String>>,
/// Optional source filter; when set, only sessions from these source kinds
/// are returned. When omitted or empty, defaults to interactive sources.
pub source_kinds: Option<Vec<ThreadSourceKind>>,
/// Optional archived filter; when set to true, only archived threads are returned.
/// If false or null, only non-archived threads are returned.
pub archived: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase", export_to = "v2/")]
pub enum ThreadSourceKind {
Cli,
#[serde(rename = "vscode")]
#[ts(rename = "vscode")]
VsCode,
Exec,
AppServer,
SubAgent,
SubAgentReview,
SubAgentCompact,
SubAgentThreadSpawn,
SubAgentOther,
Unknown,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub enum ThreadSortKey {
CreatedAt,
UpdatedAt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1339,23 +1218,6 @@ pub struct ThreadLoadedListResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadParams {
pub thread_id: String,
/// When true, include turns and their items from rollout history.
#[serde(default)]
pub include_turns: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1402,7 +1264,6 @@ pub struct SkillMetadata {
pub interface: Option<SkillInterface>,
pub path: PathBuf,
pub scope: SkillScope,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1440,21 +1301,6 @@ pub struct SkillsListEntry {
pub errors: Vec<SkillErrorInfo>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteParams {
pub path: PathBuf,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteResponse {
pub effective_enabled: bool,
}
impl From<CoreSkillMetadata> for SkillMetadata {
fn from(value: CoreSkillMetadata) -> Self {
Self {
@@ -1464,7 +1310,6 @@ impl From<CoreSkillMetadata> for SkillMetadata {
interface: value.interface.map(SkillInterface::from),
path: value.path,
scope: value.scope.into(),
enabled: true,
}
}
}
@@ -1514,11 +1359,8 @@ pub struct Thread {
/// Unix timestamp (in seconds) when the thread was created.
#[ts(type = "number")]
pub created_at: i64,
/// Unix timestamp (in seconds) when the thread was last updated.
#[ts(type = "number")]
pub updated_at: i64,
/// [UNSTABLE] Path to the thread on disk.
pub path: Option<PathBuf>,
pub path: PathBuf,
/// Working directory captured for the thread.
pub cwd: PathBuf,
/// Version of the CLI that created the thread.
@@ -1527,8 +1369,7 @@ pub struct Thread {
pub source: SessionSource,
/// Optional Git metadata captured when the thread was created.
pub git_info: Option<GitInfo>,
/// Only populated on `thread/resume`, `thread/rollback`, `thread/fork`, and `thread/read`
/// (when `includeTurns` is true) responses.
/// Only populated on `thread/resume`, `thread/rollback`, `thread/fork` responses.
/// For all other responses and notifications returning a Thread,
/// the turns field will be an empty list.
pub turns: Vec<Turn>,
@@ -1665,14 +1506,8 @@ pub struct TurnStartParams {
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
/// Override the personality for this turn and subsequent turns.
pub personality: Option<Personality>,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<JsonValue>,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, reasoning_effort, and developer instructions if set.
pub collaboration_mode: Option<CollaborationMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1781,38 +1616,24 @@ pub struct TextElement {
/// Byte range in the parent `text` buffer that this element occupies.
pub byte_range: ByteRange,
/// Optional human-readable placeholder for the element, displayed in the UI.
placeholder: Option<String>,
}
impl TextElement {
pub fn new(byte_range: ByteRange, placeholder: Option<String>) -> Self {
Self {
byte_range,
placeholder,
}
}
pub fn set_placeholder(&mut self, placeholder: Option<String>) {
self.placeholder = placeholder;
}
pub fn placeholder(&self) -> Option<&str> {
self.placeholder.as_deref()
}
pub placeholder: Option<String>,
}
impl From<CoreTextElement> for TextElement {
fn from(value: CoreTextElement) -> Self {
Self::new(
value.byte_range.into(),
value._placeholder_for_conversion_only().map(str::to_string),
)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}
impl From<TextElement> for CoreTextElement {
fn from(value: TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}
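The conversions above are now plain field copies since `placeholder` is public. Below is a minimal sketch of the resulting call sites, assuming it sits alongside the impls above so `TextElement`, `CoreTextElement`, and `ByteRange` resolve as in this file; `byte_range` is taken as a parameter because `ByteRange` construction is not shown in this hunk.

```rust
// Hedged sketch: callers build the struct literally instead of going through
// the removed `TextElement::new` / `set_placeholder` accessors.
fn tag_element(byte_range: ByteRange, placeholder: Option<String>) -> TextElement {
    TextElement {
        byte_range,
        placeholder,
    }
}

// Round-tripping through the core type is a straight field copy in both
// directions, per the `From` impls above.
fn round_trip(element: TextElement) -> TextElement {
    let core: CoreTextElement = element.into();
    TextElement::from(core)
}
```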
@@ -1969,9 +1790,6 @@ pub enum ThreadItem {
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ExitedReviewMode { id: String, review: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ContextCompaction { id: String },
}
impl From<CoreTurnItem> for ThreadItem {
@@ -2000,9 +1818,6 @@ impl From<CoreTurnItem> for ThreadItem {
id: search.id,
query: search.query,
},
CoreTurnItem::ContextCompaction(compaction) => {
ThreadItem::ContextCompaction { id: compaction.id }
}
}
}
}
@@ -2373,14 +2188,6 @@ pub struct ContextCompactedNotification {
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ContextCompactionStartedNotification {
pub thread_id: String,
pub turn_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2390,18 +2197,6 @@ pub struct CommandExecutionRequestApprovalParams {
pub item_id: String,
/// Optional explanatory reason (e.g. request for network access).
pub reason: Option<String>,
/// The command to be executed.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command: Option<String>,
/// The command's working directory.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub cwd: Option<PathBuf>,
/// Best-effort parsed command actions for friendly display.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command_actions: Option<Vec<CommandAction>>,
/// Optional proposed execpolicy amendment to allow similar commands without prompting.
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
}
@@ -2433,74 +2228,6 @@ pub struct FileChangeRequestApprovalResponse {
pub decision: FileChangeApprovalDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallParams {
pub thread_id: String,
pub turn_id: String,
pub call_id: String,
pub tool: String,
pub arguments: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallResponse {
pub output: String,
pub success: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Defines a single selectable option for request_user_input.
pub struct ToolRequestUserInputOption {
pub label: String,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Represents one request_user_input question and its optional options.
pub struct ToolRequestUserInputQuestion {
pub id: String,
pub header: String,
pub question: String,
#[serde(default)]
pub is_other: bool,
pub options: Option<Vec<ToolRequestUserInputOption>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Params sent with a request_user_input event.
pub struct ToolRequestUserInputParams {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub questions: Vec<ToolRequestUserInputQuestion>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Captures a user's answer to a request_user_input question.
pub struct ToolRequestUserInputAnswer {
pub answers: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Response payload mapping question ids to answers.
pub struct ToolRequestUserInputResponse {
pub answers: HashMap<String, ToolRequestUserInputAnswer>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2590,24 +2317,6 @@ pub struct DeprecationNoticeNotification {
pub details: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextPosition {
/// 1-based line number.
pub line: usize,
/// 1-based column number (in Unicode scalar values).
pub column: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextRange {
pub start: TextPosition,
pub end: TextPosition,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2616,14 +2325,6 @@ pub struct ConfigWarningNotification {
pub summary: String,
/// Optional extra guidance or error details.
pub details: Option<String>,
/// Optional path to the config file that triggered the warning.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub path: Option<String>,
/// Optional range for the error location inside the config file.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub range: Option<TextRange>,
}
#[cfg(test)]
@@ -2631,12 +2332,10 @@ mod tests {
use super::*;
use codex_protocol::items::AgentMessageContent;
use codex_protocol::items::AgentMessageItem;
use codex_protocol::items::ContextCompactionItem;
use codex_protocol::items::ReasoningItem;
use codex_protocol::items::TurnItem;
use codex_protocol::items::UserMessageItem;
use codex_protocol::items::WebSearchItem;
use codex_protocol::models::WebSearchAction;
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::user_input::UserInput as CoreUserInput;
use pretty_assertions::assert_eq;
@@ -2744,9 +2443,6 @@ mod tests {
let search_item = TurnItem::WebSearch(WebSearchItem {
id: "search-1".to_string(),
query: "docs".to_string(),
action: WebSearchAction::Search {
query: Some("docs".to_string()),
},
});
assert_eq!(
@@ -2756,17 +2452,6 @@ mod tests {
query: "docs".to_string(),
}
);
let compaction_item = TurnItem::ContextCompaction(ContextCompactionItem {
id: "compact-1".to_string(),
});
assert_eq!(
ThreadItem::from(compaction_item),
ThreadItem::ContextCompaction {
id: "compact-1".to_string(),
}
);
}
#[test]

View File

@@ -258,7 +258,7 @@ fn send_message_v2_with_policies(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: user_message,
// Test client sends plain text without UI element ranges.
// Plain text conversion has no UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -292,7 +292,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: first_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -305,7 +304,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: follow_up_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -479,7 +477,6 @@ impl CodexClient {
conversation_id: *conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
},
@@ -842,9 +839,6 @@ impl CodexClient {
turn_id,
item_id,
reason,
command,
cwd,
command_actions,
proposed_execpolicy_amendment,
} = params;
@@ -854,17 +848,6 @@ impl CodexClient {
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(command) = command.as_deref() {
println!("< command: {command}");
}
if let Some(cwd) = cwd.as_ref() {
println!("< cwd: {}", cwd.display());
}
if let Some(command_actions) = command_actions.as_ref()
&& !command_actions.is_empty()
{
println!("< command actions: {command_actions:?}");
}
if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
}

View File

@@ -22,7 +22,6 @@ codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-chatgpt = { workspace = true }
codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
@@ -35,7 +34,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
mcp-types = { workspace = true }
tempfile = { workspace = true }
time = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
@@ -50,20 +48,11 @@ uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
app_test_support = { workspace = true }
axum = { workspace = true, default-features = false, features = [
"http1",
"json",
"tokio",
] }
base64 = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
rmcp = { workspace = true, default-features = false, features = [
"server",
"transport-streamable-http-server",
] }
serial_test = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

View File

@@ -79,21 +79,15 @@ Example (from OpenAI's official VSCode extension):
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; emits `thread/started` and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `thread/unarchive` — move an archived rollout file back into the sessions directory; returns the restored `thread` on success.
- `thread/rollback` — drop the last N turns from the agent's in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
- `app/list` — list available apps.
- `skills/config/write` — write user-level skill config by path.
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `tool/requestUserInput` — prompt the user with 1–3 short questions for a tool call and return their answers (experimental).
- `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server.
- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
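Every method above shares the same JSON-RPC request framing used in the examples that follow. Here is a hedged Rust sketch of building one such frame with `serde_json`; how the frame reaches the server (stdio, socket, ...) is out of scope here.

```rust
use serde_json::{Value, json};

/// Hedged sketch: requests are plain `{ method, id, params }` objects, as in
/// the JSON examples later in this document.
fn request_frame(id: u64, method: &str, params: Value) -> Value {
    json!({ "method": method, "id": id, "params": params })
}

// e.g. request_frame(21, "thread/archive", json!({ "threadId": "thr_b" }))
```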
@@ -115,20 +109,6 @@ Start a fresh thread when you need a new Codex conversation.
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
"personality": "friendly",
"dynamicTools": [
{
"name": "lookup_ticket",
"description": "Fetch a ticket by id",
"inputSchema": {
"type": "object",
"properties": {
"id": { "type": "string" }
},
"required": ["id"]
}
}
],
} }
{ "id": 10, "result": {
"thread": {
@@ -141,13 +121,10 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
```json
{ "method": "thread/resume", "id": 11, "params": {
"threadId": "thr_123",
"personality": "friendly"
} }
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
@@ -161,14 +138,11 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c
### Example: List threads (with pagination & filters)
`thread/list` lets you render a history UI. Results are sorted by `createdAt` (newest first) by default. Pass any combination of:
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
- `limit` — server defaults to a reasonable page size if unset.
- `sortKey``created_at` (default) or `updated_at`.
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
- `sourceKinds` — restrict results to specific sources; omit or pass `[]` for interactive sessions only (`cli`, `vscode`).
- `archived` — when `true`, list archived threads only. When `false` or `null`, list non-archived threads (default).
Example:
@@ -176,12 +150,11 @@ Example:
{ "method": "thread/list", "id": 20, "params": {
"cursor": null,
"limit": 25,
"sortKey": "created_at"
} }
{ "id": 20, "result": {
"data": [
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111, "updatedAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000, "updatedAt": 1730750000 }
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000 }
],
"nextCursor": "opaque-token-or-null"
} }
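A hedged client-side sketch of the cursor handling shown above: feed `nextCursor` back as `cursor` until it comes back `null`. The `send` closure is an assumed stand-in for the client's transport and returns the `result` object from each response.

```rust
use serde_json::{Value, json};

/// Hedged sketch of cursor pagination over `thread/list`.
fn list_all_threads(mut send: impl FnMut(Value) -> Value) -> Vec<Value> {
    let mut threads = Vec::new();
    let mut cursor = Value::Null;
    loop {
        // Request id is fixed for brevity; real clients allocate fresh ids.
        let result = send(json!({
            "method": "thread/list",
            "id": 20,
            "params": { "cursor": cursor, "limit": 25 }
        }));
        if let Some(page) = result.get("data").and_then(Value::as_array) {
            threads.extend(page.iter().cloned());
        }
        match result.get("nextCursor") {
            Some(next) if !next.is_null() => cursor = next.clone(),
            // A null or missing nextCursor means the final page.
            _ => break threads,
        }
    }
}
```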
@@ -200,20 +173,6 @@ When `nextCursor` is `null`, you've reached the final page.
} }
```
### Example: Read a thread
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the rollout history loaded into `thread.turns`.
```json
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
{ "id": 22, "result": { "thread": { "id": "thr_123", "turns": [] } } }
```
```json
{ "method": "thread/read", "id": 23, "params": { "threadId": "thr_123", "includeTurns": true } }
{ "id": 23, "result": { "thread": { "id": "thr_123", "turns": [ ... ] } } }
```
### Example: Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
@@ -223,16 +182,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
{ "id": 21, "result": {} }
```
An archived thread will not appear in `thread/list` unless `archived` is set to `true`.
### Example: Unarchive a thread
Use `thread/unarchive` to move an archived rollout back into the sessions directory.
```json
{ "method": "thread/unarchive", "id": 24, "params": { "threadId": "thr_b" } }
{ "id": 24, "result": { "thread": { "id": "thr_b" } } }
```
An archived thread will not appear in future calls to `thread/list`.
### Example: Start a turn (send user input)
@@ -259,7 +209,6 @@ You can optionally specify config overrides on the new turn. If specified, these
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise",
"personality": "friendly",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
@@ -491,7 +440,7 @@ Certain actions (shell commands or modifying files) may require explicit user ap
Order of messages:
1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason`, plus `command`, `cwd`, and `commandActions` for friendly display.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
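A hedged sketch of the client reply in step 3 above, built with `serde_json` because the typed response structs are not shown in this hunk.

```rust
use serde_json::{Value, json};

/// Hedged sketch: mirrors the accept/decline payloads quoted in step 3;
/// `accept_for_session` maps onto `acceptSettings.forSession`.
fn approval_reply(accept: bool, accept_for_session: bool) -> Value {
    if accept {
        json!({
            "decision": "accept",
            "acceptSettings": { "forSession": accept_for_session }
        })
    } else {
        json!({ "decision": "decline" })
    }
}
```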
@@ -517,15 +466,8 @@ Invoke a skill by including `$<skill-name>` in the text input. Add a `skill` inp
"params": {
"threadId": "thread-1",
"input": [
{
"type": "text",
"text": "$skill-creator Add a new skill for triaging flaky CI."
},
{
"type": "skill",
"name": "skill-creator",
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md"
}
{ "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI." },
{ "type": "skill", "name": "skill-creator", "path": "/Users/me/.codex/skills/skill-creator/SKILL.md" }
]
}
}
@@ -539,49 +481,20 @@ Example:
$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage.
```
Use `skills/list` to fetch the available skills (optionally scoped by `cwds`, with `forceReload`).
Use `skills/list` to fetch the available skills (optionally scoped by `cwd` and/or with `forceReload`).
```json
{ "method": "skills/list", "id": 25, "params": {
"cwds": ["/Users/me/project"],
"cwd": "/Users/me/project",
"forceReload": false
} }
{ "id": 25, "result": {
"data": [{
"cwd": "/Users/me/project",
"skills": [
{
"name": "skill-creator",
"description": "Create or update a Codex skill",
"enabled": true,
"interface": {
"displayName": "Skill Creator",
"shortDescription": "Create or update a Codex skill",
"iconSmall": "icon.svg",
"iconLarge": "icon-large.svg",
"brandColor": "#111111",
"defaultPrompt": "Add a new skill for triaging flaky CI."
}
}
],
"errors": []
}]
"skills": [
{ "name": "skill-creator", "description": "Create or update a Codex skill" }
]
} }
```
To enable or disable a skill by path:
```json
{
"method": "skills/config/write",
"id": 26,
"params": {
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md",
"enabled": false
}
}
```
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
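A hedged sketch of telling those server-initiated notifications apart from request/response traffic, based only on the framing described above (notifications carry a `method` but no `id`).

```rust
use serde_json::Value;

/// Hedged sketch: classify an incoming frame as a server notification.
fn is_server_notification(frame: &Value) -> bool {
    frame.get("method").is_some() && frame.get("id").is_none()
}
```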

View File

@@ -24,9 +24,7 @@ use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::ContextCompactionStartedNotification;
use codex_app_server_protocol::DeprecationNoticeNotification;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
@@ -56,10 +54,6 @@ use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadRollbackResponse;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
use codex_app_server_protocol::ToolRequestUserInputOption;
use codex_app_server_protocol::ToolRequestUserInputParams;
use codex_app_server_protocol::ToolRequestUserInputQuestion;
use codex_app_server_protocol::ToolRequestUserInputResponse;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnDiffUpdatedNotification;
@@ -87,11 +81,8 @@ use codex_core::protocol::TurnDiffEvent;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ThreadId;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::ReviewOutputEvent;
use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer;
use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::PathBuf;
@@ -244,9 +235,6 @@ pub(crate) async fn apply_bespoke_event_handling(
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
item_id: item_id.clone(),
reason,
command: Some(command_string.clone()),
cwd: Some(cwd.clone()),
command_actions: Some(command_actions.clone()),
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
};
let rx = outgoing
@@ -270,92 +258,6 @@ pub(crate) async fn apply_bespoke_event_handling(
});
}
},
EventMsg::RequestUserInput(request) => {
if matches!(api_version, ApiVersion::V2) {
let questions = request
.questions
.into_iter()
.map(|question| ToolRequestUserInputQuestion {
id: question.id,
header: question.header,
question: question.question,
is_other: question.is_other,
options: question.options.map(|options| {
options
.into_iter()
.map(|option| ToolRequestUserInputOption {
label: option.label,
description: option.description,
})
.collect()
}),
})
.collect();
let params = ToolRequestUserInputParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
item_id: request.call_id,
questions,
};
let rx = outgoing
.send_request(ServerRequestPayload::ToolRequestUserInput(params))
.await;
tokio::spawn(async move {
on_request_user_input_response(event_turn_id, rx, conversation).await;
});
} else {
error!(
"request_user_input is only supported on api v2 (call_id: {})",
request.call_id
);
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
}
EventMsg::DynamicToolCallRequest(request) => {
if matches!(api_version, ApiVersion::V2) {
let call_id = request.call_id;
let params = DynamicToolCallParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
call_id: call_id.clone(),
tool: request.tool,
arguments: request.arguments,
};
let rx = outgoing
.send_request(ServerRequestPayload::DynamicToolCall(params))
.await;
tokio::spawn(async move {
crate::dynamic_tools::on_call_response(call_id, rx, conversation).await;
});
} else {
error!(
"dynamic tool calls are only supported on api v2 (call_id: {})",
request.call_id
);
let call_id = request.call_id;
let _ = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: CoreDynamicToolResponse {
call_id,
output: "dynamic tool calls require api v2".to_string(),
success: false,
},
})
.await;
}
}
// TODO(celia): properly construct McpToolCall TurnItem in core.
EventMsg::McpToolCallBegin(begin_event) => {
let notification = construct_mcp_tool_call_notification(
@@ -602,18 +504,7 @@ pub(crate) async fn apply_bespoke_event_handling(
.send_server_notification(ServerNotification::AgentMessageDelta(notification))
.await;
}
EventMsg::ContextCompactionStarted(..) => {
let notification = ContextCompactionStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
};
outgoing
.send_server_notification(ServerNotification::ContextCompactionStarted(
notification,
))
.await;
}
EventMsg::ContextCompactionEnded(..) => {
EventMsg::ContextCompacted(..) => {
let notification = ContextCompactedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
@@ -1055,15 +946,7 @@ pub(crate) async fn apply_bespoke_event_handling(
};
if let Some(request_id) = pending {
let Some(rollout_path) = conversation.rollout_path() else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "thread has no persisted rollout".to_string(),
data: None,
};
outgoing.send_error(request_id, error).await;
return;
};
let rollout_path = conversation.rollout_path();
let response = match read_summary_from_rollout(
rollout_path.as_path(),
fallback_model_provider.as_str(),
@@ -1464,65 +1347,6 @@ async fn on_exec_approval_response(
}
}
async fn on_request_user_input_response(
event_turn_id: String,
receiver: oneshot::Receiver<JsonValue>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
return;
}
};
let response =
serde_json::from_value::<ToolRequestUserInputResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize ToolRequestUserInputResponse: {err}");
ToolRequestUserInputResponse {
answers: HashMap::new(),
}
});
let response = CoreRequestUserInputResponse {
answers: response
.answers
.into_iter()
.map(|(id, answer)| {
(
id,
CoreRequestUserInputAnswer {
answers: answer.answers,
},
)
})
.collect(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response.";
fn render_review_output_text(output: &ReviewOutputEvent) -> String {

File diff suppressed because it is too large.

View File

@@ -1,58 +0,0 @@
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_core::CodexThread;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::protocol::Op;
use std::sync::Arc;
use tokio::sync::oneshot;
use tracing::error;
pub(crate) async fn on_call_response(
call_id: String,
receiver: oneshot::Receiver<serde_json::Value>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let fallback = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: "dynamic tool request failed".to_string(),
success: false,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: fallback,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
return;
}
};
let response = serde_json::from_value::<DynamicToolCallResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize DynamicToolCallResponse: {err}");
DynamicToolCallResponse {
output: "dynamic tool response was invalid".to_string(),
success: false,
}
});
let response = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: response.output,
success: response.success,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id,
response,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
}

View File

@@ -1,155 +0,0 @@
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
pub(crate) fn compute_source_filters(
source_kinds: Option<Vec<ThreadSourceKind>>,
) -> (Vec<CoreSessionSource>, Option<Vec<ThreadSourceKind>>) {
let Some(source_kinds) = source_kinds else {
return (INTERACTIVE_SESSION_SOURCES.to_vec(), None);
};
if source_kinds.is_empty() {
return (INTERACTIVE_SESSION_SOURCES.to_vec(), None);
}
let requires_post_filter = source_kinds.iter().any(|kind| {
matches!(
kind,
ThreadSourceKind::Exec
| ThreadSourceKind::AppServer
| ThreadSourceKind::SubAgent
| ThreadSourceKind::SubAgentReview
| ThreadSourceKind::SubAgentCompact
| ThreadSourceKind::SubAgentThreadSpawn
| ThreadSourceKind::SubAgentOther
| ThreadSourceKind::Unknown
)
});
if requires_post_filter {
(Vec::new(), Some(source_kinds))
} else {
let interactive_sources = source_kinds
.iter()
.filter_map(|kind| match kind {
ThreadSourceKind::Cli => Some(CoreSessionSource::Cli),
ThreadSourceKind::VsCode => Some(CoreSessionSource::VSCode),
ThreadSourceKind::Exec
| ThreadSourceKind::AppServer
| ThreadSourceKind::SubAgent
| ThreadSourceKind::SubAgentReview
| ThreadSourceKind::SubAgentCompact
| ThreadSourceKind::SubAgentThreadSpawn
| ThreadSourceKind::SubAgentOther
| ThreadSourceKind::Unknown => None,
})
.collect::<Vec<_>>();
(interactive_sources, Some(source_kinds))
}
}
pub(crate) fn source_kind_matches(source: &CoreSessionSource, filter: &[ThreadSourceKind]) -> bool {
filter.iter().any(|kind| match kind {
ThreadSourceKind::Cli => matches!(source, CoreSessionSource::Cli),
ThreadSourceKind::VsCode => matches!(source, CoreSessionSource::VSCode),
ThreadSourceKind::Exec => matches!(source, CoreSessionSource::Exec),
ThreadSourceKind::AppServer => matches!(source, CoreSessionSource::Mcp),
ThreadSourceKind::SubAgent => matches!(source, CoreSessionSource::SubAgent(_)),
ThreadSourceKind::SubAgentReview => {
matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Review)
)
}
ThreadSourceKind::SubAgentCompact => {
matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Compact)
)
}
ThreadSourceKind::SubAgentThreadSpawn => matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::ThreadSpawn { .. })
),
ThreadSourceKind::SubAgentOther => matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Other(_))
),
ThreadSourceKind::Unknown => matches!(source, CoreSessionSource::Unknown),
})
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::ThreadId;
use pretty_assertions::assert_eq;
use uuid::Uuid;
#[test]
fn compute_source_filters_defaults_to_interactive_sources() {
let (allowed_sources, filter) = compute_source_filters(None);
assert_eq!(allowed_sources, INTERACTIVE_SESSION_SOURCES.to_vec());
assert_eq!(filter, None);
}
#[test]
fn compute_source_filters_empty_means_interactive_sources() {
let (allowed_sources, filter) = compute_source_filters(Some(Vec::new()));
assert_eq!(allowed_sources, INTERACTIVE_SESSION_SOURCES.to_vec());
assert_eq!(filter, None);
}
#[test]
fn compute_source_filters_interactive_only_skips_post_filtering() {
let source_kinds = vec![ThreadSourceKind::Cli, ThreadSourceKind::VsCode];
let (allowed_sources, filter) = compute_source_filters(Some(source_kinds.clone()));
assert_eq!(
allowed_sources,
vec![CoreSessionSource::Cli, CoreSessionSource::VSCode]
);
assert_eq!(filter, Some(source_kinds));
}
#[test]
fn compute_source_filters_subagent_variant_requires_post_filtering() {
let source_kinds = vec![ThreadSourceKind::SubAgentReview];
let (allowed_sources, filter) = compute_source_filters(Some(source_kinds.clone()));
assert_eq!(allowed_sources, Vec::new());
assert_eq!(filter, Some(source_kinds));
}
#[test]
fn source_kind_matches_distinguishes_subagent_variants() {
let parent_thread_id =
ThreadId::from_string(&Uuid::new_v4().to_string()).expect("valid thread id");
let review = CoreSessionSource::SubAgent(CoreSubAgentSource::Review);
let spawn = CoreSessionSource::SubAgent(CoreSubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
});
assert!(source_kind_matches(
&review,
&[ThreadSourceKind::SubAgentReview]
));
assert!(!source_kind_matches(
&review,
&[ThreadSourceKind::SubAgentThreadSpawn]
));
assert!(source_kind_matches(
&spawn,
&[ThreadSourceKind::SubAgentThreadSpawn]
));
assert!(!source_kind_matches(
&spawn,
&[ThreadSourceKind::SubAgentReview]
));
}
}

View File

@@ -3,7 +3,6 @@
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
@@ -12,15 +11,9 @@ use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ConfigLayerSource;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::TextPosition as AppTextPosition;
use codex_app_server_protocol::TextRange as AppTextRange;
use codex_core::ExecPolicyError;
use codex_core::check_execpolicy_for_warnings;
use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::TextRange as CoreTextRange;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
@@ -40,9 +33,7 @@ use tracing_subscriber::util::SubscriberInitExt;
mod bespoke_event_handling;
mod codex_message_processor;
mod config_api;
mod dynamic_tools;
mod error_code;
mod filters;
mod fuzzy_file_search;
mod message_processor;
mod models;
@@ -53,116 +44,6 @@ mod outgoing_message;
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;
fn config_warning_from_error(
summary: impl Into<String>,
err: &std::io::Error,
) -> ConfigWarningNotification {
let (path, range) = match config_error_location(err) {
Some((path, range)) => (Some(path), Some(range)),
None => (None, None),
};
ConfigWarningNotification {
summary: summary.into(),
details: Some(err.to_string()),
path,
range,
}
}
fn config_error_location(err: &std::io::Error) -> Option<(String, AppTextRange)> {
err.get_ref()
.and_then(|err| err.downcast_ref::<ConfigLoadError>())
.map(|err| {
let config_error = err.config_error();
(
config_error.path.to_string_lossy().to_string(),
app_text_range(&config_error.range),
)
})
}
fn exec_policy_warning_location(err: &ExecPolicyError) -> (Option<String>, Option<AppTextRange>) {
match err {
ExecPolicyError::ParsePolicy { path, source } => {
if let Some(location) = source.location() {
let range = AppTextRange {
start: AppTextPosition {
line: location.range.start.line,
column: location.range.start.column,
},
end: AppTextPosition {
line: location.range.end.line,
column: location.range.end.column,
},
};
return (Some(location.path), Some(range));
}
(Some(path.clone()), None)
}
_ => (None, None),
}
}
fn app_text_range(range: &CoreTextRange) -> AppTextRange {
AppTextRange {
start: AppTextPosition {
line: range.start.line,
column: range.start.column,
},
end: AppTextPosition {
line: range.end.line,
column: range.end.column,
},
}
}
fn project_config_warning(config: &Config) -> Option<ConfigWarningNotification> {
let mut disabled_folders = Vec::new();
for layer in config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
{
if !matches!(layer.name, ConfigLayerSource::Project { .. })
|| layer.disabled_reason.is_none()
{
continue;
}
if let ConfigLayerSource::Project { dot_codex_folder } = &layer.name {
disabled_folders.push((
dot_codex_folder.as_path().display().to_string(),
layer
.disabled_reason
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| "config.toml is disabled.".to_string()),
));
}
}
if disabled_folders.is_empty() {
return None;
}
let mut message = concat!(
"Project config.toml files are disabled in the following folders. ",
"Settings in those files are ignored, but skills and exec policies still load.\n",
)
.to_string();
for (index, (folder, reason)) in disabled_folders.iter().enumerate() {
let display_index = index + 1;
message.push_str(&format!(" {display_index}. {folder}\n"));
message.push_str(&format!(" {reason}\n"));
}
Some(ConfigWarningNotification {
summary: message,
details: None,
path: None,
range: None,
})
}
pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
@@ -214,7 +95,10 @@ pub async fn run_main(
{
Ok(config) => config,
Err(err) => {
let message = config_warning_from_error("Invalid configuration; using defaults.", &err);
let message = ConfigWarningNotification {
summary: "Invalid configuration; using defaults.".to_string(),
details: Some(err.to_string()),
};
config_warnings.push(message);
Config::load_default_with_cli_overrides(cli_kv_overrides.clone()).map_err(|e| {
std::io::Error::new(
@@ -228,20 +112,13 @@ pub async fn run_main(
if let Ok(Some(err)) =
check_execpolicy_for_warnings(&config.features, &config.config_layer_stack).await
{
let (path, range) = exec_policy_warning_location(&err);
let message = ConfigWarningNotification {
summary: "Error parsing rules; custom rules not applied.".to_string(),
details: Some(err.to_string()),
path,
range,
};
config_warnings.push(message);
}
if let Some(warning) = project_config_warning(&config) {
config_warnings.push(warning);
}
let feedback = CodexFeedback::new();
let otel = codex_core::otel_init::build_provider(

View File

@@ -28,7 +28,6 @@ fn model_from_preset(preset: ModelPreset) -> Model {
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
supports_personality: preset.supports_personality,
is_default: preset.is_default,
}
}

View File

@@ -286,8 +286,6 @@ mod tests {
let notification = ServerNotification::ConfigWarning(ConfigWarningNotification {
summary: "Config error: using defaults".to_string(),
details: Some("error loading config: bad config".to_string()),
path: None,
range: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);

View File

@@ -49,16 +49,6 @@ impl ChatGptAuthFixture {
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.claims.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.claims.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.claims.email = Some(email.into());
self
@@ -79,8 +69,6 @@ impl ChatGptAuthFixture {
pub struct ChatGptIdTokenClaims {
pub email: Option<String>,
pub plan_type: Option<String>,
pub chatgpt_user_id: Option<String>,
pub chatgpt_account_id: Option<String>,
}
impl ChatGptIdTokenClaims {
@@ -97,16 +85,6 @@ impl ChatGptIdTokenClaims {
self.plan_type = Some(plan_type.into());
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
}
pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
@@ -115,20 +93,10 @@ pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
if let Some(email) = &claims.email {
payload.insert("email".to_string(), json!(email));
}
let mut auth_payload = serde_json::Map::new();
if let Some(plan_type) = &claims.plan_type {
auth_payload.insert("chatgpt_plan_type".to_string(), json!(plan_type));
}
if let Some(chatgpt_user_id) = &claims.chatgpt_user_id {
auth_payload.insert("chatgpt_user_id".to_string(), json!(chatgpt_user_id));
}
if let Some(chatgpt_account_id) = &claims.chatgpt_account_id {
auth_payload.insert("chatgpt_account_id".to_string(), json!(chatgpt_account_id));
}
if !auth_payload.is_empty() {
payload.insert(
"https://api.openai.com/auth".to_string(),
serde_json::Value::Object(auth_payload),
json!({ "chatgpt_plan_type": plan_type }),
);
}
let payload = serde_json::Value::Object(payload);

View File

@@ -27,12 +27,9 @@ pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_request_user_input_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
pub use rollout::create_fake_rollout_with_source;
pub use rollout::create_fake_rollout_with_text_elements;
pub use rollout::rollout_path;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {

View File

@@ -12,13 +12,11 @@ use tokio::process::ChildStdout;
use anyhow::Context;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
@@ -49,14 +47,11 @@ use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadForkParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadLoadedListParams;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadRollbackParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadUnarchiveParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_core::default_client::CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR;
use tokio::process::Command;
pub struct McpProcess {
@@ -96,7 +91,6 @@ impl McpProcess {
cmd.stderr(Stdio::piped());
cmd.env("CODEX_HOME", codex_home);
cmd.env("RUST_LOG", "debug");
cmd.env_remove(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR);
for (k, v) in env_overrides {
match v {
@@ -366,15 +360,6 @@ impl McpProcess {
self.send_request("thread/archive", params).await
}
/// Send a `thread/unarchive` JSON-RPC request.
pub async fn send_thread_unarchive_request(
&mut self,
params: ThreadUnarchiveParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/unarchive", params).await
}
/// Send a `thread/rollback` JSON-RPC request.
pub async fn send_thread_rollback_request(
&mut self,
@@ -402,15 +387,6 @@ impl McpProcess {
self.send_request("thread/loaded/list", params).await
}
/// Send a `thread/read` JSON-RPC request.
pub async fn send_thread_read_request(
&mut self,
params: ThreadReadParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/read", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
@@ -420,21 +396,6 @@ impl McpProcess {
self.send_request("model/list", params).await
}
/// Send an `app/list` JSON-RPC request.
pub async fn send_apps_list_request(&mut self, params: AppsListParams) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("app/list", params).await
}
/// Send a `collaborationMode/list` JSON-RPC request.
pub async fn send_list_collaboration_modes_request(
&mut self,
params: CollaborationModeListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("collaborationMode/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,

View File

@@ -27,7 +27,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.into()),
base_instructions: "base instructions".to_string(),
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,

View File

@@ -60,27 +60,3 @@ pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String>
responses::ev_completed("resp-1"),
]))
}
pub fn create_request_user_input_sse_response(call_id: &str) -> anyhow::Result<String> {
let tool_call_arguments = serde_json::to_string(&json!({
"questions": [{
"id": "confirm_path",
"header": "Confirm",
"question": "Proceed with the plan?",
"isOther": false,
"options": [{
"label": "Yes (Recommended)",
"description": "Continue the current plan."
}, {
"label": "No",
"description": "Stop and revisit the approach."
}]
}]
}))?;
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "request_user_input", &tool_call_arguments),
responses::ev_completed("resp-1"),
]))
}

View File

@@ -6,23 +6,10 @@ use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
pub fn rollout_path(codex_home: &Path, filename_ts: &str, thread_id: &str) -> PathBuf {
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
codex_home
.join("sessions")
.join(year)
.join(month)
.join(day)
.join(format!("rollout-{filename_ts}-{thread_id}.jsonl"))
}
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
@@ -38,49 +25,30 @@ pub fn create_fake_rollout(
preview: &str,
model_provider: Option<&str>,
git_info: Option<GitInfo>,
) -> Result<String> {
create_fake_rollout_with_source(
codex_home,
filename_ts,
meta_rfc3339,
preview,
model_provider,
git_info,
SessionSource::Cli,
)
}
/// Create a minimal rollout file with an explicit session source.
pub fn create_fake_rollout_with_source(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
model_provider: Option<&str>,
git_info: Option<GitInfo>,
source: SessionSource,
) -> Result<String> {
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
let conversation_id = ThreadId::from_string(&uuid_str)?;
let file_path = rollout_path(codex_home, filename_ts, &uuid_str);
let dir = file_path
.parent()
.ok_or_else(|| anyhow::anyhow!("missing rollout parent directory"))?;
fs::create_dir_all(dir)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
forked_from_id: None,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
source,
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
@@ -116,13 +84,7 @@ pub fn create_fake_rollout_with_source(
.to_string(),
];
fs::write(&file_path, lines.join("\n") + "\n")?;
let parsed = chrono::DateTime::parse_from_rfc3339(meta_rfc3339)?.with_timezone(&chrono::Utc);
let times = FileTimes::new().set_modified(parsed.into());
std::fs::OpenOptions::new()
.append(true)
.open(&file_path)?
.set_times(times)?;
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(uuid_str)
}
@@ -151,14 +113,13 @@ pub fn create_fake_rollout_with_text_elements(
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
forked_from_id: None,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,

View File

@@ -108,10 +108,6 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
let AddConversationSubscriptionResponse { subscription_id } =
to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;
// Drop any buffered events from conversation setup to avoid
// matching an earlier task_complete.
mcp.clear_message_buffer();
// 3) sendUserMessage (should trigger notifications; we only validate an OK response)
let send_user_id = mcp
.send_send_user_message_request(SendUserMessageParams {
@@ -129,38 +125,13 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
.await??;
let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;
let task_started_notification: JSONRPCNotification = timeout(
// Verify the task_finished notification is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_started"),
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let task_started_event: Event = serde_json::from_value(
task_started_notification
.params
.clone()
.expect("task_started should have params"),
)
.expect("task_started should deserialize to Event");
// Verify the task_finished notification for this turn is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let event: Event = serde_json::from_value(
notification
.params
.clone()
.expect("task_complete should have params"),
)
.expect("task_complete should deserialize to Event");
if event.id == task_started_event.id {
break notification;
}
};
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")

View File

@@ -307,7 +307,6 @@ async fn test_list_and_resume_conversations() -> Result<()> {
content: vec![ContentItem::InputText {
text: fork_history_text.to_string(),
}],
end_turn: None,
}];
let resume_with_history_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {

View File

@@ -1,381 +0,0 @@
use std::borrow::Cow;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use axum::Json;
use axum::Router;
use axum::extract::State;
use axum::http::HeaderMap;
use axum::http::StatusCode;
use axum::http::header::AUTHORIZATION;
use axum::routing::post;
use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::connectors::ConnectorInfo;
use pretty_assertions::assert_eq;
use rmcp::handler::server::ServerHandler;
use rmcp::model::JsonObject;
use rmcp::model::ListToolsResult;
use rmcp::model::Meta;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use rmcp::model::ToolAnnotations;
use rmcp::transport::StreamableHttpServerConfig;
use rmcp::transport::StreamableHttpService;
use rmcp::transport::streamable_http_server::session::local::LocalSessionManager;
use serde_json::json;
use tempfile::TempDir;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test]
async fn list_apps_returns_empty_when_connectors_disabled() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: Some(50),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
assert!(data.is_empty());
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
let connectors = vec![
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
install_url: None,
is_accessible: false,
},
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: None,
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
let expected = vec![
AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
},
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
},
];
assert_eq!(data, expected);
assert!(next_cursor.is_none());
server_handle.abort();
Ok(())
}
#[tokio::test]
async fn list_apps_paginates_results() -> Result<()> {
let connectors = vec![
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: None,
install_url: None,
is_accessible: false,
},
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let AppsListResponse {
data: first_page,
next_cursor: first_cursor,
} = to_response(first_response)?;
let expected_first = vec![AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
}];
assert_eq!(first_page, expected_first);
let next_cursor = first_cursor.ok_or_else(|| anyhow::anyhow!("missing cursor"))?;
let second_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: Some(next_cursor),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let AppsListResponse {
data: second_page,
next_cursor: second_cursor,
} = to_response(second_response)?;
let expected_second = vec![AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
}];
assert_eq!(second_page, expected_second);
assert!(second_cursor.is_none());
server_handle.abort();
Ok(())
}
#[derive(Clone)]
struct AppsServerState {
expected_bearer: String,
expected_account_id: String,
response: serde_json::Value,
}
#[derive(Clone)]
struct AppListMcpServer {
tools: Arc<Vec<Tool>>,
}
impl AppListMcpServer {
fn new(tools: Arc<Vec<Tool>>) -> Self {
Self { tools }
}
}
impl ServerHandler for AppListMcpServer {
fn get_info(&self) -> ServerInfo {
ServerInfo {
capabilities: ServerCapabilities::builder().enable_tools().build(),
..ServerInfo::default()
}
}
fn list_tools(
&self,
_request: Option<rmcp::model::PaginatedRequestParam>,
_context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
) -> impl std::future::Future<Output = Result<ListToolsResult, rmcp::ErrorData>> + Send + '_
{
let tools = self.tools.clone();
async move {
Ok(ListToolsResult {
tools: (*tools).clone(),
next_cursor: None,
meta: None,
})
}
}
}
async fn start_apps_server(
connectors: Vec<ConnectorInfo>,
tools: Vec<Tool>,
) -> Result<(String, JoinHandle<()>)> {
let state = AppsServerState {
expected_bearer: "Bearer chatgpt-token".to_string(),
expected_account_id: "account-123".to_string(),
response: json!({ "connectors": connectors }),
};
let state = Arc::new(state);
let tools = Arc::new(tools);
let listener = TcpListener::bind("127.0.0.1:0").await?;
let addr = listener.local_addr()?;
let mcp_service = StreamableHttpService::new(
{
let tools = tools.clone();
move || Ok(AppListMcpServer::new(tools.clone()))
},
Arc::new(LocalSessionManager::default()),
StreamableHttpServerConfig::default(),
);
let router = Router::new()
.route("/aip/connectors/list_accessible", post(list_connectors))
.with_state(state)
.nest_service("/api/codex/apps", mcp_service);
let handle = tokio::spawn(async move {
let _ = axum::serve(listener, router).await;
});
Ok((format!("http://{addr}"), handle))
}
async fn list_connectors(
State(state): State<Arc<AppsServerState>>,
headers: HeaderMap,
) -> Result<impl axum::response::IntoResponse, StatusCode> {
let bearer_ok = headers
.get(AUTHORIZATION)
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_bearer);
let account_ok = headers
.get("chatgpt-account-id")
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_account_id);
if bearer_ok && account_ok {
Ok(Json(state.response.clone()))
} else {
Err(StatusCode::UNAUTHORIZED)
}
}
fn connector_tool(connector_id: &str, connector_name: &str) -> Result<Tool> {
let schema: JsonObject = serde_json::from_value(json!({
"type": "object",
"additionalProperties": false
}))?;
let mut tool = Tool::new(
Cow::Owned(format!("connector_{connector_id}")),
Cow::Borrowed("Connector test tool"),
Arc::new(schema),
);
tool.annotations = Some(ToolAnnotations::new().read_only(true));
let mut meta = Meta::new();
meta.0
.insert("connector_id".to_string(), json!(connector_id));
meta.0
.insert("connector_name".to_string(), json!(connector_name));
tool.meta = Some(meta);
Ok(tool)
}
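// A sketch of the mapping the assertions above rely on: a connector whose
// `connector_id` shows up in some tool's meta is reported as accessible and takes
// that tool's `connector_name` (here "beta" surfaces as "Beta App"), while a
// connector with no matching tool keeps its own name and `is_accessible: false`.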
fn write_connectors_config(codex_home: &std::path::Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
chatgpt_base_url = "{base_url}"
[features]
connectors = true
"#
),
)
}

View File

@@ -1,111 +0,0 @@
//! Validates that the collaboration mode list endpoint returns the expected default presets.
//!
//! The test drives the app server through the MCP harness and asserts that the list response
//! includes the plan, coding, pair programming, and execute modes with their default model and reasoning
//! effort settings, which keeps the API contract visible in one place.
#![allow(clippy::unwrap_used)]
use std::time::Duration;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::CollaborationModeListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::models_manager::test_builtin_collaboration_mode_presets;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
/// Confirms the server returns the default collaboration mode presets in a stable order.
#[tokio::test]
async fn list_collaboration_modes_returns_presets() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_collaboration_modes_request(CollaborationModeListParams {})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let CollaborationModeListResponse { data: items } =
to_response::<CollaborationModeListResponse>(response)?;
let expected = [
plan_preset(),
code_preset(),
pair_programming_preset(),
execute_preset(),
];
assert_eq!(expected.len(), items.len());
for (expected_mask, actual_mask) in expected.iter().zip(items.iter()) {
assert_eq!(expected_mask.name, actual_mask.name);
assert_eq!(expected_mask.mode, actual_mask.mode);
assert_eq!(expected_mask.model, actual_mask.model);
assert_eq!(expected_mask.reasoning_effort, actual_mask.reasoning_effort);
assert_eq!(
expected_mask.developer_instructions,
actual_mask.developer_instructions
);
}
Ok(())
}
/// Builds the plan preset that the list response is expected to return.
///
/// If the defaults change in the app server, this helper should be updated alongside the
/// contract, or the test will fail in ways that imply a regression in the API.
fn plan_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Plan))
.unwrap()
}
/// Builds the pair programming preset that the list response is expected to return.
///
/// The helper keeps the expected model and reasoning defaults co-located with the test
/// so that mismatches point directly at the API contract being exercised.
fn pair_programming_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::PairProgramming))
.unwrap()
}
/// Builds the code preset that the list response is expected to return.
fn code_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Code))
.unwrap()
}
/// Builds the execute preset that the list response is expected to return.
///
/// The execute preset uses a different reasoning effort to capture the higher-effort
/// execution contract the server currently exposes.
fn execute_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Execute))
.unwrap()
}

View File

@@ -18,10 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::set_project_trust_level;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -56,7 +53,6 @@ sandbox_mode = "workspace-write"
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -105,7 +101,6 @@ view_image = false
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -146,52 +141,6 @@ view_image = false
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_project_layers_for_cwd() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(&codex_home, r#"model = "gpt-user""#)?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let project_config = AbsolutePathBuf::try_from(project_config_dir)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: Some(workspace.path().to_string_lossy().into_owned()),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config, origins, ..
} = to_response(resp)?;
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
assert_eq!(
origins.get("model_reasoning_effort").expect("origin").name,
ConfigLayerSource::Project {
dot_codex_folder: project_config
}
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -246,7 +195,6 @@ writable_roots = [{}]
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -333,7 +281,6 @@ model = "gpt-old"
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
@@ -368,7 +315,6 @@ model = "gpt-old"
let verify_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let verify_resp: JSONRPCResponse = timeout(
@@ -465,7 +411,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(

View File

@@ -1,286 +0,0 @@
use anyhow::Context;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_app_server_protocol::DynamicToolSpec;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
/// Ensures dynamic tool specs are serialized into the model request payload.
#[tokio::test]
async fn thread_start_injects_dynamic_tools_into_model_requests() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Use a minimal JSON schema so we can assert the tool payload round-trips.
let input_schema = json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
});
let dynamic_tool = DynamicToolSpec {
name: "demo_tool".to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: input_schema.clone(),
};
// Thread start injects dynamic tools into the thread's tool registry.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool.clone()]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so a model request is issued.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
// Inspect the captured model request to assert the tool spec made it through.
let bodies = responses_bodies(&server).await?;
let body = bodies
.first()
.context("expected at least one responses request")?;
let tool = find_tool(body, &dynamic_tool.name)
.context("expected dynamic tool to be injected into request")?;
assert_eq!(
tool.get("description"),
Some(&Value::String(dynamic_tool.description.clone()))
);
assert_eq!(tool.get("parameters"), Some(&input_schema));
Ok(())
}
/// Exercises the full dynamic tool call path (server request, client response, model output).
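///
/// A rough sketch of the exchange this test drives, using the constants below:
/// 1. the mock model emits a `function_call` for `demo_tool` with call id `dyn-call-1`;
/// 2. the app server surfaces it to the client as a `DynamicToolCall` request;
/// 3. the client answers with `output: "dynamic-ok"`, `success: true`;
/// 4. the follow-up model request then carries a matching `function_call_output`.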
#[tokio::test]
async fn dynamic_tool_call_round_trip_sends_output_to_model() -> Result<()> {
let call_id = "dyn-call-1";
let tool_name = "demo_tool";
let tool_args = json!({ "city": "Paris" });
let tool_call_arguments = serde_json::to_string(&tool_args)?;
// First response triggers a dynamic tool call, second closes the turn.
let responses = vec![
responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, tool_name, &tool_call_arguments),
responses::ev_completed("resp-1"),
]),
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let dynamic_tool = DynamicToolSpec {
name: tool_name.to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
}),
};
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so the tool call is emitted.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Run the tool".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Read the tool call request from the app server.
let request = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let (request_id, params) = match request {
ServerRequest::DynamicToolCall { request_id, params } => (request_id, params),
other => panic!("expected DynamicToolCall request, got {other:?}"),
};
let expected = DynamicToolCallParams {
thread_id: thread.id,
turn_id: turn.id,
call_id: call_id.to_string(),
tool: tool_name.to_string(),
arguments: tool_args.clone(),
};
assert_eq!(params, expected);
// Respond to the tool call so the model receives a function_call_output.
let response = DynamicToolCallResponse {
output: "dynamic-ok".to_string(),
success: true,
};
mcp.send_response(request_id, serde_json::to_value(response)?)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let bodies = responses_bodies(&server).await?;
let output = bodies
.iter()
.find_map(|body| function_call_output_text(body, call_id))
.context("expected function_call_output in follow-up request")?;
assert_eq!(output, "dynamic-ok");
Ok(())
}
async fn responses_bodies(server: &MockServer) -> Result<Vec<Value>> {
let requests = server
.received_requests()
.await
.context("failed to fetch received requests")?;
requests
.into_iter()
.filter(|req| req.url.path().ends_with("/responses"))
.map(|req| {
req.body_json::<Value>()
.context("request body should be JSON")
})
.collect()
}
fn find_tool<'a>(body: &'a Value, name: &str) -> Option<&'a Value> {
body.get("tools")
.and_then(Value::as_array)
.and_then(|tools| {
tools
.iter()
.find(|tool| tool.get("name").and_then(Value::as_str) == Some(name))
})
}
fn function_call_output_text(body: &Value, call_id: &str) -> Option<String> {
body.get("input")
.and_then(Value::as_array)
.and_then(|items| {
items.iter().find(|item| {
item.get("type").and_then(Value::as_str) == Some("function_call_output")
&& item.get("call_id").and_then(Value::as_str) == Some(call_id)
})
})
.and_then(|item| item.get("output"))
.and_then(Value::as_str)
.map(str::to_string)
}
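// For reference, a sketch of the request-body shape the two helpers above assume;
// the field names mirror the assertions in these tests, and the real payload may
// carry additional fields:
//
//     {
//       "tools": [
//         { "name": "demo_tool", "description": "Demo dynamic tool", "parameters": { ... } }
//       ],
//       "input": [
//         { "type": "function_call_output", "call_id": "dyn-call-1", "output": "dynamic-ok" }
//       ]
//     }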
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,23 +1,17 @@
mod account;
mod analytics;
mod app_list;
mod collaboration_mode_list;
mod config_rpc;
mod dynamic_tools;
mod initialize;
mod model_list;
mod output_schema;
mod rate_limits;
mod request_user_input;
mod review;
mod thread_archive;
mod thread_fork;
mod thread_list;
mod thread_loaded_list;
mod thread_read;
mod thread_resume;
mod thread_rollback;
mod thread_start;
mod thread_unarchive;
mod turn_interrupt;
mod turn_start;

View File

@@ -72,7 +72,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: true,
},
Model {
@@ -100,7 +99,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -120,7 +118,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -154,7 +151,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
];

View File

@@ -1,138 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_request_user_input_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn request_user_input_round_trip() -> Result<()> {
let codex_home = tempfile::TempDir::new()?;
let responses = vec![
create_request_user_input_sse_response("call1")?,
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?;
let turn_start_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "ask something".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
collaboration_mode: Some(CollaborationMode {
mode: ModeKind::Plan,
settings: Settings {
model: "mock-model".to_string(),
reasoning_effort: Some(ReasoningEffort::Medium),
developer_instructions: None,
},
}),
..Default::default()
})
.await?;
let turn_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_start_id)),
)
.await??;
let TurnStartResponse { turn, .. } = to_response(turn_start_resp)?;
let server_req = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ToolRequestUserInput { request_id, params } = server_req else {
panic!("expected ToolRequestUserInput request, got: {server_req:?}");
};
assert_eq!(params.thread_id, thread.id);
assert_eq!(params.turn_id, turn.id);
assert_eq!(params.item_id, "call1");
assert_eq!(params.questions.len(), 1);
mcp.send_response(
request_id,
serde_json::json!({
"answers": {
"confirm_path": { "answers": ["yes"] }
}
}),
)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
Ok(())
}
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "untrusted"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
collaboration_modes = true
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -77,9 +77,8 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> {
assert_ne!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
let thread_path = thread.path.clone().expect("thread path");
assert!(thread_path.is_absolute());
assert_ne!(thread_path, original_path);
assert!(thread.path.is_absolute());
assert_ne!(thread.path, original_path);
assert!(thread.cwd.is_absolute());
assert_eq!(thread.source, SessionSource::VsCode);

View File

@@ -1,34 +1,17 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::create_fake_rollout_with_source;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadSortKey;
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource;
use pretty_assertions::assert_eq;
use std::cmp::Reverse;
use std::fs;
use std::fs::FileTimes;
use std::fs::OpenOptions;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -43,29 +26,12 @@ async fn list_threads(
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
source_kinds: Option<Vec<ThreadSourceKind>>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
list_threads_with_sort(mcp, cursor, limit, providers, source_kinds, None, archived).await
}
async fn list_threads_with_sort(
mcp: &mut McpProcess,
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
source_kinds: Option<Vec<ThreadSourceKind>>,
sort_key: Option<ThreadSortKey>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor,
limit,
sort_key,
model_providers: providers,
source_kinds,
archived,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -116,16 +82,6 @@ fn timestamp_at(
)
}
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -138,8 +94,6 @@ async fn thread_list_basic_empty() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert!(data.is_empty());
@@ -202,8 +156,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data1.len(), 2);
@@ -211,7 +163,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -228,8 +179,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert!(data2.len() <= 2);
@@ -237,7 +186,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -279,8 +227,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
None,
Some(10),
Some(vec!["other_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data.len(), 1);
@@ -290,7 +236,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
assert_eq!(thread.model_provider, "other_provider");
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_ts);
assert_eq!(thread.updated_at, expected_ts);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -299,207 +244,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_empty_source_kinds_defaults_to_interactive_only() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let cli_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"CLI",
Some("mock_provider"),
None,
)?;
let exec_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Exec",
Some("mock_provider"),
None,
CoreSessionSource::Exec,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(Vec::new()),
None,
)
.await?;
assert_eq!(next_cursor, None);
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![cli_id.as_str()]);
assert_ne!(cli_id, exec_id);
assert_eq!(data[0].source, SessionSource::Cli);
Ok(())
}
#[tokio::test]
async fn thread_list_filters_by_source_kind_subagent_thread_spawn() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let cli_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"CLI",
Some("mock_provider"),
None,
)?;
let parent_thread_id = ThreadId::from_string(&Uuid::new_v4().to_string())?;
let subagent_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"SubAgent",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
}),
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentThreadSpawn]),
None,
)
.await?;
assert_eq!(next_cursor, None);
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![subagent_id.as_str()]);
assert_ne!(cli_id, subagent_id);
assert!(matches!(data[0].source, SessionSource::SubAgent(_)));
Ok(())
}
#[tokio::test]
async fn thread_list_filters_by_subagent_variant() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let parent_thread_id = ThreadId::from_string(&Uuid::new_v4().to_string())?;
let review_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T09-00-00",
"2025-02-02T09:00:00Z",
"Review",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Review),
)?;
let compact_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T10-00-00",
"2025-02-02T10:00:00Z",
"Compact",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Compact),
)?;
let spawn_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T11-00-00",
"2025-02-02T11:00:00Z",
"Spawn",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
}),
)?;
let other_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T12-00-00",
"2025-02-02T12:00:00Z",
"Other",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Other("custom".to_string())),
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let review = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentReview]),
None,
)
.await?;
let review_ids: Vec<_> = review
.data
.iter()
.map(|thread| thread.id.as_str())
.collect();
assert_eq!(review_ids, vec![review_id.as_str()]);
let compact = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentCompact]),
None,
)
.await?;
let compact_ids: Vec<_> = compact
.data
.iter()
.map(|thread| thread.id.as_str())
.collect();
assert_eq!(compact_ids, vec![compact_id.as_str()]);
let spawn = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentThreadSpawn]),
None,
)
.await?;
let spawn_ids: Vec<_> = spawn.data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(spawn_ids, vec![spawn_id.as_str()]);
let other = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentOther]),
None,
)
.await?;
let other_ids: Vec<_> = other.data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(other_ids, vec![other_id.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -531,8 +275,6 @@ async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
None,
Some(8),
Some(vec!["target_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -577,8 +319,6 @@ async fn thread_list_enforces_max_limit() -> Result<()> {
None,
Some(200),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -624,8 +364,6 @@ async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()
None,
Some(10),
Some(vec!["target_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -672,8 +410,6 @@ async fn thread_list_includes_git_info() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
let thread = data
@@ -693,428 +429,3 @@ async fn thread_list_includes_git_info() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_default_sorts_by_created_at() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_a.as_str(), id_b.as_str(), id_c.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_sort_updated_at_orders_by_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_old = create_fake_rollout(
codex_home.path(),
"2025-01-01T10-00-00",
"2025-01-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_mid = create_fake_rollout(
codex_home.path(),
"2025-01-01T11-00-00",
"2025-01-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_new = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T10-00-00", &id_old).as_path(),
"2025-01-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T11-00-00", &id_mid).as_path(),
"2025-01-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T12-00-00", &id_new).as_path(),
"2025-01-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_old.as_str(), id_mid.as_str(), id_new.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_paginates_with_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-02-01T12-00-00",
"2025-02-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
"2025-02-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
"2025-02-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T12-00-00", &id_c).as_path(),
"2025-02-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse {
data: page1,
next_cursor: cursor1,
} = list_threads_with_sort(
&mut mcp,
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page1: Vec<_> = page1.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page1, vec![id_a.as_str(), id_b.as_str()]);
let cursor1 = cursor1.expect("expected nextCursor on first page");
let ThreadListResponse {
data: page2,
next_cursor: cursor2,
} = list_threads_with_sort(
&mut mcp,
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page2: Vec<_> = page2.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page2, vec![id_c.as_str()]);
assert_eq!(cursor2, None);
Ok(())
}
#[tokio::test]
async fn thread_list_created_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let updated_at = "2025-02-03T00:00:00Z";
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
updated_at,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
updated_at,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_uses_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let thread_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &thread_id).as_path(),
"2025-02-05T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let thread = data
.iter()
.find(|item| item.id == thread_id)
.expect("expected thread for created rollout");
let expected_created =
chrono::DateTime::parse_from_rfc3339("2025-02-01T10:00:00Z")?.timestamp();
let expected_updated =
chrono::DateTime::parse_from_rfc3339("2025-02-05T00:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_created);
assert_eq!(thread.updated_at, expected_updated);
Ok(())
}
#[tokio::test]
async fn thread_list_archived_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let active_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T10-00-00",
"2025-03-01T10:00:00Z",
"Active",
Some("mock_provider"),
None,
)?;
let archived_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T09-00-00",
"2025-03-01T09:00:00Z",
"Archived",
Some("mock_provider"),
None,
)?;
let archived_dir = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
fs::create_dir_all(&archived_dir)?;
let archived_source = rollout_path(codex_home.path(), "2025-03-01T09-00-00", &archived_id);
let archived_dest = archived_dir.join(
archived_source
.file_name()
.expect("archived rollout should have a file name"),
);
fs::rename(&archived_source, &archived_dest)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, active_id);
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(true),
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, archived_id);
Ok(())
}
#[tokio::test]
async fn thread_list_invalid_cursor_returns_error() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = init_mcp(codex_home.path()).await?;
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor: Some("not-a-cursor".to_string()),
limit: Some(2),
sort_key: None,
model_providers: Some(vec!["mock_provider".to_string()]),
source_kinds: None,
archived: None,
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, -32600);
assert_eq!(error.error.message, "invalid cursor: not-a-cursor");
Ok(())
}

View File

@@ -1,159 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadReadResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use pretty_assertions::assert_eq;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_read_returns_summary_without_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = [TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: false,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
assert_eq!(thread.turns.len(), 0);
Ok(())
}
#[tokio::test]
async fn thread_read_can_include_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: true,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.turns.len(), 1);
let turn = &thread.turns[0];
assert_eq!(turn.status, TurnStatus::Completed);
assert_eq!(turn.items.len(), 1, "expected user message item");
match &turn.items[0] {
ThreadItem::UserMessage { content, .. } => {
assert_eq!(
content,
&vec![UserInput::Text {
text: preview.to_string(),
text_elements: text_elements.clone().into_iter().map(Into::into).collect(),
}]
);
}
other => panic!("expected user message item, got {other:?}"),
}
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
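For illustration only, here is the shared scaffolding of the two tests above distilled into one minimal sketch; it is not part of the file above, and the rollout arguments are simply copied from those tests.
// Illustrative sketch only; mirrors the setup used by both tests in this file.
#[tokio::test]
async fn thread_read_setup_sketch() -> Result<()> {
    // Mock model server plus a config.toml pointing at it.
    let server = create_mock_responses_server_repeating_assistant("Done").await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;
    // Seed a rollout on disk (no text elements here), then start the MCP process.
    let _conversation_id = create_fake_rollout_with_text_elements(
        codex_home.path(),
        "2025-01-05T12-00-00",
        "2025-01-05T12:00:00Z",
        "Saved user message",
        Vec::new(),
        Some("mock_provider"),
        None,
    )?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    Ok(())
}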

View File

@@ -2,9 +2,7 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::Utc;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
@@ -13,25 +11,18 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::config_types::Personality;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const DEFAULT_BASE_INSTRUCTIONS: &str = "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
#[tokio::test]
async fn thread_resume_returns_original_thread() -> Result<()> {
@@ -71,9 +62,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
@@ -85,10 +74,10 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let text_elements = vec![TextElement {
byte_range: ByteRange { start: 0, end: 5 },
placeholder: Some("<note>".into()),
}];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
@@ -121,7 +110,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert!(thread.path.is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -151,116 +140,6 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_without_overrides_does_not_change_updated_at_or_mtime() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let thread_id = rollout.conversation_id.clone();
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread_id.clone(),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: rollout.conversation_id.clone(),
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_resume_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_resume_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: rollout.conversation_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
@@ -283,7 +162,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let thread_path = thread.path.clone().expect("thread path");
let thread_path = thread.path.clone();
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: "not-a-valid-thread-id".to_string(),
@@ -300,9 +179,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
@@ -337,7 +214,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
content: vec![ContentItem::InputText {
text: history_text.to_string(),
}],
end_turn: None,
}];
// Resume with explicit history and override the model.
@@ -367,91 +243,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
model: Some("gpt-5.2-codex".to_string()),
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let _resume: ThreadResumeResponse = to_response::<ThreadResumeResponse>(resume_resp)?;
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
assert!(
!developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"did not expect a personality update message in developer input, got {developer_texts:?}"
);
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(DEFAULT_BASE_INSTRUCTIONS),
"expected default base instructions from history, got {instructions_text:?}"
);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
@@ -465,9 +256,6 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
remote_models = false
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
@@ -478,51 +266,3 @@ stream_max_retries = 0
),
)
}
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = chrono::DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
std::fs::OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
struct RolloutFixture {
conversation_id: String,
rollout_file_path: PathBuf,
before_modified: std::time::SystemTime,
expected_updated_at: i64,
}
fn setup_rollout_fixture(codex_home: &Path, server_uri: &str) -> Result<RolloutFixture> {
create_config_toml(codex_home, server_uri)?;
let preview = "Saved user message";
let filename_ts = "2025-01-05T12-00-00";
let meta_rfc3339 = "2025-01-05T12:00:00Z";
let expected_updated_at_rfc3339 = "2025-01-07T00:00:00Z";
let conversation_id = create_fake_rollout_with_text_elements(
codex_home,
filename_ts,
meta_rfc3339,
preview,
Vec::new(),
Some("mock_provider"),
None,
)?;
let rollout_file_path = rollout_path(codex_home, filename_ts, &conversation_id);
set_rollout_mtime(rollout_file_path.as_path(), expected_updated_at_rfc3339)?;
let before_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
let expected_updated_at = chrono::DateTime::parse_from_rfc3339(expected_updated_at_rfc3339)?
.with_timezone(&Utc)
.timestamp();
Ok(RolloutFixture {
conversation_id,
rollout_file_path,
before_modified,
expected_updated_at,
})
}

View File

@@ -8,9 +8,6 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_core::config::set_project_trust_level;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -72,47 +69,6 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_start_respects_project_config_from_cwd() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
cwd: Some(workspace.path().to_string_lossy().into_owned()),
..Default::default()
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse {
reasoning_effort, ..
} = to_response::<ThreadStartResponse>(resp)?;
assert_eq!(reasoning_effort, Some(ReasoningEffort::High));
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");

View File

@@ -1,101 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadUnarchiveParams;
use codex_app_server_protocol::ThreadUnarchiveResponse;
use codex_core::find_archived_thread_path_by_id_str;
use codex_core::find_thread_path_by_id_str;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
#[tokio::test]
async fn thread_unarchive_moves_rollout_back_into_sessions_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let rollout_path = find_thread_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected rollout path for thread id to exist");
let archive_id = mcp
.send_thread_archive_request(ThreadArchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let archive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_id)),
)
.await??;
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
let archived_path = find_archived_thread_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected archived rollout path for thread id to exist");
let archived_path_display = archived_path.display();
assert!(
archived_path.exists(),
"expected {archived_path_display} to exist"
);
let unarchive_id = mcp
.send_thread_unarchive_request(ThreadUnarchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let unarchive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(unarchive_id)),
)
.await??;
let _: ThreadUnarchiveResponse = to_response::<ThreadUnarchiveResponse>(unarchive_resp)?;
let rollout_path_display = rollout_path.display();
assert!(
rollout_path.exists(),
"expected rollout path {rollout_path_display} to be restored"
);
assert!(
!archived_path.exists(),
"expected archived rollout path {archived_path_display} to be moved"
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, config_contents())
}
fn config_contents() -> &'static str {
r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}

View File

@@ -34,18 +34,10 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -59,12 +51,7 @@ async fn turn_start_sends_originator_header() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(
@@ -134,12 +121,7 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -157,10 +139,10 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".to_string()),
)];
let text_elements = vec![TextElement {
byte_range: ByteRange { start: 0, end: 5 },
placeholder: Some("<note>".to_string()),
}];
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
@@ -226,12 +208,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -328,161 +305,6 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let collaboration_mode = CollaborationMode {
mode: ModeKind::Custom,
settings: Settings {
model: "mock-model-collab".to_string(),
reasoning_effort: Some(ReasoningEffort::High),
developer_instructions: None,
},
};
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model-override".to_string()),
effort: Some(ReasoningEffort::Low),
summary: Some(ReasoningSummary::Auto),
output_schema: None,
collaboration_mode: Some(collaboration_mode),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let payload = request.body_json();
assert_eq!(payload["model"].as_str(), Some("mock-model-collab"));
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("exp-codex-personality".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
if developer_texts.is_empty() {
eprintln!("request body: {}", request.body_json());
}
assert!(
developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected personality update message in developer input, got {developer_texts:?}"
);
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
@@ -495,12 +317,7 @@ async fn turn_start_accepts_local_image_input() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -575,12 +392,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
// Default approval is untrusted to force elicitation on first turn.
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -705,12 +517,7 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -857,12 +664,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -900,9 +702,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -931,9 +731,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -1002,12 +800,7 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1184,12 +977,7 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
create_final_assistant_message_sse_response("patch 2 applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1365,12 +1153,7 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch declined")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1510,12 +1293,16 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::UnifiedExec, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let config_toml = codex_home.path().join("config.toml");
let mut config_contents = std::fs::read_to_string(&config_toml)?;
config_contents.push_str(
r#"
[features]
unified_exec = true
"#,
);
std::fs::write(&config_toml, config_contents)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1608,18 +1395,8 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
unreachable!("loop ensures we break on command execution items");
};
assert_eq!(completed_id, "uexec-1");
assert!(
matches!(
completed_status,
CommandExecutionStatus::Completed | CommandExecutionStatus::Failed
),
"unexpected command execution status: {completed_status:?}"
);
if completed_status == CommandExecutionStatus::Completed {
assert_eq!(exit_code, Some(0));
} else {
assert!(exit_code.is_some(), "expected exit_code for failed command");
}
assert_eq!(completed_status, CommandExecutionStatus::Completed);
assert_eq!(exit_code, Some(0));
assert_eq!(
completed_process_id.as_deref(),
Some(started_process_id.as_str())
@@ -1639,24 +1416,7 @@ fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
feature_flags: &BTreeMap<Feature, bool>,
) -> std::io::Result<()> {
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -1668,9 +1428,6 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
{feature_entries}
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"

View File

@@ -1,5 +1,4 @@
use crate::types::CodeTaskDetailsResponse;
use crate::types::ConfigFileResponse;
use crate::types::CreditStatusDetails;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
@@ -245,20 +244,6 @@ impl Client {
self.decode_json::<TurnAttemptsSiblingTurnsResponse>(&url, &ct, &body)
}
/// Fetch the managed requirements file from codex-backend.
///
/// `GET /api/codex/config/requirements` (Codex API style) or
/// `GET /wham/config/requirements` (ChatGPT backend-api style).
pub async fn get_config_requirements_file(&self) -> Result<ConfigFileResponse> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/config/requirements", self.base_url),
PathStyle::ChatGptApi => format!("{}/wham/config/requirements", self.base_url),
};
let req = self.http.get(&url).headers(self.headers());
let (body, ct) = self.exec_request(req, "GET", &url).await?;
self.decode_json::<ConfigFileResponse>(&url, &ct, &body)
}
/// Create a new task (user turn) by POSTing to the appropriate backend path
/// based on `path_style`. Returns the created task id.
pub async fn create_task(&self, request_body: serde_json::Value) -> Result<String> {

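For illustration, a minimal sketch of calling the `create_task` endpoint that remains after this change, written as it might appear inside the same crate; the JSON payload fields are placeholders, since the real request-body schema is not shown in this hunk.
// Illustrative sketch only; the payload fields below are hypothetical.
async fn create_task_sketch(client: &Client) -> Result<String> {
    let request_body = serde_json::json!({
        // hypothetical field name, consult the backend schema for the real shape
        "prompt": "Run the test suite and report failures",
    });
    // Returns the created task id on success.
    client.create_task(request_body).await
}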
View File

@@ -4,7 +4,6 @@ pub mod types;
pub use client::Client;
pub use types::CodeTaskDetailsResponse;
pub use types::CodeTaskDetailsResponseExt;
pub use types::ConfigFileResponse;
pub use types::PaginatedListTaskListItem;
pub use types::TaskListItem;
pub use types::TurnAttemptsSiblingTurnsResponse;

View File

@@ -1,4 +1,3 @@
pub use codex_backend_openapi_models::models::ConfigFileResponse;
pub use codex_backend_openapi_models::models::CreditStatusDetails;
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
pub use codex_backend_openapi_models::models::PlanType;

View File

@@ -5,7 +5,6 @@ use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use anyhow::Context;
use serde::Serialize;
use serde::de::DeserializeOwned;
/// Make a GET request to the ChatGPT backend API.
@@ -49,37 +48,3 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
anyhow::bail!("Request failed with status {status}: {body}")
}
}
pub(crate) async fn chatgpt_post_request<T: DeserializeOwned, P: Serialize>(
config: &Config,
access_token: &str,
account_id: &str,
path: &str,
payload: &P,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
let client = create_client();
let url = format!("{chatgpt_base_url}{path}");
let response = client
.post(&url)
.bearer_auth(access_token)
.header("chatgpt-account-id", account_id)
.header("Content-Type", "application/json")
.json(payload)
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response
.json()
.await
.context("Failed to parse JSON response")?;
Ok(result)
} else {
let status = response.status();
let body = response.text().await.unwrap_or_default();
anyhow::bail!("Request failed with status {status}: {body}")
}
}

View File

@@ -1,125 +0,0 @@
use codex_core::config::Config;
use codex_core::features::Feature;
use serde::Deserialize;
use serde::Serialize;
use crate::chatgpt_client::chatgpt_post_request;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
pub use codex_core::connectors::ConnectorInfo;
pub use codex_core::connectors::connector_display_label;
use codex_core::connectors::connector_install_url;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools;
use codex_core::connectors::merge_connectors;
#[derive(Debug, Serialize)]
struct ListConnectorsRequest {
principals: Vec<Principal>,
}
#[derive(Debug, Serialize)]
struct Principal {
#[serde(rename = "type")]
principal_type: PrincipalType,
id: String,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
enum PrincipalType {
User,
}
#[derive(Debug, Deserialize)]
struct ListConnectorsResponse {
connectors: Vec<ConnectorInfo>,
}
pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
let (connectors_result, accessible_result) = tokio::join!(
list_all_connectors(config),
list_accessible_connectors_from_mcp_tools(config),
);
let connectors = connectors_result?;
let accessible = accessible_result?;
Ok(merge_connectors(connectors, accessible))
}
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
.await?;
let token_data =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let user_id = token_data
.id_token
.chatgpt_user_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT user ID not available, please re-run `codex login`")
})?;
let account_id = token_data
.id_token
.chatgpt_account_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
})?;
let principal_id = format!("{user_id}__{account_id}");
let request = ListConnectorsRequest {
principals: vec![Principal {
principal_type: PrincipalType::User,
id: principal_id,
}],
};
let response: ListConnectorsResponse = chatgpt_post_request(
config,
token_data.access_token.as_str(),
account_id,
"/aip/connectors/list_accessible?skip_actions=true&external_logos=true",
&request,
)
.await?;
let mut connectors = response.connectors;
for connector in &mut connectors {
let install_url = match connector.install_url.take() {
Some(install_url) => install_url,
None => connector_install_url(&connector.connector_name, &connector.connector_id),
};
connector.connector_name =
normalize_connector_name(&connector.connector_name, &connector.connector_id);
connector.connector_description =
normalize_connector_value(connector.connector_description.as_deref());
connector.install_url = Some(install_url);
connector.is_accessible = false;
}
connectors.sort_by(|left, right| {
left.connector_name
.cmp(&right.connector_name)
.then_with(|| left.connector_id.cmp(&right.connector_id))
});
Ok(connectors)
}
fn normalize_connector_name(name: &str, connector_id: &str) -> String {
let trimmed = name.trim();
if trimmed.is_empty() {
connector_id.to_string()
} else {
trimmed.to_string()
}
}
fn normalize_connector_value(value: Option<&str>) -> Option<String> {
value
.map(str::trim)
.filter(|value| !value.is_empty())
.map(str::to_string)
}

View File

@@ -1,5 +1,4 @@
pub mod apply_command;
mod chatgpt_client;
mod chatgpt_token;
pub mod connectors;
pub mod get_task;

View File

@@ -35,6 +35,8 @@ codex-responses-api-proxy = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
codex-tui2 = { workspace = true }
codex-utils-absolute-path = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }
regex-lite = { workspace = true }

View File

@@ -26,8 +26,8 @@ use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::ExitReason;
use codex_tui::update_action::UpdateAction;
use codex_tui2 as tui2;
use owo_colors::OwoColorize;
use std::io::IsTerminal;
use std::path::PathBuf;
use supports_color::Stream;
@@ -39,8 +39,13 @@ use crate::mcp_cmd::McpCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::features::Feature;
use codex_core::features::FeatureOverrides;
use codex_core::features::Features;
use codex_core::features::is_known_feature_key;
use codex_core::terminal::TerminalName;
use codex_utils_absolute_path::AbsolutePathBuf;
/// Codex CLI
///
@@ -147,7 +152,7 @@ struct ResumeCommand {
session_id: Option<String>,
/// Continue the most recent session without showing the picker.
#[arg(long = "last", default_value_t = false)]
#[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
last: bool,
/// Show all sessions (disables cwd filtering and shows CWD column).
@@ -396,7 +401,8 @@ fn run_update_action(action: UpdateAction) -> anyhow::Result<()> {
if !status.success() {
anyhow::bail!("`{cmd_str}` failed with status {status}");
}
println!("\n🎉 Update ran successfully! Please restart Codex.");
println!();
println!("🎉 Update ran successfully! Please restart Codex.");
Ok(())
}
@@ -453,8 +459,8 @@ enum FeaturesSubcommand {
fn stage_str(stage: codex_core::features::Stage) -> &'static str {
use codex_core::features::Stage;
match stage {
Stage::UnderDevelopment => "under development",
Stage::Experimental { .. } => "experimental",
Stage::Experimental => "experimental",
Stage::Beta { .. } => "beta",
Stage::Stable => "stable",
Stage::Deprecated => "deprecated",
Stage::Removed => "removed",
@@ -695,20 +701,11 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
overrides,
)
.await?;
let mut rows = Vec::with_capacity(codex_core::features::FEATURES.len());
let mut name_width = 0;
let mut stage_width = 0;
for def in codex_core::features::FEATURES.iter() {
let name = def.key;
let stage = stage_str(def.stage);
let enabled = config.features.enabled(def.id);
name_width = name_width.max(name.len());
stage_width = stage_width.max(stage.len());
rows.push((name, stage, enabled));
}
for (name, stage, enabled) in rows {
println!("{name:<name_width$} {stage:<stage_width$} {enabled}");
println!("{name}\t{stage}\t{enabled}");
}
}
},
@@ -728,43 +725,44 @@ fn prepend_config_flags(
.splice(0..0, cli_config_overrides.raw_overrides);
}
/// Run the interactive Codex TUI, dispatching to either the legacy implementation or the
/// experimental TUI v2 shim based on feature flags resolved from config.
async fn run_interactive_tui(
mut interactive: TuiCli,
interactive: TuiCli,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> std::io::Result<AppExitInfo> {
if let Some(prompt) = interactive.prompt.take() {
// Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state.
interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n"));
if is_tui2_enabled(&interactive).await? {
let result = tui2::run_main(interactive.into(), codex_linux_sandbox_exe).await?;
Ok(result.into())
} else {
codex_tui::run_main(interactive, codex_linux_sandbox_exe).await
}
let terminal_info = codex_core::terminal::terminal_info();
if terminal_info.name == TerminalName::Dumb {
if !(std::io::stdin().is_terminal() && std::io::stderr().is_terminal()) {
return Ok(AppExitInfo::fatal(
"TERM is set to \"dumb\". Refusing to start the interactive TUI because no terminal is available for a confirmation prompt (stdin/stderr is not a TTY). Run in a supported terminal or unset TERM.",
));
}
eprintln!(
"WARNING: TERM is set to \"dumb\". Codex's interactive TUI may not work in this terminal."
);
if !confirm("Continue anyway? [y/N]: ")? {
return Ok(AppExitInfo::fatal(
"Refusing to start the interactive TUI because TERM is set to \"dumb\". Run in a supported terminal or unset TERM.",
));
}
}
codex_tui::run_main(interactive, codex_linux_sandbox_exe).await
}
fn confirm(prompt: &str) -> std::io::Result<bool> {
eprintln!("{prompt}");
/// Returns `Ok(true)` when the resolved configuration enables the `tui2` feature flag.
///
/// This performs a lightweight config load (honoring the same precedence as the lower-level TUI
/// bootstrap: `$CODEX_HOME`, config.toml, profile, and CLI `-c` overrides) solely to decide which
/// TUI frontend to launch. The full configuration is still loaded later by the interactive TUI.
async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> {
let raw_overrides = cli.config_overrides.raw_overrides.clone();
let overrides_cli = codex_common::CliConfigOverrides { raw_overrides };
let cli_kv_overrides = overrides_cli
.parse_overrides()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
let answer = input.trim();
Ok(answer.eq_ignore_ascii_case("y") || answer.eq_ignore_ascii_case("yes"))
let codex_home = find_codex_home()?;
let cwd = cli.cwd.clone();
let config_cwd = match cwd.as_deref() {
Some(path) => AbsolutePathBuf::from_absolute_path(path)?,
None => AbsolutePathBuf::current_dir()?,
};
let config_toml =
load_config_as_toml_with_cli_overrides(&codex_home, &config_cwd, cli_kv_overrides).await?;
let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?;
let overrides = FeatureOverrides::default();
let features = Features::from_config(&config_toml, &config_profile, overrides);
Ok(features.enabled(Feature::Tui2))
}
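For illustration, a minimal sketch of flipping the flag that `is_tui2_enabled` resolves, using the same append-to-config.toml pattern as the unified_exec test earlier in this diff; the key name `tui2` is an assumption inferred from `Feature::Tui2`, and the authoritative key lives in `codex_core::features::FEATURES`.
// Illustrative sketch only; "tui2" as the config key is an assumption.
fn enable_tui2_sketch(codex_home: &std::path::Path) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    // Append a [features] table entry so the lightweight config load picks it up.
    let mut contents = std::fs::read_to_string(&config_toml).unwrap_or_default();
    contents.push_str("\n[features]\ntui2 = true\n");
    std::fs::write(&config_toml, contents)
}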
/// Build the final `TuiCli` for a `codex resume` invocation.
@@ -857,8 +855,7 @@ fn merge_interactive_cli_flags(interactive: &mut TuiCli, subcommand_cli: TuiCli)
interactive.add_dir.extend(subcommand_cli.add_dir);
}
if let Some(prompt) = subcommand_cli.prompt {
// Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state.
interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n"));
interactive.prompt = Some(prompt);
}
interactive
@@ -932,24 +929,6 @@ mod tests {
finalize_fork_interactive(interactive, root_overrides, session_id, last, all, fork_cli)
}
#[test]
fn exec_resume_last_accepts_prompt_positional() {
let cli =
MultitoolCli::try_parse_from(["codex", "exec", "--json", "resume", "--last", "2+2"])
.expect("parse should succeed");
let Some(Subcommand::Exec(exec)) = cli.subcommand else {
panic!("expected exec subcommand");
};
let Some(codex_exec::Command::Resume(args)) = exec.command else {
panic!("expected exec resume");
};
assert!(args.last);
assert_eq!(args.session_id, None);
assert_eq!(args.prompt.as_deref(), Some("2+2"));
}
fn app_server_from_args(args: &[&str]) -> AppServerCommand {
let cli = MultitoolCli::try_parse_from(args).expect("parse");
let Subcommand::AppServer(app_server) = cli.subcommand.expect("app-server present") else {

View File

@@ -20,12 +20,11 @@ use codex_rmcp_client::perform_oauth_login;
use codex_rmcp_client::supports_oauth_login;
/// Subcommands:
/// - `serve` — run the MCP server on stdio
/// - `list` — list configured servers (with `--json`)
/// - `get` — show a single server (with `--json`)
/// - `add` — add a server launcher entry to `~/.codex/config.toml`
/// - `remove` — delete a server entry
/// - `login` — authenticate with MCP server using OAuth
/// - `logout` — remove OAuth credentials for MCP server
#[derive(Debug, clap::Parser)]
pub struct McpCli {
#[clap(flatten)]
@@ -247,7 +246,6 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
};
servers.insert(name.clone(), new_entry);
@@ -349,11 +347,6 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
_ => bail!("OAuth login is only supported for streamable HTTP servers."),
};
let mut scopes = scopes;
if scopes.is_empty() {
scopes = server.scopes.clone().unwrap_or_default();
}
perform_oauth_login(
&name,
&url,

View File

@@ -42,10 +42,6 @@ pub enum ResponseEvent {
Created,
OutputItemDone(ResponseItem),
OutputItemAdded(ResponseItem),
/// Emitted when `X-Reasoning-Included: true` is present on the response,
/// meaning the server already accounted for past reasoning tokens and the
/// client should not re-estimate them.
ServerReasoningIncluded(bool),
Completed {
response_id: String,
token_usage: Option<TokenUsage>,

View File

@@ -157,9 +157,6 @@ impl Stream for AggregatedStream {
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item))));
}
Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included))));
}
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
}
@@ -193,7 +190,6 @@ impl Stream for AggregatedStream {
content: vec![ContentItem::OutputText {
text: std::mem::take(&mut this.cumulative),
}],
end_turn: None,
};
this.pending
.push_back(ResponseEvent::OutputItemDone(aggregated_message));

View File

@@ -24,28 +24,23 @@ use tokio_tungstenite::tungstenite::Error as WsError;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::trace;
use url::Url;
type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state";
const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included";
pub struct ResponsesWebsocketConnection {
stream: Arc<Mutex<Option<WsStream>>>,
// TODO (pakrym): is this the right place for timeout?
idle_timeout: Duration,
server_reasoning_included: bool,
}
impl ResponsesWebsocketConnection {
fn new(stream: WsStream, idle_timeout: Duration, server_reasoning_included: bool) -> Self {
fn new(stream: WsStream, idle_timeout: Duration) -> Self {
Self {
stream: Arc::new(Mutex::new(Some(stream))),
idle_timeout,
server_reasoning_included,
}
}
@@ -61,17 +56,11 @@ impl ResponsesWebsocketConnection {
mpsc::channel::<std::result::Result<ResponseEvent, ApiError>>(1600);
let stream = Arc::clone(&self.stream);
let idle_timeout = self.idle_timeout;
let server_reasoning_included = self.server_reasoning_included;
let request_body = serde_json::to_value(&request).map_err(|err| {
ApiError::Stream(format!("failed to encode websocket request: {err}"))
})?;
tokio::spawn(async move {
if server_reasoning_included {
let _ = tx_event
.send(Ok(ResponseEvent::ServerReasoningIncluded(true)))
.await;
}
let mut guard = stream.lock().await;
let Some(ws_stream) = guard.as_mut() else {
let _ = tx_event
@@ -115,21 +104,17 @@ impl<A: AuthProvider> ResponsesWebsocketClient<A> {
extra_headers: HeaderMap,
turn_state: Option<Arc<OnceLock<String>>>,
) -> Result<ResponsesWebsocketConnection, ApiError> {
let ws_url = self
.provider
.websocket_url_for_path("responses")
let ws_url = Url::parse(&self.provider.url_for_path("responses"))
.map_err(|err| ApiError::Stream(format!("failed to build websocket URL: {err}")))?;
let mut headers = self.provider.headers.clone();
headers.extend(extra_headers);
apply_auth_headers(&mut headers, &self.auth);
let (stream, server_reasoning_included) =
connect_websocket(ws_url, headers, turn_state).await?;
let stream = connect_websocket(ws_url, headers, turn_state).await?;
Ok(ResponsesWebsocketConnection::new(
stream,
self.provider.stream_idle_timeout,
server_reasoning_included,
))
}
}
@@ -152,32 +137,16 @@ async fn connect_websocket(
url: Url,
headers: HeaderMap,
turn_state: Option<Arc<OnceLock<String>>>,
) -> Result<(WsStream, bool), ApiError> {
info!("connecting to websocket: {url}");
) -> Result<WsStream, ApiError> {
let mut request = url
.as_str()
.clone()
.into_client_request()
.map_err(|err| ApiError::Stream(format!("failed to build websocket request: {err}")))?;
request.headers_mut().extend(headers);
let response = tokio_tungstenite::connect_async(request).await;
let (stream, response) = match response {
Ok((stream, response)) => {
info!(
"successfully connected to websocket: {url}, headers: {:?}",
response.headers()
);
(stream, response)
}
Err(err) => {
error!("failed to connect to websocket: {err}, url: {url}");
return Err(map_ws_error(err, &url));
}
};
let reasoning_included = response.headers().contains_key(X_REASONING_INCLUDED_HEADER);
let (stream, response) = tokio_tungstenite::connect_async(request)
.await
.map_err(|err| map_ws_error(err, &url))?;
if let Some(turn_state) = turn_state
&& let Some(header_value) = response
.headers()
@@ -186,7 +155,7 @@ async fn connect_websocket(
{
let _ = turn_state.set(header_value.to_string());
}
Ok((stream, reasoning_included))
Ok(stream)
}
fn map_ws_error(err: WsError, url: &Url) -> ApiError {
@@ -228,7 +197,7 @@ async fn run_websocket_response_stream(
}
};
if let Err(err) = ws_stream.send(Message::Text(request_text.into())).await {
if let Err(err) = ws_stream.send(Message::Text(request_text)).await {
return Err(ApiError::Stream(format!(
"failed to send websocket request: {err}"
)));
@@ -288,7 +257,7 @@ async fn run_websocket_response_stream(
Message::Pong(_) => {}
Message::Close(_) => {
return Err(ApiError::Stream(
"websocket closed by server before response.completed".into(),
"websocket closed before response.completed".into(),
));
}
_ => {}

View File

@@ -25,8 +25,6 @@ pub enum ApiError {
},
#[error("rate limit: {0}")]
RateLimit(String),
#[error("invalid request: {message}")]
InvalidRequest { message: String },
}
impl From<RateLimitError> for ApiError {

View File

@@ -6,7 +6,6 @@ use http::Method;
use http::header::HeaderMap;
use std::collections::HashMap;
use std::time::Duration;
use url::Url;
/// Wire-level APIs supported by a `Provider`.
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -106,19 +105,6 @@ impl Provider {
self.base_url.to_ascii_lowercase().contains("openai.azure.")
|| matches_azure_responses_base_url(&self.base_url)
}
pub fn websocket_url_for_path(&self, path: &str) -> Result<Url, url::ParseError> {
let mut url = Url::parse(&self.url_for_path(path))?;
let scheme = match url.scheme() {
"http" => "ws",
"https" => "wss",
"ws" | "wss" => return Ok(url),
_ => return Ok(url),
};
let _ = url.set_scheme(scheme);
Ok(url)
}
}
fn matches_azure_responses_base_url(base_url: &str) -> bool {

View File

@@ -386,7 +386,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "hi".to_string(),
}],
end_turn: None,
}];
let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
.conversation_id(Some("conv-1".into()))
@@ -413,7 +412,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "read these".to_string(),
}],
end_turn: None,
},
ResponseItem::FunctionCall {
id: None,

View File

@@ -15,12 +15,13 @@ pub(crate) fn subagent_header(source: &Option<SessionSource>) -> Option<String>
return None;
};
match sub {
codex_protocol::protocol::SubAgentSource::Review => Some("review".to_string()),
codex_protocol::protocol::SubAgentSource::Compact => Some("compact".to_string()),
codex_protocol::protocol::SubAgentSource::ThreadSpawn { .. } => {
Some("collab_spawn".to_string())
}
codex_protocol::protocol::SubAgentSource::Other(label) => Some(label.clone()),
other => Some(
serde_json::to_value(other)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string()),
),
}
}

View File

@@ -223,13 +223,11 @@ mod tests {
id: Some("m1".into()),
role: "assistant".into(),
content: Vec::new(),
end_turn: None,
},
ResponseItem::Message {
id: None,
role: "assistant".into(),
content: Vec::new(),
end_turn: None,
},
];

View File

@@ -330,7 +330,6 @@ async fn append_assistant_text(
id: None,
role: "assistant".to_string(),
content: vec![],
end_turn: None,
};
*assistant_item = Some(item.clone());
let _ = tx_event

View File

@@ -25,8 +25,6 @@ use tokio_util::io::ReaderStream;
use tracing::debug;
use tracing::trace;
const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included";
/// Streams SSE events from an on-disk fixture for tests.
pub fn stream_from_fixture(
path: impl AsRef<Path>,
@@ -60,10 +58,6 @@ pub fn spawn_response_stream(
.get("X-Models-Etag")
.and_then(|v| v.to_str().ok())
.map(ToString::to_string);
let reasoning_included = stream_response
.headers
.get(X_REASONING_INCLUDED_HEADER)
.is_some();
if let Some(turn_state) = turn_state.as_ref()
&& let Some(header_value) = stream_response
.headers
@@ -80,11 +74,6 @@ pub fn spawn_response_stream(
if let Some(etag) = models_etag {
let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
}
if reasoning_included {
let _ = tx_event
.send(Ok(ResponseEvent::ServerReasoningIncluded(true)))
.await;
}
process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
});
@@ -228,11 +217,6 @@ pub fn process_responses_event(
response_error = ApiError::QuotaExceeded;
} else if is_usage_not_included(&error) {
response_error = ApiError::UsageNotIncluded;
} else if is_invalid_prompt_error(&error) {
let message = error
.message
.unwrap_or_else(|| "Invalid request.".to_string());
response_error = ApiError::InvalidRequest { message };
} else {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
@@ -291,7 +275,7 @@ pub fn process_responses_event(
if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
return Ok(Some(ResponseEvent::OutputItemAdded(item)));
}
debug!("failed to parse ResponseItem from output_item.added");
debug!("failed to parse ResponseItem from output_item.done");
}
}
"response.reasoning_summary_part.added" => {
@@ -412,10 +396,6 @@ fn is_usage_not_included(error: &Error) -> bool {
error.code.as_deref() == Some("usage_not_included")
}
fn is_invalid_prompt_error(error: &Error) -> bool {
error.code.as_deref() == Some("invalid_prompt")
}
fn rate_limit_regex() -> &'static regex_lite::Regex {
static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new();
#[expect(clippy::unwrap_used)]
@@ -731,27 +711,6 @@ mod tests {
assert_matches!(events[0], Err(ApiError::QuotaExceeded));
}
#[tokio::test]
async fn invalid_prompt_without_type_is_invalid_request() {
let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_invalid_prompt_no_type","object":"response","created_at":1759771628,"status":"failed","background":false,"error":{"code":"invalid_prompt","message":"Invalid prompt: we've limited access to this content for safety reasons."},"incomplete_details":null}}"#;
let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
let events = collect_events(&[sse1.as_bytes()]).await;
assert_eq!(events.len(), 1);
match &events[0] {
Err(ApiError::InvalidRequest { message }) => {
assert_eq!(
message,
"Invalid prompt: we've limited access to this content for safety reasons."
);
}
other => panic!("unexpected event: {other:?}"),
}
}
#[tokio::test]
async fn table_driven_event_kinds() {
struct TestCase {

View File

@@ -308,7 +308,6 @@ async fn streaming_client_retries_on_transport_error() -> Result<()> {
content: vec![ContentItem::InputText {
text: "hi".to_string(),
}],
end_turn: None,
}],
tools: Vec::<Value>::new(),
parallel_tool_calls: false,

View File

@@ -77,7 +77,6 @@ async fn models_client_hits_models_endpoint() {
priority: 1,
upgrade: None,
base_instructions: "base instructions".to_string(),
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,

View File

@@ -1,40 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConfigFileResponse {
#[serde(rename = "contents", skip_serializing_if = "Option::is_none")]
pub contents: Option<String>,
#[serde(rename = "sha256", skip_serializing_if = "Option::is_none")]
pub sha256: Option<String>,
#[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")]
pub updated_at: Option<String>,
#[serde(rename = "updated_by_user_id", skip_serializing_if = "Option::is_none")]
pub updated_by_user_id: Option<String>,
}
impl ConfigFileResponse {
pub fn new(
contents: Option<String>,
sha256: Option<String>,
updated_at: Option<String>,
updated_by_user_id: Option<String>,
) -> ConfigFileResponse {
ConfigFileResponse {
contents,
sha256,
updated_at,
updated_by_user_id,
}
}
}

View File

@@ -3,10 +3,6 @@
// Currently export only the types referenced by the workspace
// The process for this will change
// Config
pub mod config_file_response;
pub use self::config_file_response::ConfigFileResponse;
// Cloud Tasks
pub mod code_task_details_response;
pub use self::code_task_details_response::CodeTaskDetailsResponse;

View File

@@ -42,12 +42,9 @@ impl RateLimitStatusPayload {
}
}
#[derive(
Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Default,
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum PlanType {
#[serde(rename = "guest")]
#[default]
Guest,
#[serde(rename = "free")]
Free,
@@ -74,3 +71,9 @@ pub enum PlanType {
#[serde(rename = "edu")]
Edu,
}
impl Default for PlanType {
fn default() -> PlanType {
Self::Guest
}
}
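Not part of the diff: a minimal, self-contained sketch of the two equivalent `Default` forms this hunk toggles between. Since Rust 1.62, `#[derive(Default)]` on an enum picks the variant tagged `#[default]`, matching the hand-written `impl Default` shown above; `Tier` is an illustrative stand-in, not the crate's `PlanType`.

#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
enum Tier {
    #[default]
    Guest,
    Free,
}

fn main() {
    // The derive and the manual impl produce the same value.
    assert_eq!(Tier::default(), Tier::Guest);
}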

View File

@@ -24,21 +24,21 @@ pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
ApprovalPreset {
id: "read-only",
label: "Read Only",
description: "Codex can read files in the current workspace. Approval is required to edit files or access the internet.",
description: "Requires approval to edit files and run commands.",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::ReadOnly,
},
ApprovalPreset {
id: "auto",
label: "Default",
description: "Codex can read and edit files in the current workspace, and run commands. Approval is required to access the internet or edit other files. (Identical to Agent mode)",
label: "Agent",
description: "Read and edit files, and run commands.",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::new_workspace_write_policy(),
},
ApprovalPreset {
id: "full-access",
label: "Full Access",
description: "Codex can edit files outside this workspace and access the internet without asking for approval. Exercise caution when using.",
label: "Agent (full access)",
description: "Codex can edit files outside this workspace and run commands with network access. Exercise caution when using.",
approval: AskForApproval::Never,
sandbox: SandboxPolicy::DangerFullAccess,
},

View File

@@ -18,7 +18,6 @@ codex_rust_crate(
),
integration_compile_data_extra = [
"//codex-rs/apply-patch:apply_patch_tool_instructions.md",
"models.json",
"prompt.md",
],
test_data_extra = [

View File

@@ -18,7 +18,7 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
arc-swap = "1.8.0"
arc-swap = "1.7.1"
async-channel = { workspace = true }
async-trait = { workspace = true }
base64 = { workspace = true }
@@ -64,7 +64,6 @@ reqwest = { workspace = true, features = ["json", "stream"] }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
serde_path_to_error = { workspace = true }
serde_yaml = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,4 @@
use crate::agent::AgentStatus;
use crate::agent::guards::Guards;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::thread_manager::ThreadManagerState;
@@ -13,25 +12,18 @@ use tokio::sync::watch;
/// Control-plane handle for multi-agent operations.
/// `AgentControl` is held by each session (via `SessionServices`). It provides capability to
/// spawn new agents and the inter-agent communication layer.
/// An `AgentControl` instance is shared per "user session", which means the same `AgentControl`
/// is used for every sub-agent spawned by Codex. By doing so, we make sure the guards are
/// scoped to a user session.
#[derive(Clone, Default)]
pub(crate) struct AgentControl {
/// Weak handle back to the global thread registry/state.
/// This is `Weak` to avoid reference cycles and shadow persistence of the form
/// `ThreadManagerState -> CodexThread -> Session -> SessionServices -> ThreadManagerState`.
manager: Weak<ThreadManagerState>,
state: Arc<Guards>,
}
impl AgentControl {
/// Construct a new `AgentControl` that can spawn/message agents via the given manager state.
pub(crate) fn new(manager: Weak<ThreadManagerState>) -> Self {
Self {
manager,
..Default::default()
}
Self { manager }
}
/// Spawn a new agent thread and submit the initial prompt.
@@ -39,21 +31,9 @@ impl AgentControl {
&self,
config: crate::config::Config,
prompt: String,
session_source: Option<codex_protocol::protocol::SessionSource>,
) -> CodexResult<ThreadId> {
let state = self.upgrade()?;
let reservation = self.state.reserve_spawn_slot(config.agent_max_threads)?;
// The same `AgentControl` is sent to spawn the thread.
let new_thread = match session_source {
Some(session_source) => {
state
.spawn_new_thread_with_source(config, self.clone(), session_source)
.await?
}
None => state.spawn_new_thread(config, self.clone()).await?,
};
reservation.commit(new_thread.thread_id);
let new_thread = state.spawn_new_thread(config, self.clone()).await?;
// Notify a new thread has been created. This notification will be processed by clients
// to subscribe or drain this newly created thread.
@@ -78,7 +58,7 @@ impl AgentControl {
Op::UserInput {
items: vec![UserInput::Text {
text: prompt,
// Agent control prompts are plain text with no UI text elements.
// Plain text conversion has no UI element ranges.
text_elements: Vec::new(),
}],
final_output_json_schema: None,
@@ -87,7 +67,6 @@ impl AgentControl {
.await;
if matches!(result, Err(CodexErr::InternalAgentDied)) {
let _ = state.remove_thread(&agent_id).await;
self.state.release_spawned_thread(agent_id);
}
result
}
@@ -103,10 +82,10 @@ impl AgentControl {
let state = self.upgrade()?;
let result = state.send_op(agent_id, Op::Shutdown {}).await;
let _ = state.remove_thread(&agent_id).await;
self.state.release_spawned_thread(agent_id);
result
}
#[allow(dead_code)] // Will be used for collab tools.
/// Fetch the last known status for `agent_id`, returning `NotFound` when unavailable.
pub(crate) async fn get_status(&self, agent_id: ThreadId) -> AgentStatus {
let Ok(state) = self.upgrade() else {
@@ -154,25 +133,17 @@ mod tests {
use codex_protocol::protocol::TurnStartedEvent;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use toml::Value as TomlValue;
async fn test_config_with_cli_overrides(
cli_overrides: Vec<(String, TomlValue)>,
) -> (TempDir, Config) {
async fn test_config() -> (TempDir, Config) {
let home = TempDir::new().expect("create temp dir");
let config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.cli_overrides(cli_overrides)
.build()
.await
.expect("load default test config");
(home, config)
}
async fn test_config() -> (TempDir, Config) {
test_config_with_cli_overrides(Vec::new()).await
}
struct AgentControlHarness {
_home: TempDir,
config: Config,
@@ -276,7 +247,7 @@ mod tests {
let control = AgentControl::default();
let (_home, config) = test_config().await;
let err = control
.spawn_agent(config, "hello".to_string(), None)
.spawn_agent(config, "hello".to_string())
.await
.expect_err("spawn_agent should fail without a manager");
assert_eq!(
@@ -378,7 +349,7 @@ mod tests {
let harness = AgentControlHarness::new().await;
let thread_id = harness
.control
.spawn_agent(harness.config.clone(), "spawned".to_string(), None)
.spawn_agent(harness.config.clone(), "spawned".to_string())
.await
.expect("spawn_agent should succeed");
let _thread = harness
@@ -403,117 +374,4 @@ mod tests {
.find(|entry| *entry == expected);
assert_eq!(captured, Some(expected));
}
#[tokio::test]
async fn spawn_agent_respects_max_threads_limit() {
let max_threads = 1usize;
let (_home, config) = test_config_with_cli_overrides(vec![(
"agents.max_threads".to_string(),
TomlValue::Integer(max_threads as i64),
)])
.await;
let manager = ThreadManager::with_models_provider_and_home(
CodexAuth::from_api_key("dummy"),
config.model_provider.clone(),
config.codex_home.clone(),
);
let control = manager.agent_control();
let _ = manager
.start_thread(config.clone())
.await
.expect("start thread");
let first_agent_id = control
.spawn_agent(config.clone(), "hello".to_string(), None)
.await
.expect("spawn_agent should succeed");
let err = control
.spawn_agent(config, "hello again".to_string(), None)
.await
.expect_err("spawn_agent should respect max threads");
let CodexErr::AgentLimitReached {
max_threads: seen_max_threads,
} = err
else {
panic!("expected CodexErr::AgentLimitReached");
};
assert_eq!(seen_max_threads, max_threads);
let _ = control
.shutdown_agent(first_agent_id)
.await
.expect("shutdown agent");
}
#[tokio::test]
async fn spawn_agent_releases_slot_after_shutdown() {
let max_threads = 1usize;
let (_home, config) = test_config_with_cli_overrides(vec![(
"agents.max_threads".to_string(),
TomlValue::Integer(max_threads as i64),
)])
.await;
let manager = ThreadManager::with_models_provider_and_home(
CodexAuth::from_api_key("dummy"),
config.model_provider.clone(),
config.codex_home.clone(),
);
let control = manager.agent_control();
let first_agent_id = control
.spawn_agent(config.clone(), "hello".to_string(), None)
.await
.expect("spawn_agent should succeed");
let _ = control
.shutdown_agent(first_agent_id)
.await
.expect("shutdown agent");
let second_agent_id = control
.spawn_agent(config.clone(), "hello again".to_string(), None)
.await
.expect("spawn_agent should succeed after shutdown");
let _ = control
.shutdown_agent(second_agent_id)
.await
.expect("shutdown agent");
}
#[tokio::test]
async fn spawn_agent_limit_shared_across_clones() {
let max_threads = 1usize;
let (_home, config) = test_config_with_cli_overrides(vec![(
"agents.max_threads".to_string(),
TomlValue::Integer(max_threads as i64),
)])
.await;
let manager = ThreadManager::with_models_provider_and_home(
CodexAuth::from_api_key("dummy"),
config.model_provider.clone(),
config.codex_home.clone(),
);
let control = manager.agent_control();
let cloned = control.clone();
let first_agent_id = cloned
.spawn_agent(config.clone(), "hello".to_string(), None)
.await
.expect("spawn_agent should succeed");
let err = control
.spawn_agent(config, "hello again".to_string(), None)
.await
.expect_err("spawn_agent should respect shared guard");
let CodexErr::AgentLimitReached { max_threads } = err else {
panic!("expected CodexErr::AgentLimitReached");
};
assert_eq!(max_threads, 1);
let _ = control
.shutdown_agent(first_agent_id)
.await
.expect("shutdown agent");
}
}
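Not part of the diff: a self-contained sketch of the `Weak` handle pattern the `AgentControl` doc comment above describes. `Registry` stands in for `ThreadManagerState`; holding `Weak` breaks the reference cycle, and callers upgrade on demand, treating a failed upgrade as "the manager is gone".

use std::sync::Arc;
use std::sync::Weak;

struct Registry {
    name: String,
}

#[derive(Clone, Default)]
struct Control {
    // Weak: outstanding controls never keep the registry alive on their own.
    manager: Weak<Registry>,
}

impl Control {
    fn upgrade(&self) -> Result<Arc<Registry>, &'static str> {
        self.manager.upgrade().ok_or("manager dropped")
    }
}

fn main() {
    let registry = Arc::new(Registry { name: "main".to_string() });
    let control = Control { manager: Arc::downgrade(&registry) };
    assert_eq!(control.upgrade().expect("registry alive").name, "main");
    drop(registry);
    assert!(control.upgrade().is_err());
}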

View File

@@ -1,238 +0,0 @@
use crate::error::CodexErr;
use crate::error::Result;
use codex_protocol::ThreadId;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
/// This structure enforces limits on the multi-agent capabilities of Codex. In
/// the current implementation, it limits:
/// * Total number of sub-agents (i.e. threads) per user session
///
/// This structure is shared by all agents in the same user session (because the `AgentControl`
/// is).
#[derive(Default)]
pub(crate) struct Guards {
threads_set: Mutex<HashSet<ThreadId>>,
total_count: AtomicUsize,
}
/// Initial agent is depth 0.
pub(crate) const MAX_THREAD_SPAWN_DEPTH: i32 = 1;
fn session_depth(session_source: &SessionSource) -> i32 {
match session_source {
SessionSource::SubAgent(SubAgentSource::ThreadSpawn { depth, .. }) => *depth,
SessionSource::SubAgent(_) => 0,
_ => 0,
}
}
pub(crate) fn next_thread_spawn_depth(session_source: &SessionSource) -> i32 {
session_depth(session_source).saturating_add(1)
}
pub(crate) fn exceeds_thread_spawn_depth_limit(depth: i32) -> bool {
depth > MAX_THREAD_SPAWN_DEPTH
}
impl Guards {
pub(crate) fn reserve_spawn_slot(
self: &Arc<Self>,
max_threads: Option<usize>,
) -> Result<SpawnReservation> {
if let Some(max_threads) = max_threads {
if !self.try_increment_spawned(max_threads) {
return Err(CodexErr::AgentLimitReached { max_threads });
}
} else {
self.total_count.fetch_add(1, Ordering::AcqRel);
}
Ok(SpawnReservation {
state: Arc::clone(self),
active: true,
})
}
pub(crate) fn release_spawned_thread(&self, thread_id: ThreadId) {
let removed = {
let mut threads = self
.threads_set
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
threads.remove(&thread_id)
};
if removed {
self.total_count.fetch_sub(1, Ordering::AcqRel);
}
}
fn register_spawned_thread(&self, thread_id: ThreadId) {
let mut threads = self
.threads_set
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
threads.insert(thread_id);
}
fn try_increment_spawned(&self, max_threads: usize) -> bool {
let mut current = self.total_count.load(Ordering::Acquire);
loop {
if current >= max_threads {
return false;
}
match self.total_count.compare_exchange_weak(
current,
current + 1,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(updated) => current = updated,
}
}
}
}
pub(crate) struct SpawnReservation {
state: Arc<Guards>,
active: bool,
}
impl SpawnReservation {
pub(crate) fn commit(mut self, thread_id: ThreadId) {
self.state.register_spawned_thread(thread_id);
self.active = false;
}
}
impl Drop for SpawnReservation {
fn drop(&mut self) {
if self.active {
self.state.total_count.fetch_sub(1, Ordering::AcqRel);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn session_depth_defaults_to_zero_for_root_sources() {
assert_eq!(session_depth(&SessionSource::Cli), 0);
}
#[test]
fn thread_spawn_depth_increments_and_enforces_limit() {
let session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id: ThreadId::new(),
depth: 1,
});
let child_depth = next_thread_spawn_depth(&session_source);
assert_eq!(child_depth, 2);
assert!(exceeds_thread_spawn_depth_limit(child_depth));
}
#[test]
fn non_thread_spawn_subagents_default_to_depth_zero() {
let session_source = SessionSource::SubAgent(SubAgentSource::Review);
assert_eq!(session_depth(&session_source), 0);
assert_eq!(next_thread_spawn_depth(&session_source), 1);
assert!(!exceeds_thread_spawn_depth_limit(1));
}
#[test]
fn reservation_drop_releases_slot() {
let guards = Arc::new(Guards::default());
let reservation = guards.reserve_spawn_slot(Some(1)).expect("reserve slot");
drop(reservation);
let reservation = guards.reserve_spawn_slot(Some(1)).expect("slot released");
drop(reservation);
}
#[test]
fn commit_holds_slot_until_release() {
let guards = Arc::new(Guards::default());
let reservation = guards.reserve_spawn_slot(Some(1)).expect("reserve slot");
let thread_id = ThreadId::new();
reservation.commit(thread_id);
let err = match guards.reserve_spawn_slot(Some(1)) {
Ok(_) => panic!("limit should be enforced"),
Err(err) => err,
};
let CodexErr::AgentLimitReached { max_threads } = err else {
panic!("expected CodexErr::AgentLimitReached");
};
assert_eq!(max_threads, 1);
guards.release_spawned_thread(thread_id);
let reservation = guards
.reserve_spawn_slot(Some(1))
.expect("slot released after thread removal");
drop(reservation);
}
#[test]
fn release_ignores_unknown_thread_id() {
let guards = Arc::new(Guards::default());
let reservation = guards.reserve_spawn_slot(Some(1)).expect("reserve slot");
let thread_id = ThreadId::new();
reservation.commit(thread_id);
guards.release_spawned_thread(ThreadId::new());
let err = match guards.reserve_spawn_slot(Some(1)) {
Ok(_) => panic!("limit should still be enforced"),
Err(err) => err,
};
let CodexErr::AgentLimitReached { max_threads } = err else {
panic!("expected CodexErr::AgentLimitReached");
};
assert_eq!(max_threads, 1);
guards.release_spawned_thread(thread_id);
let reservation = guards
.reserve_spawn_slot(Some(1))
.expect("slot released after real thread removal");
drop(reservation);
}
#[test]
fn release_is_idempotent_for_registered_threads() {
let guards = Arc::new(Guards::default());
let reservation = guards.reserve_spawn_slot(Some(1)).expect("reserve slot");
let first_id = ThreadId::new();
reservation.commit(first_id);
guards.release_spawned_thread(first_id);
let reservation = guards.reserve_spawn_slot(Some(1)).expect("slot reused");
let second_id = ThreadId::new();
reservation.commit(second_id);
guards.release_spawned_thread(first_id);
let err = match guards.reserve_spawn_slot(Some(1)) {
Ok(_) => panic!("limit should still be enforced"),
Err(err) => err,
};
let CodexErr::AgentLimitReached { max_threads } = err else {
panic!("expected CodexErr::AgentLimitReached");
};
assert_eq!(max_threads, 1);
guards.release_spawned_thread(second_id);
let reservation = guards
.reserve_spawn_slot(Some(1))
.expect("slot released after second thread removal");
drop(reservation);
}
}
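Not part of the diff: a trimmed-down sketch of the reserve/commit/drop flow the removed guards module implemented, using only the standard library. A reservation bumps an atomic counter with a compare-exchange loop; dropping it uncommitted rolls the count back, so a failed spawn never leaks a slot. The names (`Limiter`, `Slot`) are illustrative.

use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

#[derive(Default)]
struct Limiter {
    total: AtomicUsize,
}

struct Slot {
    limiter: Arc<Limiter>,
    active: bool,
}

impl Limiter {
    fn reserve(self: &Arc<Self>, max: usize) -> Option<Slot> {
        let mut current = self.total.load(Ordering::Acquire);
        loop {
            if current >= max {
                return None;
            }
            match self.total.compare_exchange_weak(
                current,
                current + 1,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => return Some(Slot { limiter: Arc::clone(self), active: true }),
                Err(seen) => current = seen,
            }
        }
    }
}

impl Slot {
    fn commit(mut self) {
        // A committed slot stays counted until the thread is released elsewhere.
        self.active = false;
    }
}

impl Drop for Slot {
    fn drop(&mut self) {
        // Any early return before commit() lands here and frees the slot.
        if self.active {
            self.limiter.total.fetch_sub(1, Ordering::AcqRel);
        }
    }
}

fn main() {
    let limiter = Arc::new(Limiter::default());
    let slot = limiter.reserve(1).expect("first slot");
    assert!(limiter.reserve(1).is_none()); // limit enforced
    drop(slot); // uncommitted drop releases the slot
    assert!(limiter.reserve(1).is_some());
}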

View File

@@ -1,12 +1,8 @@
pub(crate) mod control;
mod guards;
pub(crate) mod role;
pub(crate) mod status;
pub(crate) use codex_protocol::protocol::AgentStatus;
pub(crate) use control::AgentControl;
pub(crate) use guards::MAX_THREAD_SPAWN_DEPTH;
pub(crate) use guards::exceeds_thread_spawn_depth_limit;
pub(crate) use guards::next_thread_spawn_depth;
pub(crate) use role::AgentRole;
pub(crate) use status::agent_status_from_event;

View File

@@ -1,22 +1,20 @@
use crate::config::Config;
use crate::protocol::SandboxPolicy;
use codex_protocol::openai_models::ReasoningEffort;
use serde::Deserialize;
use serde::Serialize;
/// Base instructions for the orchestrator role.
const ORCHESTRATOR_PROMPT: &str = include_str!("../../templates/agents/orchestrator.md");
/// Default model override used by the explorer role.
// TODO(jif) update when we have something smarter.
const EXPLORER_MODEL: &str = "gpt-5.2-codex";
/// Base instructions for the worker role.
const WORKER_PROMPT: &str = include_str!("../../gpt-5.2-codex_prompt.md");
/// Default worker model override used by the worker role.
const WORKER_MODEL: &str = "gpt-5.2-codex";
/// Enumerated list of all supported agent roles.
const ALL_ROLES: [AgentRole; 3] = [
AgentRole::Default,
AgentRole::Explorer,
AgentRole::Orchestrator,
AgentRole::Worker,
// TODO(jif) add when we have stable prompts + models
// AgentRole::Orchestrator,
];
/// Hard-coded agent role selection used when spawning sub-agents.
@@ -29,8 +27,6 @@ pub enum AgentRole {
Orchestrator,
/// Task-executing agent with a fixed model override.
Worker,
/// Task-executing agent with a fixed model override.
Explorer,
}
/// Immutable profile data that drives per-agent configuration overrides.
@@ -40,12 +36,8 @@ pub struct AgentProfile {
pub base_instructions: Option<&'static str>,
/// Optional model override.
pub model: Option<&'static str>,
/// Optional reasoning effort override.
pub reasoning_effort: Option<ReasoningEffort>,
/// Whether to force a read-only sandbox policy.
pub read_only: bool,
/// Description to include in the tool specs.
pub description: &'static str,
}
impl AgentRole {
@@ -53,19 +45,7 @@ impl AgentRole {
pub fn enum_values() -> Vec<String> {
ALL_ROLES
.iter()
.filter_map(|role| {
let description = role.profile().description;
serde_json::to_string(role)
.map(|role| {
let description = if !description.is_empty() {
format!(r#", "description": {description}"#)
} else {
String::new()
};
format!(r#"{{ "name": {role}{description}}}"#)
})
.ok()
})
.filter_map(|role| serde_json::to_string(role).ok())
.collect()
}
@@ -78,35 +58,8 @@ impl AgentRole {
..Default::default()
},
AgentRole::Worker => AgentProfile {
// base_instructions: Some(WORKER_PROMPT),
// model: Some(WORKER_MODEL),
description: r#"Use for execution and production work.
Typical tasks:
- Implement part of a feature
- Fix tests or bugs
- Split large refactors into independent chunks
Rules:
- Explicitly assign **ownership** of the task (files / responsibility).
- Always tell workers they are **not alone in the codebase**, and that they should leave edits made by others untouched."#,
..Default::default()
},
AgentRole::Explorer => AgentProfile {
model: Some(EXPLORER_MODEL),
reasoning_effort: Some(ReasoningEffort::Low),
description: r#"Use for fast codebase understanding and information gathering.
`explorer` agents are extremely fast, so use them as much as you can to speed up resolution of the global task.
Typical tasks:
- Locate usages of a symbol or concept
- Understand how X is handled in Y
- Review a section of code for issues
- Assess impact of a potential change
Rules:
- Be explicit about what you are looking for. A good use of `explorer` means you don't need to re-read the same code after the explorer sends you the result.
- **Always** prefer asking explorers rather than exploring the codebase yourself.
- Spawn multiple explorers in parallel when useful and wait for all results.
- You can ask the `explorer` to return file names, line numbers, entire code snippets, ...
- Reuse the same explorer when relevant. If you later have more questions about code an explorer already covered, reuse that same explorer to be more efficient.
"#,
base_instructions: Some(WORKER_PROMPT),
model: Some(WORKER_MODEL),
..Default::default()
},
}
@@ -121,9 +74,6 @@ Rules:
if let Some(model) = profile.model {
config.model = Some(model.to_string());
}
if let Some(reasoning_effort) = profile.reasoning_effort {
config.model_reasoning_effort = Some(reasoning_effort)
}
if profile.read_only {
config
.sandbox_policy

View File

@@ -28,7 +28,6 @@ pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
url: None,
request_id: None,
}),
ApiError::InvalidRequest { message } => CodexErr::InvalidRequest(message),
ApiError::Transport(transport) => match transport {
TransportError::Http {
status,

View File

@@ -42,7 +42,6 @@ pub(crate) async fn apply_patch(
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
turn_context.windows_sandbox_level,
) {
SafetyCheck::AutoApprove {
user_explicitly_approved,

View File

@@ -996,7 +996,6 @@ mod tests {
id_token: IdTokenInfo {
email: Some("user@example.com".to_string()),
chatgpt_plan_type: Some(InternalPlanType::Known(InternalKnownPlan::Pro)),
chatgpt_user_id: Some("user-12345".to_string()),
chatgpt_account_id: None,
raw_jwt: fake_jwt,
},

View File

@@ -138,12 +138,26 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
words.push(child.utf8_text(src.as_bytes()).ok()?.to_owned());
}
"string" => {
let parsed = parse_double_quoted_string(child, src)?;
words.push(parsed);
if child.child_count() == 3
&& child.child(0)?.kind() == "\""
&& child.child(1)?.kind() == "string_content"
&& child.child(2)?.kind() == "\""
{
words.push(child.child(1)?.utf8_text(src.as_bytes()).ok()?.to_owned());
} else {
return None;
}
}
"raw_string" => {
let parsed = parse_raw_string(child, src)?;
words.push(parsed);
let raw_string = child.utf8_text(src.as_bytes()).ok()?;
let stripped = raw_string
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''));
if let Some(s) = stripped {
words.push(s.to_owned());
} else {
return None;
}
}
"concatenation" => {
// Handle concatenated arguments like -g"*.py"
@@ -156,12 +170,28 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
.push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str());
}
"string" => {
let parsed = parse_double_quoted_string(part, src)?;
concatenated.push_str(&parsed);
if part.child_count() == 3
&& part.child(0)?.kind() == "\""
&& part.child(1)?.kind() == "string_content"
&& part.child(2)?.kind() == "\""
{
concatenated.push_str(
part.child(1)?
.utf8_text(src.as_bytes())
.ok()?
.to_owned()
.as_str(),
);
} else {
return None;
}
}
"raw_string" => {
let parsed = parse_raw_string(part, src)?;
concatenated.push_str(&parsed);
let raw_string = part.utf8_text(src.as_bytes()).ok()?;
let stripped = raw_string
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''))?;
concatenated.push_str(stripped);
}
_ => return None,
}
@@ -177,40 +207,9 @@ fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Ve
Some(words)
}
fn parse_double_quoted_string(node: Node, src: &str) -> Option<String> {
if node.kind() != "string" {
return None;
}
let mut cursor = node.walk();
for part in node.named_children(&mut cursor) {
if part.kind() != "string_content" {
return None;
}
}
let raw = node.utf8_text(src.as_bytes()).ok()?;
let stripped = raw
.strip_prefix('"')
.and_then(|text| text.strip_suffix('"'))?;
Some(stripped.to_string())
}
fn parse_raw_string(node: Node, src: &str) -> Option<String> {
if node.kind() != "raw_string" {
return None;
}
let raw_string = node.utf8_text(src.as_bytes()).ok()?;
let stripped = raw_string
.strip_prefix('\'')
.and_then(|s| s.strip_suffix('\''));
stripped.map(str::to_owned)
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> {
let tree = try_parse_shell(src)?;
@@ -251,38 +250,6 @@ mod tests {
);
}
#[test]
fn accepts_double_quoted_strings_with_newlines() {
let cmds = parse_seq("git commit -m \"line1\nline2\"").unwrap();
assert_eq!(
cmds,
vec![vec![
"git".to_string(),
"commit".to_string(),
"-m".to_string(),
"line1\nline2".to_string(),
]]
);
}
#[test]
fn accepts_mixed_quote_concatenation() {
assert_eq!(
parse_seq(r#"echo "/usr"'/'"local"/bin"#).unwrap(),
vec![vec!["echo".to_string(), "/usr/local/bin".to_string()]]
);
assert_eq!(
parse_seq(r#"echo '/usr'"/"'local'/bin"#).unwrap(),
vec![vec!["echo".to_string(), "/usr/local/bin".to_string()]]
);
}
#[test]
fn rejects_double_quoted_strings_with_expansions() {
assert!(parse_seq(r#"echo "hi ${USER}""#).is_none());
assert!(parse_seq(r#"echo "$HOME""#).is_none());
}
#[test]
fn accepts_numbers_as_words() {
let cmds = parse_seq("echo 123 456").unwrap();
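Not part of the diff: the quote-stripping rules that `parse_double_quoted_string` and `parse_raw_string` centralize, sketched on plain `&str` without the tree-sitter node plumbing. The expansion check here scans for `$` and backticks as a rough stand-in for the real parser's node-kind check.

fn strip_double_quotes(word: &str) -> Option<String> {
    let inner = word.strip_prefix('"')?.strip_suffix('"')?;
    // Reject anything that could be an expansion; the real parser inspects
    // tree-sitter node kinds instead of scanning characters.
    if inner.contains('$') || inner.contains('`') {
        return None;
    }
    Some(inner.to_string())
}

fn strip_single_quotes(word: &str) -> Option<String> {
    word.strip_prefix('\'')
        .and_then(|rest| rest.strip_suffix('\''))
        .map(str::to_owned)
}

fn main() {
    assert_eq!(strip_double_quotes("\"line1\nline2\""), Some("line1\nline2".to_string()));
    assert_eq!(strip_double_quotes("\"$HOME\""), None);
    assert_eq!(strip_single_quotes("'/usr/local'"), Some("/usr/local".to_string()));
}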

View File

@@ -217,7 +217,9 @@ impl ModelClient {
let client = ApiCompactClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry));
let instructions = prompt.base_instructions.text.clone();
let instructions = prompt
.get_full_instructions(&self.state.model_info)
.into_owned();
let payload = ApiCompactionInput {
model: &self.state.model_info.slug,
input: &prompt.input,
@@ -226,11 +228,13 @@ impl ModelClient {
let mut extra_headers = ApiHeaderMap::new();
if let SessionSource::SubAgent(sub) = &self.state.session_source {
let subagent = match sub {
crate::protocol::SubAgentSource::Review => "review".to_string(),
crate::protocol::SubAgentSource::Compact => "compact".to_string(),
crate::protocol::SubAgentSource::ThreadSpawn { .. } => "collab_spawn".to_string(),
crate::protocol::SubAgentSource::Other(label) => label.clone(),
let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub {
label.clone()
} else {
serde_json::to_value(sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
};
if let Ok(val) = HeaderValue::from_str(&subagent) {
extra_headers.insert("x-openai-subagent", val);
@@ -272,7 +276,8 @@ impl ModelClientSession {
}
fn build_responses_request(&self, prompt: &Prompt) -> Result<ApiPrompt> {
let instructions = prompt.base_instructions.text.clone();
let model_info = self.state.model_info.clone();
let instructions = prompt.get_full_instructions(&model_info).into_owned();
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
Ok(build_api_prompt(prompt, instructions, tools_json))
}
@@ -443,7 +448,8 @@ impl ModelClientSession {
}
let auth_manager = self.state.auth_manager.clone();
let instructions = prompt.base_instructions.text.clone();
let model_info = self.state.model_info.clone();
let instructions = prompt.get_full_instructions(&model_info).into_owned();
let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
let api_prompt = build_api_prompt(prompt, instructions, tools_json);
let conversation_id = self.state.conversation_id.to_string();
@@ -625,13 +631,11 @@ fn build_api_prompt(prompt: &Prompt, instructions: String, tools_json: Vec<Value
}
}
fn experimental_feature_headers(config: &Config) -> ApiHeaderMap {
fn beta_feature_headers(config: &Config) -> ApiHeaderMap {
let enabled = FEATURES
.iter()
.filter_map(|spec| {
if spec.stage.experimental_menu_description().is_some()
&& config.features.enabled(spec.id)
{
if spec.stage.beta_menu_description().is_some() && config.features.enabled(spec.id) {
Some(spec.key)
} else {
None
@@ -652,14 +656,16 @@ fn build_responses_headers(
config: &Config,
turn_state: Option<&Arc<OnceLock<String>>>,
) -> ApiHeaderMap {
let mut headers = experimental_feature_headers(config);
let mut headers = beta_feature_headers(config);
headers.insert(
WEB_SEARCH_ELIGIBLE_HEADER,
HeaderValue::from_static(if config.web_search_mode == WebSearchMode::Disabled {
"false"
} else {
"true"
}),
HeaderValue::from_static(
if matches!(config.web_search_mode, Some(WebSearchMode::Disabled)) {
"false"
} else {
"true"
},
),
);
if let Some(turn_state) = turn_state
&& let Some(state) = turn_state.get()

View File

@@ -1,12 +1,12 @@
use crate::client_common::tools::ToolSpec;
use crate::config::types::Personality;
use crate::error::Result;
pub use codex_api::common::ResponseEvent;
use codex_protocol::models::BaseInstructions;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelInfo;
use futures::Stream;
use serde::Deserialize;
use serde_json::Value;
use std::borrow::Cow;
use std::collections::HashSet;
use std::pin::Pin;
use std::task::Context;
@@ -34,16 +34,22 @@ pub struct Prompt {
/// Whether parallel tool calls are permitted for this prompt.
pub(crate) parallel_tool_calls: bool,
pub base_instructions: BaseInstructions,
/// Optionally specify the personality of the model.
pub personality: Option<Personality>,
/// Optional override for the built-in BASE_INSTRUCTIONS.
pub base_instructions_override: Option<String>,
/// Optional the output schema for the model's response.
pub output_schema: Option<Value>,
}
impl Prompt {
pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelInfo) -> Cow<'a, str> {
Cow::Borrowed(
self.base_instructions_override
.as_deref()
.unwrap_or(model.base_instructions.as_str()),
)
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
let mut input = self.input.clone();
@@ -239,8 +245,76 @@ mod tests {
use codex_api::create_text_param_for_request;
use pretty_assertions::assert_eq;
use crate::config::test_config;
use crate::models_manager::manager::ModelsManager;
use super::*;
struct InstructionsTestCase {
pub slug: &'static str,
pub expects_apply_patch_instructions: bool,
}
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
..Default::default()
};
let prompt_with_apply_patch_instructions =
include_str!("../prompt_with_apply_patch_instructions.md");
let test_cases = vec![
InstructionsTestCase {
slug: "gpt-3.5",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-4.1",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-4o",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-5",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-5.1",
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "codex-mini-latest",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-oss:120b",
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "gpt-5.1-codex",
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "gpt-5.1-codex-max",
expects_apply_patch_instructions: false,
},
];
for test_case in test_cases {
let config = test_config();
let model_info = ModelsManager::construct_model_info_offline(test_case.slug, &config);
if test_case.expects_apply_patch_instructions {
assert_eq!(
model_info.base_instructions.as_str(),
prompt_with_apply_patch_instructions
);
}
let expected = model_info.base_instructions.as_str();
let full = prompt.get_full_instructions(&model_info);
assert_eq!(full, expected);
}
}
#[test]
fn serializes_text_verbosity_when_set() {
let input: Vec<ResponseItem> = vec![];
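Not part of the diff: the override-or-default rule that `get_full_instructions` expresses, reduced to plain strings. Borrowing through `Cow` avoids cloning the potentially large base instructions when no override is set; the function name here is illustrative.

use std::borrow::Cow;

fn full_instructions<'a>(override_text: Option<&'a str>, base: &'a str) -> Cow<'a, str> {
    // Prefer the explicit override, otherwise borrow the model's base instructions.
    Cow::Borrowed(override_text.unwrap_or(base))
}

fn main() {
    assert_eq!(full_instructions(None, "base"), "base");
    assert_eq!(full_instructions(Some("custom"), "base"), "custom");
}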

File diff suppressed because it is too large

View File

@@ -1,4 +1,3 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
@@ -10,12 +9,9 @@ use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ExecApprovalRequestEvent;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::RequestUserInputEvent;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SubAgentSource;
use codex_protocol::protocol::Submission;
use codex_protocol::request_user_input::RequestUserInputArgs;
use codex_protocol::request_user_input::RequestUserInputResponse;
use codex_protocol::user_input::UserInput;
use std::time::Duration;
use tokio::time::timeout;
@@ -57,7 +53,6 @@ pub(crate) async fn run_codex_thread_interactive(
initial_history.unwrap_or(InitialHistory::New),
SessionSource::SubAgent(SubAgentSource::Review),
parent_session.services.agent_control.clone(),
Vec::new(),
)
.await?;
let codex = Arc::new(codex);
@@ -93,7 +88,6 @@ pub(crate) async fn run_codex_thread_interactive(
tx_sub: tx_ops,
rx_event: rx_sub,
agent_status: codex.agent_status.clone(),
session: Arc::clone(&codex.session),
})
}
@@ -136,7 +130,6 @@ pub(crate) async fn run_codex_thread_one_shot(
let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let ops_tx = io.tx_sub.clone();
let agent_status = io.agent_status.clone();
let session = Arc::clone(&io.session);
let io_for_bridge = io;
tokio::spawn(async move {
while let Ok(event) = io_for_bridge.next_event().await {
@@ -169,7 +162,6 @@ pub(crate) async fn run_codex_thread_one_shot(
rx_event: rx_bridge,
tx_sub: tx_closed,
agent_status,
session,
})
}
@@ -237,20 +229,6 @@ async fn forward_events(
)
.await;
}
Event {
id,
msg: EventMsg::RequestUserInput(event),
} => {
handle_request_user_input(
&codex,
id,
&parent_session,
&parent_ctx,
event,
&cancel_token,
)
.await;
}
other => {
match tx_sub.send(other).or_cancel(&cancel_token).await {
Ok(Ok(())) => {}
@@ -356,55 +334,6 @@ async fn handle_patch_approval(
let _ = codex.submit(Op::PatchApproval { id, decision }).await;
}
async fn handle_request_user_input(
codex: &Codex,
id: String,
parent_session: &Session,
parent_ctx: &TurnContext,
event: RequestUserInputEvent,
cancel_token: &CancellationToken,
) {
let args = RequestUserInputArgs {
questions: event.questions,
};
let response_fut =
parent_session.request_user_input(parent_ctx, parent_ctx.sub_id.clone(), args);
let response = await_user_input_with_cancel(
response_fut,
parent_session,
&parent_ctx.sub_id,
cancel_token,
)
.await;
let _ = codex.submit(Op::UserInputAnswer { id, response }).await;
}
async fn await_user_input_with_cancel<F>(
fut: F,
parent_session: &Session,
sub_id: &str,
cancel_token: &CancellationToken,
) -> RequestUserInputResponse
where
F: core::future::Future<Output = Option<RequestUserInputResponse>>,
{
tokio::select! {
biased;
_ = cancel_token.cancelled() => {
let empty = RequestUserInputResponse {
answers: HashMap::new(),
};
parent_session
.notify_user_input_response(sub_id, empty.clone())
.await;
empty
}
response = fut => response.unwrap_or_else(|| RequestUserInputResponse {
answers: HashMap::new(),
}),
}
}
/// Await an approval decision, aborting on cancellation.
async fn await_approval_with_cancel<F>(
fut: F,
@@ -446,15 +375,15 @@ mod tests {
let (tx_events, rx_events) = bounded(1);
let (tx_sub, rx_sub) = bounded(SUBMISSION_CHANNEL_CAPACITY);
let (_agent_status_tx, agent_status) = watch::channel(AgentStatus::PendingInit);
let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx().await;
let codex = Arc::new(Codex {
next_id: AtomicU64::new(0),
tx_sub,
rx_event: rx_events,
agent_status,
session: Arc::clone(&session),
});
let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx().await;
let (tx_out, rx_out) = bounded(1);
tx_out
.send(Event {

View File

@@ -4,35 +4,18 @@ use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::Op;
use crate::protocol::Submission;
use codex_protocol::config_types::Personality;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use std::path::PathBuf;
use tokio::sync::watch;
#[derive(Clone, Debug)]
pub struct ThreadConfigSnapshot {
pub model: String,
pub model_provider_id: String,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub cwd: PathBuf,
pub reasoning_effort: Option<ReasoningEffort>,
pub personality: Option<Personality>,
pub session_source: SessionSource,
}
pub struct CodexThread {
codex: Codex,
rollout_path: Option<PathBuf>,
rollout_path: PathBuf,
}
/// Conduit for the bidirectional stream of messages that compose a thread
/// (formerly called a conversation) in Codex.
impl CodexThread {
pub(crate) fn new(codex: Codex, rollout_path: Option<PathBuf>) -> Self {
pub(crate) fn new(codex: Codex, rollout_path: PathBuf) -> Self {
Self {
codex,
rollout_path,
@@ -60,11 +43,7 @@ impl CodexThread {
self.codex.agent_status.clone()
}
pub fn rollout_path(&self) -> Option<PathBuf> {
pub fn rollout_path(&self) -> PathBuf {
self.rollout_path.clone()
}
pub async fn config_snapshot(&self) -> ThreadConfigSnapshot {
self.codex.thread_config_snapshot().await
}
}

View File

@@ -82,11 +82,6 @@ fn is_dangerous_powershell(command: &[String]) -> bool {
}
}
// Check for force delete operations (e.g., Remove-Item -Force)
if has_force_delete_cmdlet(&tokens_lc) {
return true;
}
false
}
@@ -112,49 +107,15 @@ fn is_dangerous_cmd(command: &[String]) -> bool {
}
}
let remaining: Vec<String> = iter.cloned().collect();
if remaining.is_empty() {
let Some(first_cmd) = iter.next() else {
return false;
};
// Classic `cmd /c start https://...` ShellExecute path.
if !first_cmd.eq_ignore_ascii_case("start") {
return false;
}
let cmd_tokens: Vec<String> = match remaining.as_slice() {
[only] => shlex_split(only).unwrap_or_else(|| vec![only.clone()]),
_ => remaining,
};
// Refine tokens by splitting concatenated CMD operators (e.g. "echo hi&del")
let tokens: Vec<String> = cmd_tokens
.into_iter()
.flat_map(|t| split_embedded_cmd_operators(&t))
.collect();
const CMD_SEPARATORS: &[&str] = &["&", "&&", "|", "||"];
tokens
.split(|t| CMD_SEPARATORS.contains(&t.as_str()))
.any(|segment| {
let Some(cmd) = segment.first() else {
return false;
};
// Classic `cmd /c ... start https://...` ShellExecute path.
if cmd.eq_ignore_ascii_case("start") && args_have_url(segment) {
return true;
}
// Force delete: del /f, erase /f
if (cmd.eq_ignore_ascii_case("del") || cmd.eq_ignore_ascii_case("erase"))
&& has_force_flag_cmd(segment)
{
return true;
}
// Recursive directory removal: rd /s /q, rmdir /s /q
if (cmd.eq_ignore_ascii_case("rd") || cmd.eq_ignore_ascii_case("rmdir"))
&& has_recursive_flag_cmd(segment)
&& has_quiet_flag_cmd(segment)
{
return true;
}
false
})
let remaining: Vec<String> = iter.cloned().collect();
args_have_url(&remaining)
}
fn is_direct_gui_launch(command: &[String]) -> bool {
@@ -188,123 +149,6 @@ fn is_direct_gui_launch(command: &[String]) -> bool {
false
}
fn split_embedded_cmd_operators(token: &str) -> Vec<String> {
// Split concatenated CMD operators so `echo hi&del` becomes `["echo hi", "&", "del"]`.
// Handles `&`, `&&`, `|`, `||`. Best-effort (CMD escaping is weird by nature).
let mut parts = Vec::new();
let mut start = 0;
let mut it = token.char_indices().peekable();
while let Some((i, ch)) = it.next() {
if ch == '&' || ch == '|' {
if i > start {
parts.push(token[start..i].to_string());
}
// Detect doubled operator: && or ||
let op_len = match it.peek() {
Some(&(j, next)) if next == ch => {
it.next(); // consume second char
(j + next.len_utf8()) - i
}
_ => ch.len_utf8(),
};
parts.push(token[i..i + op_len].to_string());
start = i + op_len;
}
}
if start < token.len() {
parts.push(token[start..].to_string());
}
parts.retain(|s| !s.trim().is_empty());
parts
}
fn has_force_delete_cmdlet(tokens: &[String]) -> bool {
const DELETE_CMDLETS: &[&str] = &["remove-item", "ri", "rm", "del", "erase", "rd", "rmdir"];
// Hard separators that end a command segment (so -Force must be in same segment)
const SEG_SEPS: &[char] = &[';', '|', '&', '\n', '\r', '\t'];
// Soft separators: punctuation that can stick to tokens (blocks, parens, brackets, commas, etc.)
const SOFT_SEPS: &[char] = &['{', '}', '(', ')', '[', ']', ',', ';'];
// Build rough command segments first
let mut segments: Vec<Vec<String>> = vec![Vec::new()];
for tok in tokens {
// If token itself contains segment separators, split it (best-effort)
let mut cur = String::new();
for ch in tok.chars() {
if SEG_SEPS.contains(&ch) {
let s = cur.trim();
if let Some(msg) = segments.last_mut()
&& !s.is_empty()
{
msg.push(s.to_string());
}
cur.clear();
if let Some(last) = segments.last()
&& !last.is_empty()
{
segments.push(Vec::new());
}
} else {
cur.push(ch);
}
}
let s = cur.trim();
if let Some(segment) = segments.last_mut()
&& !s.is_empty()
{
segment.push(s.to_string());
}
}
// Now, inside each segment, normalize tokens by splitting on soft punctuation
segments.into_iter().any(|seg| {
let atoms = seg
.iter()
.flat_map(|t| t.split(|c| SOFT_SEPS.contains(&c)))
.map(str::trim)
.filter(|s| !s.is_empty());
let mut has_delete = false;
let mut has_force = false;
for a in atoms {
if DELETE_CMDLETS.iter().any(|cmd| a.eq_ignore_ascii_case(cmd)) {
has_delete = true;
}
if a.eq_ignore_ascii_case("-force")
|| a.get(..7)
.is_some_and(|p| p.eq_ignore_ascii_case("-force:"))
{
has_force = true;
}
}
has_delete && has_force
})
}
/// Check for /f or /F flag in CMD del/erase arguments.
fn has_force_flag_cmd(args: &[String]) -> bool {
args.iter().any(|a| a.eq_ignore_ascii_case("/f"))
}
/// Check for /s or /S flag in CMD rd/rmdir arguments.
fn has_recursive_flag_cmd(args: &[String]) -> bool {
args.iter().any(|a| a.eq_ignore_ascii_case("/s"))
}
/// Check for /q or /Q flag in CMD rd/rmdir arguments.
fn has_quiet_flag_cmd(args: &[String]) -> bool {
args.iter().any(|a| a.eq_ignore_ascii_case("/q"))
}
fn args_have_url(args: &[String]) -> bool {
args.iter().any(|arg| looks_like_url(arg))
}
@@ -469,287 +313,4 @@ mod tests {
"."
])));
}
// Force delete tests for PowerShell
#[test]
fn powershell_remove_item_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Remove-Item test -Force"
])));
}
#[test]
fn powershell_remove_item_recurse_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Remove-Item test -Recurse -Force"
])));
}
#[test]
fn powershell_ri_alias_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"pwsh",
"-Command",
"ri test -Force"
])));
}
#[test]
fn powershell_remove_item_without_force_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Remove-Item test"
])));
}
// Force delete tests for CMD
#[test]
fn cmd_del_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "del", "/f", "test.txt"
])));
}
#[test]
fn cmd_erase_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "erase", "/f", "test.txt"
])));
}
#[test]
fn cmd_del_without_force_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "del", "test.txt"
])));
}
#[test]
fn cmd_rd_recursive_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "rd", "/s", "/q", "test"
])));
}
#[test]
fn cmd_rd_without_quiet_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "rd", "/s", "test"
])));
}
#[test]
fn cmd_rmdir_recursive_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "rmdir", "/s", "/q", "test"
])));
}
// Test exact scenario from issue #8567
#[test]
fn powershell_remove_item_path_recurse_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Remove-Item -Path 'test' -Recurse -Force"
])));
}
#[test]
fn powershell_remove_item_force_with_semicolon_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Remove-Item test -Force; Write-Host done"
])));
}
#[test]
fn powershell_remove_item_force_inside_block_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"if ($true) { Remove-Item test -Force}"
])));
}
#[test]
fn powershell_remove_item_force_inside_brackets_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"[void]( Remove-Item test -Force)]"
])));
}
#[test]
fn cmd_del_path_containing_f_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"del",
"C:/foo/bar.txt"
])));
}
#[test]
fn cmd_rd_path_containing_s_is_not_flagged() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"rd",
"C:/source"
])));
}
#[test]
fn cmd_bypass_chained_del_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "echo", "hello", "&", "del", "/f", "file.txt"
])));
}
#[test]
fn powershell_chained_no_space_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Write-Host hi;Remove-Item -Force C:\\tmp"
])));
}
#[test]
fn powershell_comma_separated_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"del,-Force,C:\\foo"
])));
}
#[test]
fn cmd_echo_del_is_not_dangerous() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "echo", "del", "/f"
])));
}
#[test]
fn cmd_del_single_string_argument_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"del /f file.txt"
])));
}
#[test]
fn cmd_del_chained_single_string_argument_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"echo hello & del /f file.txt"
])));
}
#[test]
fn cmd_chained_no_space_del_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"echo hi&del /f file.txt"
])));
}
#[test]
fn cmd_chained_andand_no_space_del_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"echo hi&&del /f file.txt"
])));
}
#[test]
fn cmd_chained_oror_no_space_del_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"echo hi||del /f file.txt"
])));
}
#[test]
fn cmd_start_url_single_string_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"start https://example.com"
])));
}
#[test]
fn cmd_chained_no_space_rmdir_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
"echo hi&rmdir /s /q testdir"
])));
}
#[test]
fn cmd_del_force_uppercase_flag_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd", "/c", "DEL", "/F", "file.txt"
])));
}
#[test]
fn cmdexe_r_del_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd.exe", "/r", "del", "/f", "file.txt"
])));
}
#[test]
fn cmd_start_quoted_url_single_string_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
r#"start "https://example.com""#
])));
}
#[test]
fn cmd_start_title_then_url_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"cmd",
"/c",
r#"start "" https://example.com"#
])));
}
#[test]
fn powershell_rm_alias_force_is_dangerous() {
assert!(is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"rm test -Force"
])));
}
#[test]
fn powershell_benign_force_separate_command_is_not_dangerous() {
assert!(!is_dangerous_command_windows(&vec_str(&[
"powershell",
"-Command",
"Get-ChildItem -Force; Remove-Item test"
])));
}
}
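Not part of the diff: a standalone sketch of the operator-splitting idea behind `split_embedded_cmd_operators` above, so a token like `echo hi&&del` becomes `["echo hi", "&&", "del"]` before segment-by-segment checks. Like the original, it is best-effort and deliberately ignores CMD quoting.

fn split_cmd_operators(token: &str) -> Vec<String> {
    let mut parts = Vec::new();
    let mut start = 0;
    let mut chars = token.char_indices().peekable();
    while let Some((i, ch)) = chars.next() {
        if ch == '&' || ch == '|' {
            if i > start {
                parts.push(token[start..i].to_string());
            }
            // Detect a doubled operator (&& or ||) and keep it as one token.
            let len = match chars.peek() {
                Some(&(_, next)) if next == ch => {
                    chars.next();
                    2
                }
                _ => 1,
            };
            parts.push(token[i..i + len].to_string());
            start = i + len;
        }
    }
    if start < token.len() {
        parts.push(token[start..].to_string());
    }
    parts.retain(|p| !p.trim().is_empty());
    parts
}

fn main() {
    assert_eq!(split_cmd_operators("echo hi&&del"), vec!["echo hi", "&&", "del"]);
    assert_eq!(split_cmd_operators("a|b"), vec!["a", "|", "b"]);
}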

View File

@@ -10,18 +10,15 @@ use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::features::Feature;
use crate::protocol::CompactedItem;
use crate::protocol::ContextCompactionEndedEvent;
use crate::protocol::ContextCompactionStartedEvent;
use crate::protocol::ContextCompactedEvent;
use crate::protocol::EventMsg;
use crate::protocol::TurnContextItem;
use crate::protocol::TurnStartedEvent;
use crate::protocol::WarningEvent;
use crate::session_prefix::TURN_ABORTED_OPEN_TAG;
use crate::truncate::TruncationPolicy;
use crate::truncate::approx_token_count;
use crate::truncate::truncate_text;
use crate::util::backoff;
use codex_protocol::items::ContextCompactionItem;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseInputItem;
@@ -30,7 +27,6 @@ use codex_protocol::protocol::RolloutItem;
use codex_protocol::user_input::UserInput;
use futures::prelude::*;
use tracing::error;
use uuid::Uuid;
pub const SUMMARIZATION_PROMPT: &str = include_str!("../templates/compact/prompt.md");
pub const SUMMARY_PREFIX: &str = include_str!("../templates/compact/summary_prefix.md");
@@ -50,7 +46,7 @@ pub(crate) async fn run_inline_auto_compact_task(
let prompt = turn_context.compact_prompt().to_string();
let input = vec![UserInput::Text {
text: prompt,
// Compaction prompt is synthesized; no UI element ranges to preserve.
// Plain text conversion has no UI element ranges.
text_elements: Vec::new(),
}];
@@ -74,9 +70,6 @@ async fn run_compact_task_inner(
turn_context: Arc<TurnContext>,
input: Vec<UserInput>,
) {
let compaction_item = compaction_turn_item();
emit_compaction_started(&sess, &turn_context, &compaction_item).await;
let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input);
let mut history = sess.clone_history().await;
@@ -90,20 +83,14 @@ async fn run_compact_task_inner(
let max_retries = turn_context.client.get_provider().stream_max_retries();
let mut retries = 0;
// TODO: If we need to guarantee the persisted mode always matches the prompt used for this
// turn, capture it in TurnContext at creation time. Using SessionConfiguration here avoids
// duplicating model settings on TurnContext, but an Op after turn start could update the
// session config before this write occurs.
let collaboration_mode = sess.current_collaboration_mode().await;
let rollout_item = RolloutItem::TurnContext(TurnContextItem {
cwd: turn_context.cwd.clone(),
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.client.get_model(),
personality: turn_context.personality,
collaboration_mode: Some(collaboration_mode),
effort: turn_context.client.get_reasoning_effort(),
summary: turn_context.client.get_reasoning_summary(),
base_instructions: turn_context.base_instructions.clone(),
user_instructions: turn_context.user_instructions.clone(),
developer_instructions: turn_context.developer_instructions.clone(),
final_output_json_schema: turn_context.final_output_json_schema.clone(),
@@ -117,8 +104,6 @@ async fn run_compact_task_inner(
let turn_input_len = turn_input.len();
let prompt = Prompt {
input: turn_input,
base_instructions: sess.get_base_instructions().await,
personality: turn_context.personality,
..Default::default()
};
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;
@@ -137,7 +122,6 @@ async fn run_compact_task_inner(
break;
}
Err(CodexErr::Interrupted) => {
emit_compaction_ended(&sess, &turn_context, compaction_item.clone()).await;
return;
}
Err(e @ CodexErr::ContextWindowExceeded) => {
@@ -154,7 +138,6 @@ async fn run_compact_task_inner(
sess.set_total_tokens_full(turn_context.as_ref()).await;
let event = EventMsg::Error(e.to_error_event(None));
sess.send_event(&turn_context, event).await;
emit_compaction_ended(&sess, &turn_context, compaction_item.clone()).await;
return;
}
Err(e) => {
@@ -172,7 +155,6 @@ async fn run_compact_task_inner(
} else {
let event = EventMsg::Error(e.to_error_event(None));
sess.send_event(&turn_context, event).await;
emit_compaction_ended(&sess, &turn_context, compaction_item.clone()).await;
return;
}
}
@@ -185,7 +167,7 @@ async fn run_compact_task_inner(
let summary_text = format!("{SUMMARY_PREFIX}\n{summary_suffix}");
let user_messages = collect_user_messages(history_items);
let initial_context = sess.build_initial_context(turn_context.as_ref()).await;
let initial_context = sess.build_initial_context(turn_context.as_ref());
let mut new_history = build_compacted_history(initial_context, &user_messages, &summary_text);
let ghost_snapshots: Vec<ResponseItem> = history_items
.iter()
@@ -202,7 +184,8 @@ async fn run_compact_task_inner(
});
sess.persist_rollout_items(&[rollout_item]).await;
emit_compaction_ended(&sess, &turn_context, compaction_item).await;
let event = EventMsg::ContextCompacted(ContextCompactedEvent {});
sess.send_event(&turn_context, event).await;
let warning = EventMsg::Warning(WarningEvent {
message: "Heads up: Long threads and multiple compactions can cause the model to be less accurate. Start a new thread when possible to keep threads small and targeted.".to_string(),
@@ -210,38 +193,6 @@ async fn run_compact_task_inner(
sess.send_event(&turn_context, warning).await;
}
fn compaction_turn_item() -> TurnItem {
TurnItem::ContextCompaction(ContextCompactionItem {
id: Uuid::new_v4().to_string(),
})
}
pub(crate) async fn emit_compaction_started(
sess: &Session,
turn_context: &TurnContext,
item: &TurnItem,
) {
sess.send_event(
turn_context,
EventMsg::ContextCompactionStarted(ContextCompactionStartedEvent {}),
)
.await;
sess.emit_turn_item_started(turn_context, item).await;
}
pub(crate) async fn emit_compaction_ended(
sess: &Session,
turn_context: &TurnContext,
item: TurnItem,
) {
sess.emit_turn_item_completed(turn_context, item).await;
sess.send_event(
turn_context,
EventMsg::ContextCompactionEnded(ContextCompactionEndedEvent {}),
)
.await;
}
pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> {
let mut pieces = Vec::new();
for item in content {
@@ -272,31 +223,11 @@ pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> {
Some(user.message())
}
}
_ => collect_turn_aborted_marker(item),
_ => None,
})
.collect()
}
fn collect_turn_aborted_marker(item: &ResponseItem) -> Option<String> {
let ResponseItem::Message { role, content, .. } = item else {
return None;
};
if role != "user" {
return None;
}
let text = content_items_to_text(content)?;
if text
.trim_start()
.to_ascii_lowercase()
.starts_with(TURN_ABORTED_OPEN_TAG)
{
Some(text)
} else {
None
}
}
pub(crate) fn is_summary_message(message: &str) -> bool {
message.starts_with(format!("{SUMMARY_PREFIX}\n").as_str())
}
@@ -347,7 +278,6 @@ fn build_compacted_history_with_limit(
content: vec![ContentItem::InputText {
text: message.clone(),
}],
end_turn: None,
});
}
@@ -361,7 +291,6 @@ fn build_compacted_history_with_limit(
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: summary_text }],
end_turn: None,
});
history
@@ -387,9 +316,6 @@ async fn drain_to_completed(
sess.record_into_history(std::slice::from_ref(&item), turn_context)
.await;
}
Ok(ResponseEvent::ServerReasoningIncluded(included)) => {
sess.set_server_reasoning_included(included).await;
}
Ok(ResponseEvent::RateLimits(snapshot)) => {
sess.update_rate_limits(turn_context, snapshot).await;
}
@@ -408,7 +334,6 @@ async fn drain_to_completed(
mod tests {
use super::*;
use crate::session_prefix::TURN_ABORTED_OPEN_TAG;
use pretty_assertions::assert_eq;
#[test]
@@ -450,7 +375,6 @@ mod tests {
content: vec![ContentItem::OutputText {
text: "ignored".to_string(),
}],
end_turn: None,
},
ResponseItem::Message {
id: Some("user".to_string()),
@@ -458,7 +382,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "first".to_string(),
}],
end_turn: None,
},
ResponseItem::Other,
];
@@ -478,7 +401,6 @@ mod tests {
text: "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\ndo things\n</INSTRUCTIONS>"
.to_string(),
}],
end_turn: None,
},
ResponseItem::Message {
id: None,
@@ -486,7 +408,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "<ENVIRONMENT_CONTEXT>cwd=/tmp</ENVIRONMENT_CONTEXT>".to_string(),
}],
end_turn: None,
},
ResponseItem::Message {
id: None,
@@ -494,7 +415,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "real user message".to_string(),
}],
end_turn: None,
},
];
@@ -566,43 +486,4 @@ mod tests {
};
assert_eq!(summary, summary_text);
}
#[test]
fn build_compacted_history_preserves_turn_aborted_markers() {
let marker = format!(
"{TURN_ABORTED_OPEN_TAG}\n <turn_id>turn-1</turn_id>\n <reason>interrupted</reason>\n</turn_aborted>"
);
let items = vec![
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: marker.clone(),
}],
end_turn: None,
},
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "real user message".to_string(),
}],
end_turn: None,
},
];
let user_messages = collect_user_messages(&items);
let history = build_compacted_history(Vec::new(), &user_messages, "SUMMARY");
let found_marker = history.iter().any(|item| match item {
ResponseItem::Message { role, content, .. } if role == "user" => {
content_items_to_text(content).is_some_and(|text| text == marker)
}
_ => false,
});
assert!(
found_marker,
"expected compacted history to retain <turn_aborted> marker"
);
}
}
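The compaction hunks above revolve around one shape: walk the transcript, keep only the user-authored text, then rebuild a shorter history that ends with a summary message. Below is a minimal, self-contained sketch of that shape using simplified stand-in types, not the crate's real ResponseItem/ContentItem definitions, and ignoring the prefix/marker filtering the real code does.

// Simplified stand-ins for illustration only.
#[derive(Clone, Debug, PartialEq)]
enum Item {
    Message { role: String, text: String },
    Other,
}

// Mirror of the filter-map pattern in collect_user_messages: keep only
// user-authored message text, drop everything else.
fn collect_user_text(items: &[Item]) -> Vec<String> {
    items
        .iter()
        .filter_map(|item| match item {
            Item::Message { role, text } if role == "user" => Some(text.clone()),
            _ => None,
        })
        .collect()
}

// Rebuild a compacted history: the surviving user messages followed by one
// summary message, roughly what build_compacted_history does with a limit.
fn build_compacted(user_messages: &[String], summary: &str) -> Vec<Item> {
    let mut history: Vec<Item> = user_messages
        .iter()
        .map(|text| Item::Message { role: "user".to_string(), text: text.clone() })
        .collect();
    history.push(Item::Message { role: "user".to_string(), text: summary.to_string() });
    history
}

fn main() {
    let items = vec![
        Item::Message { role: "assistant".to_string(), text: "ignored".to_string() },
        Item::Message { role: "user".to_string(), text: "first".to_string() },
        Item::Other,
    ];
    let users = collect_user_text(&items);
    let compacted = build_compacted(&users, "SUMMARY");
    assert_eq!(compacted.len(), 2);
}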

View File

@@ -3,17 +3,13 @@ use std::sync::Arc;
use crate::Prompt;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::compact::emit_compaction_ended;
use crate::compact::emit_compaction_started;
use crate::error::Result as CodexResult;
use crate::protocol::CompactedItem;
use crate::protocol::ContextCompactedEvent;
use crate::protocol::EventMsg;
use crate::protocol::RolloutItem;
use crate::protocol::TurnStartedEvent;
use codex_protocol::items::ContextCompactionItem;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
use uuid::Uuid;
pub(crate) async fn run_inline_remote_auto_compact_task(
sess: Arc<Session>,
@@ -32,19 +28,12 @@ pub(crate) async fn run_remote_compact_task(sess: Arc<Session>, turn_context: Ar
}
async fn run_remote_compact_task_inner(sess: &Arc<Session>, turn_context: &Arc<TurnContext>) {
let compaction_item = TurnItem::ContextCompaction(ContextCompactionItem {
id: Uuid::new_v4().to_string(),
});
emit_compaction_started(sess, turn_context, &compaction_item).await;
if let Err(err) = run_remote_compact_task_inner_impl(sess, turn_context).await {
let event = EventMsg::Error(
err.to_error_event(Some("Error running remote compact task".to_string())),
);
sess.send_event(turn_context, event).await;
}
emit_compaction_ended(sess, turn_context, compaction_item).await;
}
async fn run_remote_compact_task_inner_impl(
@@ -65,8 +54,7 @@ async fn run_remote_compact_task_inner_impl(
input: history.for_prompt(),
tools: vec![],
parallel_tool_calls: false,
base_instructions: sess.get_base_instructions().await,
personality: turn_context.personality,
base_instructions_override: turn_context.base_instructions.clone(),
output_schema: None,
};
@@ -88,5 +76,8 @@ async fn run_remote_compact_task_inner_impl(
sess.persist_rollout_items(&[RolloutItem::Compacted(compacted_item)])
.await;
let event = EventMsg::ContextCompacted(ContextCompactedEvent {});
sess.send_event(turn_context, event).await;
Ok(())
}
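The removed run_remote_compact_task_inner illustrates a small but useful shape: bracket fallible inner work with started/ended notifications so listeners always see a matched pair, even when the work fails and the failure is reported as an event. A hedged, synchronous sketch of that shape with made-up event names (the real code is async and routes everything through Session::send_event):

#[derive(Debug)]
enum Event {
    CompactionStarted,
    Error(String),
    CompactionEnded,
}

fn run_with_brackets<F>(events: &mut Vec<Event>, work: F)
where
    F: FnOnce() -> Result<(), String>,
{
    events.push(Event::CompactionStarted);
    // Report the failure as an event instead of propagating it, so the
    // ended notification below is always emitted.
    if let Err(err) = work() {
        events.push(Event::Error(err));
    }
    events.push(Event::CompactionEnded);
}

fn main() {
    let mut events = Vec::new();
    run_with_brackets(&mut events, || Err("remote compaction failed".to_string()));
    assert!(matches!(events.first(), Some(Event::CompactionStarted)));
    assert!(matches!(events.last(), Some(Event::CompactionEnded)));
}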

View File

@@ -1,17 +1,14 @@
use crate::config::CONFIG_TOML_FILE;
use crate::config::types::McpServerConfig;
use crate::config::types::Notice;
use crate::path_utils::resolve_symlink_write_paths;
use crate::path_utils::write_atomically;
use anyhow::Context;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
use tokio::task;
use toml_edit::ArrayOfTables;
use toml_edit::DocumentMut;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;
@@ -25,8 +22,6 @@ pub enum ConfigEdit {
model: Option<String>,
effort: Option<ReasoningEffort>,
},
/// Update the active (or default) model personality.
SetModelPersonality { personality: Option<Personality> },
/// Toggle the acknowledgement flag under `[notice]`.
SetNoticeHideFullAccessWarning(bool),
/// Toggle the Windows world-writable directories warning acknowledgement flag.
@@ -41,8 +36,6 @@ pub enum ConfigEdit {
RecordModelMigrationSeen { from: String, to: String },
/// Replace the entire `[mcp_servers]` table.
ReplaceMcpServers(BTreeMap<String, McpServerConfig>),
/// Set or clear a skill config entry under `[[skills.config]]`.
SetSkillConfig { path: PathBuf, enabled: bool },
/// Set trust_level under `[projects."<path>"]`,
/// migrating inline tables to explicit tables.
SetProjectTrustLevel { path: PathBuf, level: TrustLevel },
@@ -167,11 +160,6 @@ mod document_helpers {
{
entry["disabled_tools"] = array_from_iter(disabled_tools.iter().cloned());
}
if let Some(scopes) = &config.scopes
&& !scopes.is_empty()
{
entry["scopes"] = array_from_iter(scopes.iter().cloned());
}
entry
}
@@ -277,10 +265,6 @@ impl ConfigDocument {
);
mutated
}),
ConfigEdit::SetModelPersonality { personality } => Ok(self.write_profile_value(
&["model_personality"],
personality.map(|personality| value(personality.to_string())),
)),
ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, "hide_full_access_warning"],
@@ -314,9 +298,6 @@ impl ConfigDocument {
value(*acknowledged),
)),
ConfigEdit::ReplaceMcpServers(servers) => Ok(self.replace_mcp_servers(servers)),
ConfigEdit::SetSkillConfig { path, enabled } => {
Ok(self.set_skill_config(path.as_path(), *enabled))
}
ConfigEdit::SetPath { segments, value } => Ok(self.insert(segments, value.clone())),
ConfigEdit::ClearPath { segments } => Ok(self.clear_owned(segments)),
ConfigEdit::SetProjectTrustLevel { path, level } => {
@@ -406,113 +387,6 @@ impl ConfigDocument {
true
}
fn set_skill_config(&mut self, path: &Path, enabled: bool) -> bool {
let normalized_path = normalize_skill_config_path(path);
let mut remove_skills_table = false;
let mut mutated = false;
{
let root = self.doc.as_table_mut();
let skills_item = match root.get_mut("skills") {
Some(item) => item,
None => {
if enabled {
return false;
}
root.insert(
"skills",
TomlItem::Table(document_helpers::new_implicit_table()),
);
let Some(item) = root.get_mut("skills") else {
return false;
};
item
}
};
if document_helpers::ensure_table_for_write(skills_item).is_none() {
if enabled {
return false;
}
*skills_item = TomlItem::Table(document_helpers::new_implicit_table());
}
let Some(skills_table) = skills_item.as_table_mut() else {
return false;
};
let config_item = match skills_table.get_mut("config") {
Some(item) => item,
None => {
if enabled {
return false;
}
skills_table.insert("config", TomlItem::ArrayOfTables(ArrayOfTables::new()));
let Some(item) = skills_table.get_mut("config") else {
return false;
};
item
}
};
if !matches!(config_item, TomlItem::ArrayOfTables(_)) {
if enabled {
return false;
}
*config_item = TomlItem::ArrayOfTables(ArrayOfTables::new());
}
let TomlItem::ArrayOfTables(overrides) = config_item else {
return false;
};
let existing_index = overrides.iter().enumerate().find_map(|(idx, table)| {
table
.get("path")
.and_then(|item| item.as_str())
.map(Path::new)
.map(normalize_skill_config_path)
.filter(|value| *value == normalized_path)
.map(|_| idx)
});
if enabled {
if let Some(index) = existing_index {
overrides.remove(index);
mutated = true;
if overrides.is_empty() {
skills_table.remove("config");
if skills_table.is_empty() {
remove_skills_table = true;
}
}
}
} else if let Some(index) = existing_index {
for (idx, table) in overrides.iter_mut().enumerate() {
if idx == index {
table["path"] = value(normalized_path);
table["enabled"] = value(false);
mutated = true;
break;
}
}
} else {
let mut entry = TomlTable::new();
entry.set_implicit(false);
entry["path"] = value(normalized_path);
entry["enabled"] = value(false);
overrides.push(entry);
mutated = true;
}
}
if remove_skills_table {
let root = self.doc.as_table_mut();
root.remove("skills");
}
mutated
}
fn scoped_segments(&self, scope: Scope, segments: &[&str]) -> Vec<String> {
let resolved: Vec<String> = segments
.iter()
@@ -620,13 +494,6 @@ impl ConfigDocument {
}
}
fn normalize_skill_config_path(path: &Path) -> String {
dunce::canonicalize(path)
.unwrap_or_else(|_| path.to_path_buf())
.to_string_lossy()
.to_string()
}
/// Persist edits using a blocking strategy.
pub fn apply_blocking(
codex_home: &Path,
@@ -638,14 +505,10 @@ pub fn apply_blocking(
}
let config_path = codex_home.join(CONFIG_TOML_FILE);
let write_paths = resolve_symlink_write_paths(&config_path)?;
let serialized = match write_paths.read_path {
Some(path) => match std::fs::read_to_string(&path) {
Ok(contents) => contents,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(),
Err(err) => return Err(err.into()),
},
None => String::new(),
let serialized = match std::fs::read_to_string(&config_path) {
Ok(contents) => contents,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(),
Err(err) => return Err(err.into()),
};
let doc = if serialized.is_empty() {
@@ -671,13 +534,22 @@ pub fn apply_blocking(
return Ok(());
}
write_atomically(&write_paths.write_path, &document.doc.to_string()).with_context(|| {
std::fs::create_dir_all(codex_home).with_context(|| {
format!(
"failed to persist config.toml at {}",
write_paths.write_path.display()
"failed to create Codex home directory at {}",
codex_home.display()
)
})?;
let tmp = NamedTempFile::new_in(codex_home)?;
std::fs::write(tmp.path(), document.doc.to_string()).with_context(|| {
format!(
"failed to write temporary config file at {}",
tmp.path().display()
)
})?;
tmp.persist(config_path)?;
Ok(())
}
@@ -724,12 +596,6 @@ impl ConfigEditsBuilder {
self
}
pub fn set_model_personality(mut self, personality: Option<Personality>) -> Self {
self.edits
.push(ConfigEdit::SetModelPersonality { personality });
self
}
pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged));
@@ -827,8 +693,6 @@ mod tests {
use crate::config::types::McpServerTransportConfig;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
#[cfg(unix)]
use std::os::unix::fs::symlink;
use tempfile::tempdir;
use toml::Value as TomlValue;
@@ -873,54 +737,6 @@ model_reasoning_effort = "high"
assert_eq!(contents, "enabled = true\n");
}
#[test]
fn set_skill_config_writes_disabled_entry() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
ConfigEditsBuilder::new(codex_home)
.with_edits([ConfigEdit::SetSkillConfig {
path: PathBuf::from("/tmp/skills/demo/SKILL.md"),
enabled: false,
}])
.apply_blocking()
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[[skills.config]]
path = "/tmp/skills/demo/SKILL.md"
enabled = false
"#;
assert_eq!(contents, expected);
}
#[test]
fn set_skill_config_removes_entry_when_enabled() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"[[skills.config]]
path = "/tmp/skills/demo/SKILL.md"
enabled = false
"#,
)
.expect("seed config");
ConfigEditsBuilder::new(codex_home)
.with_edits([ConfigEdit::SetSkillConfig {
path: PathBuf::from("/tmp/skills/demo/SKILL.md"),
enabled: true,
}])
.apply_blocking()
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, "");
}
#[test]
fn blocking_set_model_preserves_inline_table_contents() {
let tmp = tempdir().expect("tmpdir");
@@ -968,71 +784,6 @@ profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } }
);
}
#[cfg(unix)]
#[test]
fn blocking_set_model_writes_through_symlink_chain() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
let target_dir = tempdir().expect("target dir");
let target_path = target_dir.path().join(CONFIG_TOML_FILE);
let link_path = codex_home.join("config-link.toml");
let config_path = codex_home.join(CONFIG_TOML_FILE);
symlink(&target_path, &link_path).expect("symlink link");
symlink("config-link.toml", &config_path).expect("symlink config");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: Some("gpt-5.1-codex".to_string()),
effort: Some(ReasoningEffort::High),
}],
)
.expect("persist");
let meta = std::fs::symlink_metadata(&config_path).expect("config metadata");
assert!(meta.file_type().is_symlink());
let contents = std::fs::read_to_string(&target_path).expect("read target");
let expected = r#"model = "gpt-5.1-codex"
model_reasoning_effort = "high"
"#;
assert_eq!(contents, expected);
}
#[cfg(unix)]
#[test]
fn blocking_set_model_replaces_symlink_on_cycle() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
let link_a = codex_home.join("a.toml");
let link_b = codex_home.join("b.toml");
let config_path = codex_home.join(CONFIG_TOML_FILE);
symlink("b.toml", &link_a).expect("symlink a");
symlink("a.toml", &link_b).expect("symlink b");
symlink("a.toml", &config_path).expect("symlink config");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetModel {
model: Some("gpt-5.1-codex".to_string()),
effort: None,
}],
)
.expect("persist");
let meta = std::fs::symlink_metadata(&config_path).expect("config metadata");
assert!(!meta.file_type().is_symlink());
let contents = std::fs::read_to_string(&config_path).expect("read config");
let expected = r#"model = "gpt-5.1-codex"
"#;
assert_eq!(contents, expected);
}
#[test]
fn batch_write_table_upsert_preserves_inline_comments() {
let tmp = tempdir().expect("tmpdir");
@@ -1378,7 +1129,6 @@ gpt-5 = "gpt-5.1"
tool_timeout_sec: None,
enabled_tools: Some(vec!["one".to_string(), "two".to_string()]),
disabled_tools: None,
scopes: None,
},
);
@@ -1401,7 +1151,6 @@ gpt-5 = "gpt-5.1"
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: Some(vec!["forbidden".to_string()]),
scopes: None,
},
);
@@ -1467,7 +1216,6 @@ foo = { command = "cmd" }
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
);
@@ -1512,7 +1260,6 @@ foo = { command = "cmd" } # keep me
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
);
@@ -1556,7 +1303,6 @@ foo = { command = "cmd", args = ["--flag"] } # keep me
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
);
@@ -1601,7 +1347,6 @@ foo = { command = "cmd" }
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
},
);
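The reworked apply_blocking body above uses the usual tempfile-then-rename trick for crash-safe writes: stage the serialized document in a temporary file inside the target directory, then persist it over config.toml so readers only ever see the old or the new contents. A minimal sketch of that pattern, assuming the tempfile and anyhow crates; the function and file name here are illustrative, not the crate's API:

use std::path::Path;

use anyhow::Context;
use anyhow::Result;
use tempfile::NamedTempFile;

// Write `contents` to `dir/config.toml` atomically: stage in a temp file in
// the same directory, then rename it into place.
fn write_config_atomically(dir: &Path, contents: &str) -> Result<()> {
    std::fs::create_dir_all(dir)
        .with_context(|| format!("failed to create {}", dir.display()))?;
    let tmp = NamedTempFile::new_in(dir)?;
    std::fs::write(tmp.path(), contents)
        .with_context(|| format!("failed to write {}", tmp.path().display()))?;
    // persist() renames the temp file over the target; staging it in `dir`
    // keeps source and destination on the same filesystem so the rename is atomic.
    tmp.persist(dir.join("config.toml"))?;
    Ok(())
}

fn main() -> Result<()> {
    let dir = tempfile::tempdir()?;
    write_config_atomically(dir.path(), "model = \"gpt-5\"\n")?;
    Ok(())
}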

File diff suppressed because it is too large

View File

@@ -3,7 +3,6 @@ use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use crate::config::types::Personality;
use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
@@ -25,12 +24,7 @@ pub struct ConfigProfile {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub model_personality: Option<Personality>,
pub chatgpt_base_url: Option<String>,
/// Optional path to a file containing model instructions.
pub model_instructions_file: Option<AbsolutePathBuf>,
/// Deprecated: ignored. Use `model_instructions_file`.
#[schemars(skip)]
pub experimental_instructions_file: Option<AbsolutePathBuf>,
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub include_apply_patch_tool: Option<bool>,

Some files were not shown because too many files have changed in this diff