Compare commits

1 commit

Author: Dylan Hurd
SHA1: f5e52075af
Message: test
Date: 2026-01-16 22:00:21 -08:00

1344 changed files with 85789 additions and 61151 deletions


@@ -1,4 +1,3 @@
# Without this, Bazel will consider BUILD.bazel files in
# .git/sl/origbackups (which can be populated by Sapling SCM).
.git
codex-rs/target


@@ -1,19 +1,12 @@
common --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
# Dummy xcode config so we don't need to build xcode_locator in repo rule.
common --xcode_version_config=//:disable_xcode
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
common --remote_cache_compression
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
# Runfiles strategy rationale: codex-rs/utils/cargo-bin/README.md
common --noenable_runfiles
common --enable_platform_specific_config
# TODO(zbarsky): We need to untangle these libc constraints to get linux remote builds working.
common:linux --host_platform=//:local
@@ -49,3 +42,4 @@ common --jobs=30
common:remote --extra_execution_platforms=//:rbe
common:remote --remote_executor=grpcs://remote.buildbuddy.io
common:remote --jobs=800
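
For orientation, a minimal usage sketch of this rc file (the `//...` target pattern is illustrative, not from the diff):

```bash
# Ordinary builds pick up the `common` flags above (disk/repo caches,
# platform config) from .bazelrc automatically:
bazel build //...

# The `common:remote` lines define an opt-in config for BuildBuddy
# remote execution; enable it explicitly:
bazel build --config=remote //...
```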


@@ -1 +0,0 @@
9.0.0


@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm


@@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
: "${TARGET:?TARGET environment variable is required}"
: "${GITHUB_ENV:?GITHUB_ENV environment variable is required}"
apt_update_args=()
if [[ -n "${APT_UPDATE_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_update_args=(${APT_UPDATE_ARGS})
fi
apt_install_args=()
if [[ -n "${APT_INSTALL_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_install_args=(${APT_INSTALL_ARGS})
fi
sudo apt-get update "${apt_update_args[@]}"
sudo apt-get install -y "${apt_install_args[@]}" musl-tools pkg-config g++ clang libc++-dev libc++abi-dev lld
case "${TARGET}" in
x86_64-unknown-linux-musl)
arch="x86_64"
;;
aarch64-unknown-linux-musl)
arch="aarch64"
;;
*)
echo "Unexpected musl target: ${TARGET}" >&2
exit 1
;;
esac
# Use the musl toolchain as the Rust linker to avoid Zig injecting its own CRT.
if command -v "${arch}-linux-musl-gcc" >/dev/null; then
musl_linker="$(command -v "${arch}-linux-musl-gcc")"
elif command -v musl-gcc >/dev/null; then
musl_linker="$(command -v musl-gcc)"
else
echo "musl gcc not found after install; arch=${arch}" >&2
exit 1
fi
zig_target="${TARGET/-unknown-linux-musl/-linux-musl}"
runner_temp="${RUNNER_TEMP:-/tmp}"
tool_root="${runner_temp}/codex-musl-tools-${TARGET}"
mkdir -p "${tool_root}"
sysroot=""
if command -v zig >/dev/null; then
zig_bin="$(command -v zig)"
cc="${tool_root}/zigcc"
cxx="${tool_root}/zigcxx"
cat >"${cc}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
# Drop any explicit --target/-target flags. Zig expects -target and
# rejects Rust triples like *-unknown-linux-musl.
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" cc -target "${zig_target}" "\${args[@]}"
EOF
cat >"${cxx}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" c++ -target "${zig_target}" "\${args[@]}"
EOF
chmod +x "${cc}" "${cxx}"
sysroot="$("${zig_bin}" cc -target "${zig_target}" -print-sysroot 2>/dev/null || true)"
else
cc="${musl_linker}"
if command -v "${arch}-linux-musl-g++" >/dev/null; then
cxx="$(command -v "${arch}-linux-musl-g++")"
elif command -v musl-g++ >/dev/null; then
cxx="$(command -v musl-g++)"
else
cxx="${cc}"
fi
fi
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
echo "BORING_BSSL_SYSROOT=${sysroot}" >> "$GITHUB_ENV"
boring_sysroot_var="BORING_BSSL_SYSROOT_${TARGET}"
boring_sysroot_var="${boring_sysroot_var//-/_}"
echo "${boring_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi
cflags="-pthread"
cxxflags="-pthread"
if [[ "${TARGET}" == "aarch64-unknown-linux-musl" ]]; then
# BoringSSL enables -Wframe-larger-than=25344 under clang and treats warnings as errors.
cflags="${cflags} -Wno-error=frame-larger-than"
cxxflags="${cxxflags} -Wno-error=frame-larger-than"
fi
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
echo "CC=${cc}" >> "$GITHUB_ENV"
echo "TARGET_CC=${cc}" >> "$GITHUB_ENV"
target_cc_var="CC_${TARGET}"
target_cc_var="${target_cc_var//-/_}"
echo "${target_cc_var}=${cc}" >> "$GITHUB_ENV"
echo "CXX=${cxx}" >> "$GITHUB_ENV"
echo "TARGET_CXX=${cxx}" >> "$GITHUB_ENV"
target_cxx_var="CXX_${TARGET}"
target_cxx_var="${target_cxx_var//-/_}"
echo "${target_cxx_var}=${cxx}" >> "$GITHUB_ENV"
cargo_linker_var="CARGO_TARGET_${TARGET^^}_LINKER"
cargo_linker_var="${cargo_linker_var//-/_}"
echo "${cargo_linker_var}=${musl_linker}" >> "$GITHUB_ENV"
echo "CMAKE_C_COMPILER=${cc}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=${cxx}" >> "$GITHUB_ENV"
echo "CMAKE_ARGS=-DCMAKE_HAVE_THREADS_LIBRARY=1 -DCMAKE_USE_PTHREADS_INIT=1 -DCMAKE_THREAD_LIBS_INIT=-pthread -DTHREADS_PREFER_PTHREAD_FLAG=ON" >> "$GITHUB_ENV"


@@ -81,7 +81,7 @@ jobs:
# previously built artifacts to minimize build time. The more precise you are with
# hashFiles sources the less work bazel will have to do.
# - name: Mount bazel caches
# uses: actions/cache@v5
# uses: actions/cache@v4
# with:
# path: |
# ~/.cache/bazel-repo-cache


@@ -59,7 +59,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
components: rustfmt
- name: cargo fmt
@@ -77,7 +77,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
@@ -177,31 +177,11 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
components: clippy
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- name: Compute lockfile hash
id: lockhash
working-directory: codex-rs
@@ -222,10 +202,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
restore-keys: |
cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
@@ -268,14 +244,6 @@ jobs:
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-
sccache-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Disable sccache wrapper (musl)
shell: bash
run: |
set -euo pipefail
echo "RUSTC_WRAPPER=" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Prepare APT cache directories (musl)
shell: bash
@@ -293,73 +261,15 @@ jobs:
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
@@ -406,10 +316,6 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
key: cargo-home-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ steps.lockhash.outputs.hash }}-${{ steps.lockhash.outputs.toolchain_hash }}
- name: Save sccache cache (fallback)
@@ -510,7 +416,7 @@ jobs:
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}


@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -45,15 +45,6 @@ jobs:
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
- name: Verify config schema fixture
shell: bash
working-directory: codex-rs
run: |
set -euo pipefail
echo "If this fails, run: just write-config-schema to overwrite fixture with intentional changes."
cargo run -p codex-core --bin codex-write-config-schema
git diff --exit-code core/config.schema.json
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
@@ -89,30 +80,10 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Use hermetic Cargo home (musl)
shell: bash
run: |
set -euo pipefail
cargo_home="${GITHUB_WORKSPACE}/.cargo-home"
mkdir -p "${cargo_home}/bin"
echo "CARGO_HOME=${cargo_home}" >> "$GITHUB_ENV"
echo "${cargo_home}/bin" >> "$GITHUB_PATH"
: > "${cargo_home}/config.toml"
- uses: actions/cache@v5
with:
path: |
@@ -120,76 +91,14 @@ jobs:
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
${{ github.workspace }}/.cargo-home/bin/
${{ github.workspace }}/.cargo-home/registry/index/
${{ github.workspace }}/.cargo-home/registry/cache/
${{ github.workspace }}/.cargo-home/git/db/
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Cargo build
shell: bash
@@ -327,7 +236,6 @@ jobs:
# Path that contains the uncompressed binaries for the current
# ${{ matrix.target }}
dest="dist/${{ matrix.target }}"
repo_root=$PWD
# We want to ship the raw Windows executables in the GitHub Release
# in addition to the compressed archives. Keep the originals for
@@ -367,30 +275,7 @@ jobs:
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
# Use an absolute path so bundle zips land in the real dist
# dir even when 7z runs from a temp directory.
(cd "$bundle_dir" && 7z a "$repo_root/$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
@@ -473,10 +358,6 @@ jobs:
ls -R dist/
- name: Add config schema release asset
run: |
cp codex-rs/core/config.schema.json dist/config-schema.json
- name: Define release name
id: release_name
run: |
@@ -547,19 +428,6 @@ jobs:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema json file.
if: ${{ !contains(steps.release_name.outputs.name, '-') }}
continue-on-error: true
env:
DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL: ${{ secrets.DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL }}
run: |
if ! curl -sS -f -o /dev/null -X POST "$DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL"; then
echo "::warning title=developers.openai.com deploy hook failed::Vercel deploy hook POST failed for ${GITHUB_REF_NAME}"
exit 1
fi
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers
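
A condensed sketch of that publish flow (assuming the job grants `id-token: write`, as the permissions shown elsewhere in this diff do):

```bash
# Trusted publishing requires npm CLI 11.5.1 or later, hence the update step:
npm install -g npm@latest
# With OIDC trusted publishing configured on the npm side, publish needs no
# long-lived NPM_TOKEN; npm exchanges the GitHub OIDC token itself.
npm publish
```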


@@ -24,7 +24,7 @@ jobs:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
- name: build codex
run: cargo build --bin codex


@@ -93,83 +93,15 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- name: Install UBSan runtime (musl)
if: ${{ matrix.install_musl }}
shell: bash
run: |
set -euo pipefail
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
fi
- uses: dtolnay/rust-toolchain@1.93
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.install_musl }}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.install_musl }}
name: Install musl build dependencies
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
- if: ${{ matrix.install_musl }}
name: Configure rustc UBSan wrapper (musl host)
shell: bash
run: |
set -euo pipefail
ubsan=""
if command -v ldconfig >/dev/null 2>&1; then
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
fi
wrapper_root="${RUNNER_TEMP:-/tmp}"
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
cat > "${wrapper}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
if [[ -n "${ubsan}" ]]; then
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
fi
exec "\$1" "\${@:2}"
EOF
chmod +x "${wrapper}"
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
- if: ${{ matrix.install_musl }}
name: Clear sanitizer flags (musl)
shell: bash
run: |
set -euo pipefail
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
# Override any runner-level Cargo config rustflags as well.
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
sanitize_flags() {
local input="$1"
input="${input//-fsanitize=undefined/}"
input="${input//-fno-sanitize-recover=undefined/}"
input="${input//-fno-sanitize-trap=undefined/}"
echo "$input"
}
cflags="$(sanitize_flags "${CFLAGS-}")"
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
@@ -266,7 +198,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -308,7 +240,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -344,6 +276,7 @@ jobs:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
@@ -436,6 +369,12 @@ jobs:
id-token: write
contents: read
steps:
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.8.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
@@ -443,7 +382,6 @@ jobs:
registry-url: https://registry.npmjs.org
scope: "@openai"
# Trusted publishing requires npm CLI version 11.5.1 or later.
- name: Update npm
run: npm install -g npm@latest


@@ -11,17 +11,15 @@ In the codex-rs folder where the rust code lives:
- Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if
- Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
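A concrete sketch of that finishing sequence (the crate name is an example):

```bash
cd codex-rs
just fmt                  # formatting; no approval needed
just fix -p codex-tui     # scoped lint fixes; avoids a workspace-wide Clippy build
cargo test -p codex-tui   # project-specific tests
# Only if common, core, or protocol changed, and after asking the user:
# cargo test --all-features
```
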
## TUI style conventions


@@ -1,7 +1,3 @@
load("@apple_support//xcode:xcode_config.bzl", "xcode_config")
xcode_config(name = "disable_xcode")
# We mark the local platform as glibc-compatible so that rust can grab a toolchain for us.
# TODO(zbarsky): Upstream a better libc constraint into rules_rust.
# We only enable this on linux though for sanity, and because it breaks remote execution.


@@ -2,9 +2,13 @@ bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.3.1")
archive_override(
module_name = "toolchains_llvm_bootstrapped",
integrity = "sha256-4/2h4tYSUSptxFVI9G50yJxWGOwHSeTeOGBlaLQBV8g=",
strip_prefix = "toolchains_llvm_bootstrapped-d20baf67e04d8e2887e3779022890d1dc5e6b948",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/d20baf67e04d8e2887e3779022890d1dc5e6b948.tar.gz"],
integrity = "sha256-9ks21bgEqbQWmwUIvqeLA64+Jk6o4ZVjC8KxjVa2Vw8=",
strip_prefix = "toolchains_llvm_bootstrapped-e3775e66a7b6d287c705ca0cd24497ef4a77c503",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/e3775e66a7b6d287c705ca0cd24497ef4a77c503/master.tar.gz"],
patch_strip = 1,
patches = [
"//patches:llvm_toolchain_archive_params.patch",
],
)
osx = use_extension("@toolchains_llvm_bootstrapped//toolchain/extension:osx.bzl", "osx")
@@ -27,8 +31,6 @@ register_toolchains(
"@toolchains_llvm_bootstrapped//toolchain:all",
)
# Needed to disable xcode...
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rust", version = "0.68.1")
@@ -55,7 +57,7 @@ rust = use_extension("@rules_rust//rust:extensions.bzl", "rust")
rust.toolchain(
edition = "2024",
extra_target_triples = RUST_TRIPLES,
versions = ["1.93.0"],
versions = ["1.90.0"],
)
use_repo(rust, "rust_toolchains")
@@ -69,11 +71,6 @@ crate.from_cargo(
cargo_toml = "//codex-rs:Cargo.toml",
platform_triples = RUST_TRIPLES,
)
crate.annotation(
crate = "nucleo-matcher",
strip_prefix = "matcher",
version = "0.3.1",
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
@@ -92,17 +89,12 @@ crate.annotation(
inject_repo(crate, "openssl")
crate.annotation(
crate = "runfiles",
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
)
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
patch_args = ["-p1"],
patches = [
"//patches:windows-link.patch",
"//patches:windows-link.patch"
],
)

MODULE.bazel.lock (generated, 479 lines changed)

File diff suppressed because one or more lines are too long

PNPM.md (new file, 70 lines)

@@ -0,0 +1,70 @@
# Migration to pnpm
This project has been migrated from npm to pnpm to improve dependency management and developer experience.
## Why pnpm?
- **Faster installation**: pnpm is significantly faster than npm and yarn
- **Disk space savings**: pnpm uses a content-addressable store to avoid duplication
- **Phantom dependency prevention**: pnpm creates a strict node_modules structure
- **Native workspaces support**: simplified monorepo management
## How to use pnpm
### Installation
```bash
# Global installation of pnpm
npm install -g pnpm@10.8.1
# Or with corepack (available with Node.js 22+)
corepack enable
corepack prepare pnpm@10.8.1 --activate
```
### Common commands
| npm command | pnpm equivalent |
| --------------- | ---------------- |
| `npm install` | `pnpm install` |
| `npm run build` | `pnpm run build` |
| `npm test` | `pnpm test` |
| `npm run lint` | `pnpm run lint` |
### Workspace-specific commands
| Action | Command |
| ------------------------------------------ | ---------------------------------------- |
| Run a command in a specific package | `pnpm --filter @openai/codex run build` |
| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` |
| Run a command in all packages | `pnpm -r run test` |
## Monorepo structure
```
codex/
├── pnpm-workspace.yaml # Workspace configuration
├── .npmrc # pnpm configuration
├── package.json # Root dependencies and scripts
├── codex-cli/ # Main package
│ └── package.json # codex-cli specific dependencies
└── docs/ # Documentation (future package)
```
## Configuration files
- **pnpm-workspace.yaml**: Defines the packages included in the monorepo
- **.npmrc**: Configures pnpm behavior
- **Root package.json**: Contains shared scripts and dependencies
## CI/CD
CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher.
## Known issues
If you encounter issues with pnpm, try the following solutions:
1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install` (see the sketch below)
2. Make sure you're using pnpm 10.8.1 or higher
3. Verify that Node.js 22 or higher is installed
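
A minimal sketch of that clean reinstall (run from the repo root; the nested `node_modules` path is an assumption based on the layout above):

```bash
# Drop installed modules and the lockfile, then resolve from scratch:
rm -rf node_modules codex-cli/node_modules
rm -f pnpm-lock.yaml
pnpm install
```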


@@ -14,4 +14,4 @@ target_app = "cli"
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2026-05-10"
to_date = "2026-01-10"

codex-cli/bin/codex.js (0 lines changed; executable file → normal file)

codex-cli/package-lock.json (generated, new file, 18 lines)

@@ -0,0 +1,18 @@
{
"name": "@openai/codex",
"version": "0.0.0-dev",
"lockfileVersion": 3,
"packages": {
"": {
"name": "@openai/codex",
"version": "0.0.0-dev",
"license": "Apache-2.0",
"bin": {
"codex": "bin/codex.js"
},
"engines": {
"node": ">=16"
}
}
}
}


@@ -17,6 +17,5 @@
"type": "git",
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
},
"packageManager": "pnpm@10.28.2+sha512.41872f037ad22f7348e3b1debbaf7e867cfd448f2726d9cf74c08f19507c31d2c8e7a11525b983febc2df640b5438dee6023ebb1f84ed43cc2d654d2bc326264"
}
}

codex-rs/Cargo.lock (generated, 1721 lines changed)

File diff suppressed because it is too large


@@ -11,7 +11,6 @@ members = [
"arg0",
"feedback",
"codex-backend-openapi-models",
"cloud-requirements",
"cloud-tasks",
"cloud-tasks-client",
"cli",
@@ -28,7 +27,6 @@ members = [
"login",
"mcp-server",
"mcp-types",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
@@ -37,19 +35,18 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"tui2",
"utils/absolute-path",
"utils/cargo-bin",
"utils/git",
"utils/cache",
"utils/image",
"utils/json-to-toml",
"utils/home-dir",
"utils/pty",
"utils/readiness",
"utils/string",
"codex-client",
"codex-api",
"state",
]
resolver = "2"
@@ -73,7 +70,6 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-cloud-requirements = { path = "cloud-requirements" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli"}
codex-client = { path = "codex-client" }
@@ -95,15 +91,14 @@ codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-home-dir = { path = "utils/home-dir" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }
@@ -127,13 +122,12 @@ axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"
chrono = "0.4.43"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
crossbeam-channel = "0.5.15"
ctor = "0.6.3"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
@@ -144,7 +138,6 @@ env-flags = "0.1.1"
env_logger = "0.11.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
globset = "0.4"
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"
@@ -166,7 +159,7 @@ maplit = "1.0.2"
mime_guess = "2.0.5"
multimap = "0.10.0"
notify = "8.2.0"
nucleo = { git = "https://github.com/helix-editor/nucleo.git", rev = "4253de9faabb4e5c6d81d946a5e35a90f87347ee" }
nucleo-matcher = "0.3.1"
once_cell = "1.20.2"
openssl-sys = "*"
opentelemetry = "0.31.0"
@@ -185,18 +178,17 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-core = "0.1.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.8"
reqwest = "0.12"
rmcp = { version = "0.12.0", default-features = false }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
schemars = "0.8.22"
seccompiler = "0.5.0"
sentry = "0.46.0"
serde = "1"
serde_json = "1"
serde_path_to_error = "0.1.20"
serde_with = "3.16"
serde_yaml = "0.9"
serial_test = "3.2.0"
@@ -206,7 +198,6 @@ semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
sqlx = { version = "0.8.6", default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio-rustls", "sqlite", "time", "uuid"] }
starlark = "0.13.0"
strum = "0.27.2"
strum_macros = "0.27.2"
@@ -221,11 +212,11 @@ tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-native-roots"] }
tokio-tungstenite = "0.21.0"
tokio-util = "0.7.18"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.44"
tracing = "0.1.43"
tracing-appender = "0.2.3"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
@@ -234,6 +225,7 @@ tree-sitter-bash = "0.25"
zstd = "0.13"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tui-scrollbar = "0.2.2"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -311,10 +303,6 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
tokio-tungstenite = { git = "https://github.com/JakkuSakura/tokio-tungstenite", rev = "2ae536b0de793f3ddf31fc2f22d445bf1ef2023d" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
[patch."ssh://git@github.com/JakkuSakura/tungstenite-rs.git"]
tungstenite = { git = "https://github.com/JakkuSakura/tungstenite-rs", rev = "f514de8644821113e5d18a027d6d28a5c8cc0a6e" }


@@ -23,22 +23,11 @@ impl GitSha {
}
}
/// Authentication mode for OpenAI-backed providers.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
pub enum AuthMode {
/// OpenAI API key provided by the caller and stored by Codex.
ApiKey,
/// ChatGPT OAuth managed by Codex (tokens persisted and refreshed by Codex).
Chatgpt,
/// [UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.
///
/// ChatGPT auth tokens are supplied by an external host app and are only
/// stored in memory. Token refresh must be handled by the external host app.
#[serde(rename = "chatgptAuthTokens")]
#[ts(rename = "chatgptAuthTokens")]
#[strum(serialize = "chatgptAuthTokens")]
ChatgptAuthTokens,
ChatGPT,
}
/// Generates an `enum ClientRequest` where each variant is a request that the
@@ -128,14 +117,6 @@ client_request_definitions! {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
},
ThreadSetName => "thread/name/set" {
params: v2::ThreadSetNameParams,
response: v2::ThreadSetNameResponse,
},
ThreadUnarchive => "thread/unarchive" {
params: v2::ThreadUnarchiveParams,
response: v2::ThreadUnarchiveResponse,
},
ThreadRollback => "thread/rollback" {
params: v2::ThreadRollbackParams,
response: v2::ThreadRollbackResponse,
@@ -148,22 +129,10 @@ client_request_definitions! {
params: v2::ThreadLoadedListParams,
response: v2::ThreadLoadedListResponse,
},
ThreadRead => "thread/read" {
params: v2::ThreadReadParams,
response: v2::ThreadReadResponse,
},
SkillsList => "skills/list" {
params: v2::SkillsListParams,
response: v2::SkillsListResponse,
},
AppsList => "app/list" {
params: v2::AppsListParams,
response: v2::AppsListResponse,
},
SkillsConfigWrite => "skills/config/write" {
params: v2::SkillsConfigWriteParams,
response: v2::SkillsConfigWriteResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
@@ -181,11 +150,6 @@ client_request_definitions! {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
/// EXPERIMENTAL - list collaboration mode presets.
CollaborationModeList => "collaborationMode/list" {
params: v2::CollaborationModeListParams,
response: v2::CollaborationModeListResponse,
},
McpServerOauthLogin => "mcpServer/oauth/login" {
params: v2::McpServerOauthLoginParams,
@@ -537,23 +501,6 @@ server_request_definitions! {
response: v2::FileChangeRequestApprovalResponse,
},
/// EXPERIMENTAL - Request input from the user for a tool call.
ToolRequestUserInput => "item/tool/requestUserInput" {
params: v2::ToolRequestUserInputParams,
response: v2::ToolRequestUserInputResponse,
},
/// Execute a dynamic tool call on the client.
DynamicToolCall => "item/tool/call" {
params: v2::DynamicToolCallParams,
response: v2::DynamicToolCallResponse,
},
ChatgptAuthTokensRefresh => "account/chatgptAuthTokens/refresh" {
params: v2::ChatgptAuthTokensRefreshParams,
response: v2::ChatgptAuthTokensRefreshResponse,
},
/// DEPRECATED APIs below
/// Request to approve a patch.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
@@ -598,7 +545,6 @@ server_notification_definitions! {
/// NEW NOTIFICATIONS
Error => "error" (v2::ErrorNotification),
ThreadStarted => "thread/started" (v2::ThreadStartedNotification),
ThreadNameUpdated => "thread/name/updated" (v2::ThreadNameUpdatedNotification),
ThreadTokenUsageUpdated => "thread/tokenUsage/updated" (v2::ThreadTokenUsageUpdatedNotification),
TurnStarted => "turn/started" (v2::TurnStartedNotification),
TurnCompleted => "turn/completed" (v2::TurnCompletedNotification),
@@ -609,8 +555,6 @@ server_notification_definitions! {
/// This event is internal-only. Used by Codex Cloud.
RawResponseItemCompleted => "rawResponseItem/completed" (v2::RawResponseItemCompletedNotification),
AgentMessageDelta => "item/agentMessage/delta" (v2::AgentMessageDeltaNotification),
/// EXPERIMENTAL - proposed plan streaming deltas for plan items.
PlanDelta => "item/plan/delta" (v2::PlanDeltaNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
@@ -621,7 +565,6 @@ server_notification_definitions! {
ReasoningSummaryTextDelta => "item/reasoning/summaryTextDelta" (v2::ReasoningSummaryTextDeltaNotification),
ReasoningSummaryPartAdded => "item/reasoning/summaryPartAdded" (v2::ReasoningSummaryPartAddedNotification),
ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
/// Deprecated: Use `ContextCompaction` item type instead.
ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),
ConfigWarning => "configWarning" (v2::ConfigWarningNotification),
@@ -776,29 +719,6 @@ mod tests {
Ok(())
}
#[test]
fn serialize_chatgpt_auth_tokens_refresh_request() -> Result<()> {
let request = ServerRequest::ChatgptAuthTokensRefresh {
request_id: RequestId::Integer(8),
params: v2::ChatgptAuthTokensRefreshParams {
reason: v2::ChatgptAuthTokensRefreshReason::Unauthorized,
previous_account_id: Some("org-123".to_string()),
},
};
assert_eq!(
json!({
"method": "account/chatgptAuthTokens/refresh",
"id": 8,
"params": {
"reason": "unauthorized",
"previousAccountId": "org-123"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account_rate_limits() -> Result<()> {
let request = ClientRequest::GetAccountRateLimits {
@@ -888,34 +808,10 @@ mod tests {
Ok(())
}
#[test]
fn serialize_account_login_chatgpt_auth_tokens() -> Result<()> {
let request = ClientRequest::LoginAccount {
request_id: RequestId::Integer(5),
params: v2::LoginAccountParams::ChatgptAuthTokens {
access_token: "access-token".to_string(),
id_token: "id-token".to_string(),
},
};
assert_eq!(
json!({
"method": "account/login/start",
"id": 5,
"params": {
"type": "chatgptAuthTokens",
"accessToken": "access-token",
"idToken": "id-token"
}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_get_account() -> Result<()> {
let request = ClientRequest::GetAccount {
request_id: RequestId::Integer(6),
request_id: RequestId::Integer(5),
params: v2::GetAccountParams {
refresh_token: false,
},
@@ -923,7 +819,7 @@ mod tests {
assert_eq!(
json!({
"method": "account/read",
"id": 6,
"id": 5,
"params": {
"refreshToken": false
}
@@ -978,21 +874,4 @@ mod tests {
);
Ok(())
}
#[test]
fn serialize_list_collaboration_modes() -> Result<()> {
let request = ClientRequest::CollaborationModeList {
request_id: RequestId::Integer(7),
params: v2::CollaborationModeListParams::default(),
};
assert_eq!(
json!({
"method": "collaborationMode/list",
"id": 7,
"params": {}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}


@@ -6,7 +6,6 @@ use crate::protocol::v2::UserInput;
use codex_protocol::protocol::AgentReasoningEvent;
use codex_protocol::protocol::AgentReasoningRawContentEvent;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ItemCompletedEvent;
use codex_protocol::protocol::ThreadRolledBackEvent;
use codex_protocol::protocol::TurnAbortedEvent;
use codex_protocol::protocol::UserMessageEvent;
@@ -56,7 +55,6 @@ impl ThreadHistoryBuilder {
EventMsg::AgentReasoningRawContent(payload) => {
self.handle_agent_reasoning_raw_content(payload)
}
EventMsg::ItemCompleted(payload) => self.handle_item_completed(payload),
EventMsg::TokenCount(_) => {}
EventMsg::EnteredReviewMode(_) => {}
EventMsg::ExitedReviewMode(_) => {}
@@ -127,19 +125,6 @@ impl ThreadHistoryBuilder {
});
}
fn handle_item_completed(&mut self, payload: &ItemCompletedEvent) {
if let codex_protocol::items::TurnItem::Plan(plan) = &payload.item {
if plan.text.is_empty() {
return;
}
let id = self.next_item_id();
self.ensure_turn().items.push(ThreadItem::Plan {
id,
text: plan.text.clone(),
});
}
}
fn handle_turn_aborted(&mut self, _payload: &TurnAbortedEvent) {
let Some(turn) = self.current_turn.as_mut() else {
return;


@@ -128,7 +128,6 @@ pub struct ConversationSummary {
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub updated_at: Option<String>,
pub model_provider: String,
pub cwd: PathBuf,
pub cli_version: String,
@@ -503,14 +502,17 @@ impl From<CoreTextElement> for V1TextElement {
fn from(value: CoreTextElement) -> Self {
Self {
byte_range: value.byte_range.into(),
placeholder: value._placeholder_for_conversion_only().map(str::to_string),
placeholder: value.placeholder,
}
}
}
impl From<V1TextElement> for CoreTextElement {
fn from(value: V1TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}


@@ -4,10 +4,7 @@ use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
@@ -27,13 +24,10 @@ use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SkillDependencies as CoreSkillDependencies;
use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
use codex_protocol::protocol::SkillInterface as CoreSkillInterface;
use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
use codex_protocol::protocol::SkillScope as CoreSkillScope;
use codex_protocol::protocol::SkillToolDependency as CoreSkillToolDependency;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::ByteRange as CoreByteRange;
@@ -86,10 +80,6 @@ macro_rules! v2_enum_from_core {
pub enum CodexErrorInfo {
ContextWindowExceeded,
UsageLimitExceeded,
ModelCap {
model: String,
reset_after_seconds: Option<u64>,
},
HttpConnectionFailed {
#[serde(rename = "httpStatusCode")]
#[ts(rename = "httpStatusCode")]
@@ -126,13 +116,6 @@ impl From<CoreCodexErrorInfo> for CodexErrorInfo {
match value {
CoreCodexErrorInfo::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
CoreCodexErrorInfo::UsageLimitExceeded => CodexErrorInfo::UsageLimitExceeded,
CoreCodexErrorInfo::ModelCap {
model,
reset_after_seconds,
} => CodexErrorInfo::ModelCap {
model,
reset_after_seconds,
},
CoreCodexErrorInfo::HttpConnectionFailed { http_status_code } => {
CodexErrorInfo::HttpConnectionFailed { http_status_code }
}
@@ -233,6 +216,7 @@ v2_enum_from_core!(
}
);
// TODO(mbolin): Support in-repo layer.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
@@ -338,15 +322,6 @@ pub struct ToolsV2 {
pub view_image: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolSpec {
pub name: String,
pub description: String,
pub input_schema: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
@@ -417,8 +392,6 @@ pub struct ConfigLayer {
pub name: ConfigLayerSource,
pub version: String,
pub config: JsonValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub disabled_reason: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
@@ -475,10 +448,6 @@ pub enum ConfigWriteErrorCode {
pub struct ConfigReadParams {
#[serde(default)]
pub include_layers: bool,
/// Optional working directory to resolve project config layers. If specified,
/// return the effective config as seen from that directory (i.e., including any
/// project layers between `cwd` and the project/repo root).
pub cwd: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -497,14 +466,6 @@ pub struct ConfigReadResponse {
pub struct ConfigRequirements {
pub allowed_approval_policies: Option<Vec<AskForApproval>>,
pub allowed_sandbox_modes: Option<Vec<SandboxMode>>,
pub enforce_residency: Option<ResidencyRequirement>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
#[ts(export_to = "v2/")]
pub enum ResidencyRequirement {
Us,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -732,7 +693,6 @@ pub enum SessionSource {
VsCode,
Exec,
AppServer,
SubAgent(CoreSubAgentSource),
#[serde(other)]
Unknown,
}
@@ -744,7 +704,7 @@ impl From<CoreSessionSource> for SessionSource {
CoreSessionSource::VSCode => SessionSource::VsCode,
CoreSessionSource::Exec => SessionSource::Exec,
CoreSessionSource::Mcp => SessionSource::AppServer,
CoreSessionSource::SubAgent(sub) => SessionSource::SubAgent(sub),
CoreSessionSource::SubAgent(_) => SessionSource::Unknown,
CoreSessionSource::Unknown => SessionSource::Unknown,
}
}
@@ -757,7 +717,6 @@ impl From<SessionSource> for CoreSessionSource {
SessionSource::VsCode => CoreSessionSource::VSCode,
SessionSource::Exec => CoreSessionSource::Exec,
SessionSource::AppServer => CoreSessionSource::Mcp,
SessionSource::SubAgent(sub) => CoreSessionSource::SubAgent(sub),
SessionSource::Unknown => CoreSessionSource::Unknown,
}
}
@@ -843,24 +802,6 @@ pub enum LoginAccountParams {
#[serde(rename = "chatgpt")]
#[ts(rename = "chatgpt")]
Chatgpt,
/// [UNSTABLE] FOR OPENAI INTERNAL USE ONLY - DO NOT USE.
/// The access token must contain the same scopes that Codex-managed ChatGPT auth tokens have.
#[serde(rename = "chatgptAuthTokens")]
#[ts(rename = "chatgptAuthTokens")]
ChatgptAuthTokens {
/// ID token (JWT) supplied by the client.
///
/// This token is used for identity and account metadata (email, plan type,
/// workspace id).
#[serde(rename = "idToken")]
#[ts(rename = "idToken")]
id_token: String,
/// Access token (JWT) supplied by the client.
/// This token is used for backend API requests.
#[serde(rename = "accessToken")]
#[ts(rename = "accessToken")]
access_token: String,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -880,9 +821,6 @@ pub enum LoginAccountResponse {
/// URL the client should open in a browser to initiate the OAuth flow.
auth_url: String,
},
#[serde(rename = "chatgptAuthTokens", rename_all = "camelCase")]
#[ts(rename = "chatgptAuthTokens", rename_all = "camelCase")]
ChatgptAuthTokens {},
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -913,37 +851,6 @@ pub struct CancelLoginAccountResponse {
#[ts(export_to = "v2/")]
pub struct LogoutAccountResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ChatgptAuthTokensRefreshReason {
/// Codex attempted a backend request and received `401 Unauthorized`.
Unauthorized,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ChatgptAuthTokensRefreshParams {
pub reason: ChatgptAuthTokensRefreshReason,
/// Workspace/account identifier that Codex was previously using.
///
/// Clients that manage multiple accounts/workspaces can use this as a hint
/// to refresh the token for the correct workspace.
///
/// This may be `null` when the prior ID token did not include a workspace
/// identifier (`chatgpt_account_id`) or when the token could not be parsed.
pub previous_account_id: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ChatgptAuthTokensRefreshResponse {
pub id_token: String,
pub access_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -955,11 +862,6 @@ pub struct GetAccountRateLimitsResponse {
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct GetAccountParams {
/// When `true`, requests a proactive token refresh before returning.
///
/// In managed auth mode this triggers the normal refresh-token flow. In
/// external auth mode this flag is ignored. Clients should refresh tokens
/// themselves and call `account/login/start` with `chatgptAuthTokens`.
#[serde(default)]
pub refresh_token: bool,
}
@@ -992,8 +894,6 @@ pub struct Model {
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
#[serde(default)]
pub supports_personality: bool,
// Only one model should be marked as default.
pub is_default: bool,
}
@@ -1016,20 +916,6 @@ pub struct ModelListResponse {
pub next_cursor: Option<String>,
}
/// EXPERIMENTAL - list collaboration mode presets.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListParams {}
/// EXPERIMENTAL - collaboration mode presets response.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListResponse {
pub data: Vec<CollaborationModeMask>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1061,41 +947,6 @@ pub struct ListMcpServerStatusResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppInfo {
pub id: String,
pub name: String,
pub description: Option<String>,
pub logo_url: Option<String>,
pub logo_url_dark: Option<String>,
pub distribution_channel: Option<String>,
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListResponse {
pub data: Vec<AppInfo>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1177,9 +1028,6 @@ pub struct ThreadStartParams {
pub config: Option<HashMap<String, JsonValue>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
pub ephemeral: Option<bool>,
pub dynamic_tools: Option<Vec<DynamicToolSpec>>,
/// If true, opt into emitting raw response items on the event stream.
///
/// This is for internal use only (e.g. Codex Cloud).
@@ -1234,7 +1082,6 @@ pub struct ThreadResumeParams {
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1303,33 +1150,6 @@ pub struct ThreadArchiveParams {
#[ts(export_to = "v2/")]
pub struct ThreadArchiveResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadSetNameParams {
pub thread_id: String,
pub name: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadUnarchiveParams {
pub thread_id: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadSetNameResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadUnarchiveResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1362,43 +1182,9 @@ pub struct ThreadListParams {
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
/// Optional sort key; defaults to created_at.
pub sort_key: Option<ThreadSortKey>,
/// Optional provider filter; when set, only sessions recorded under these
/// providers are returned. When present but empty, includes all providers.
pub model_providers: Option<Vec<String>>,
/// Optional source filter; when set, only sessions from these source kinds
/// are returned. When omitted or empty, defaults to interactive sources.
pub source_kinds: Option<Vec<ThreadSourceKind>>,
/// Optional archived filter; when set to true, only archived threads are returned.
/// If false or null, only non-archived threads are returned.
pub archived: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase", export_to = "v2/")]
pub enum ThreadSourceKind {
Cli,
#[serde(rename = "vscode")]
#[ts(rename = "vscode")]
VsCode,
Exec,
AppServer,
SubAgent,
SubAgentReview,
SubAgentCompact,
SubAgentThreadSpawn,
SubAgentOther,
Unknown,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub enum ThreadSortKey {
CreatedAt,
UpdatedAt,
}
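// Illustrative only (not part of this diff): building a `ThreadListParams`
// against the pre-change struct shown above. It requests archived CLI threads
// sorted by most recent update; every value is made-up example data.
fn example_thread_list_params() -> ThreadListParams {
    ThreadListParams {
        cursor: None,
        limit: Some(25),
        sort_key: Some(ThreadSortKey::UpdatedAt),
        model_providers: None, // None includes all providers
        source_kinds: Some(vec![ThreadSourceKind::Cli]),
        archived: Some(true),
    }
}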
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1432,23 +1218,6 @@ pub struct ThreadLoadedListResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadParams {
pub thread_id: String,
/// When true, include turns and their items from rollout history.
#[serde(default)]
pub include_turns: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1488,17 +1257,13 @@ pub struct SkillMetadata {
pub description: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
/// Legacy short_description from SKILL.md. Prefer SKILL.json interface.short_description.
/// Legacy short_description from SKILL.md. Prefer SKILL.toml interface.short_description.
pub short_description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub interface: Option<SkillInterface>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub dependencies: Option<SkillDependencies>,
pub path: PathBuf,
pub scope: SkillScope,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1519,35 +1284,6 @@ pub struct SkillInterface {
pub default_prompt: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillDependencies {
pub tools: Vec<SkillToolDependency>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillToolDependency {
#[serde(rename = "type")]
#[ts(rename = "type")]
pub r#type: String,
pub value: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub transport: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1565,21 +1301,6 @@ pub struct SkillsListEntry {
pub errors: Vec<SkillErrorInfo>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteParams {
pub path: PathBuf,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteResponse {
pub effective_enabled: bool,
}
impl From<CoreSkillMetadata> for SkillMetadata {
fn from(value: CoreSkillMetadata) -> Self {
Self {
@@ -1587,10 +1308,8 @@ impl From<CoreSkillMetadata> for SkillMetadata {
description: value.description,
short_description: value.short_description,
interface: value.interface.map(SkillInterface::from),
dependencies: value.dependencies.map(SkillDependencies::from),
path: value.path,
scope: value.scope.into(),
enabled: true,
}
}
}
@@ -1608,31 +1327,6 @@ impl From<CoreSkillInterface> for SkillInterface {
}
}
impl From<CoreSkillDependencies> for SkillDependencies {
fn from(value: CoreSkillDependencies) -> Self {
Self {
tools: value
.tools
.into_iter()
.map(SkillToolDependency::from)
.collect(),
}
}
}
impl From<CoreSkillToolDependency> for SkillToolDependency {
fn from(value: CoreSkillToolDependency) -> Self {
Self {
r#type: value.r#type,
value: value.value,
description: value.description,
transport: value.transport,
command: value.command,
url: value.url,
}
}
}
impl From<CoreSkillScope> for SkillScope {
fn from(value: CoreSkillScope) -> Self {
match value {
@@ -1665,11 +1359,8 @@ pub struct Thread {
/// Unix timestamp (in seconds) when the thread was created.
#[ts(type = "number")]
pub created_at: i64,
/// Unix timestamp (in seconds) when the thread was last updated.
#[ts(type = "number")]
pub updated_at: i64,
/// [UNSTABLE] Path to the thread on disk.
pub path: Option<PathBuf>,
pub path: PathBuf,
/// Working directory captured for the thread.
pub cwd: PathBuf,
/// Version of the CLI that created the thread.
@@ -1678,8 +1369,7 @@ pub struct Thread {
pub source: SessionSource,
/// Optional Git metadata captured when the thread was created.
pub git_info: Option<GitInfo>,
/// Only populated on `thread/resume`, `thread/rollback`, `thread/fork`, and `thread/read`
/// (when `includeTurns` is true) responses.
/// Only populated on `thread/resume`, `thread/rollback`, `thread/fork` responses.
/// For all other responses and notifications returning a Thread,
/// the turns field will be an empty list.
pub turns: Vec<Turn>,
@@ -1816,14 +1506,8 @@ pub struct TurnStartParams {
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
/// Override the personality for this turn and subsequent turns.
pub personality: Option<Personality>,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<JsonValue>,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, reasoning_effort, and developer instructions if set.
pub collaboration_mode: Option<CollaborationMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1932,38 +1616,24 @@ pub struct TextElement {
/// Byte range in the parent `text` buffer that this element occupies.
pub byte_range: ByteRange,
/// Optional human-readable placeholder for the element, displayed in the UI.
placeholder: Option<String>,
}
impl TextElement {
pub fn new(byte_range: ByteRange, placeholder: Option<String>) -> Self {
Self {
byte_range,
placeholder,
}
}
pub fn set_placeholder(&mut self, placeholder: Option<String>) {
self.placeholder = placeholder;
}
pub fn placeholder(&self) -> Option<&str> {
self.placeholder.as_deref()
}
pub placeholder: Option<String>,
}
impl From<CoreTextElement> for TextElement {
fn from(value: CoreTextElement) -> Self {
Self::new(
value.byte_range.into(),
value._placeholder_for_conversion_only().map(str::to_string),
)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}
impl From<TextElement> for CoreTextElement {
fn from(value: TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
Self {
byte_range: value.byte_range.into(),
placeholder: value.placeholder,
}
}
}
@@ -1988,10 +1658,6 @@ pub enum UserInput {
name: String,
path: PathBuf,
},
Mention {
name: String,
path: String,
},
}
impl UserInput {
@@ -2007,7 +1673,6 @@ impl UserInput {
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
UserInput::Skill { name, path } => CoreUserInput::Skill { name, path },
UserInput::Mention { name, path } => CoreUserInput::Mention { name, path },
}
}
}
@@ -2025,7 +1690,6 @@ impl From<CoreUserInput> for UserInput {
CoreUserInput::Image { image_url } => UserInput::Image { url: image_url },
CoreUserInput::LocalImage { path } => UserInput::LocalImage { path },
CoreUserInput::Skill { name, path } => UserInput::Skill { name, path },
CoreUserInput::Mention { name, path } => UserInput::Mention { name, path },
_ => unreachable!("unsupported user input variant"),
}
}
@@ -2044,11 +1708,6 @@ pub enum ThreadItem {
AgentMessage { id: String, text: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
/// EXPERIMENTAL - proposed plan item content. The completed plan item is
/// authoritative and may not match the concatenation of `PlanDelta` text.
Plan { id: String, text: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
Reasoning {
id: String,
#[serde(default)]
@@ -2131,9 +1790,6 @@ pub enum ThreadItem {
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ExitedReviewMode { id: String, review: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
ContextCompaction { id: String },
}
impl From<CoreTurnItem> for ThreadItem {
@@ -2153,10 +1809,6 @@ impl From<CoreTurnItem> for ThreadItem {
.collect::<String>();
ThreadItem::AgentMessage { id: agent.id, text }
}
CoreTurnItem::Plan(plan) => ThreadItem::Plan {
id: plan.id,
text: plan.text,
},
CoreTurnItem::Reasoning(reasoning) => ThreadItem::Reasoning {
id: reasoning.id,
summary: reasoning.summary_text,
@@ -2166,9 +1818,6 @@ impl From<CoreTurnItem> for ThreadItem {
id: search.id,
query: search.query,
},
CoreTurnItem::ContextCompaction(compaction) => {
ThreadItem::ContextCompaction { id: compaction.id }
}
}
}
}
@@ -2315,16 +1964,6 @@ pub struct ThreadStartedNotification {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadNameUpdatedNotification {
pub thread_id: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub thread_name: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2445,18 +2084,6 @@ pub struct AgentMessageDeltaNotification {
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL - proposed plan streaming deltas for plan items. Clients should
/// not assume concatenated deltas match the completed plan item content.
pub struct PlanDeltaNotification {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub delta: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2553,7 +2180,6 @@ pub struct WindowsWorldWritableWarningNotification {
pub failed_scan: bool,
}
/// Deprecated: Use `ContextCompaction` item type instead.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2571,18 +2197,6 @@ pub struct CommandExecutionRequestApprovalParams {
pub item_id: String,
/// Optional explanatory reason (e.g. request for network access).
pub reason: Option<String>,
/// The command to be executed.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command: Option<String>,
/// The command's working directory.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub cwd: Option<PathBuf>,
/// Best-effort parsed command actions for friendly display.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command_actions: Option<Vec<CommandAction>>,
/// Optional proposed execpolicy amendment to allow similar commands without prompting.
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
}
@@ -2614,76 +2228,6 @@ pub struct FileChangeRequestApprovalResponse {
pub decision: FileChangeApprovalDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallParams {
pub thread_id: String,
pub turn_id: String,
pub call_id: String,
pub tool: String,
pub arguments: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallResponse {
pub output: String,
pub success: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Defines a single selectable option for request_user_input.
pub struct ToolRequestUserInputOption {
pub label: String,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Represents one request_user_input question and its required options.
pub struct ToolRequestUserInputQuestion {
pub id: String,
pub header: String,
pub question: String,
#[serde(default)]
pub is_other: bool,
#[serde(default)]
pub is_secret: bool,
pub options: Option<Vec<ToolRequestUserInputOption>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Params sent with a request_user_input event.
pub struct ToolRequestUserInputParams {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub questions: Vec<ToolRequestUserInputQuestion>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Captures a user's answer to a request_user_input question.
pub struct ToolRequestUserInputAnswer {
pub answers: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Response payload mapping question ids to answers.
pub struct ToolRequestUserInputResponse {
pub answers: HashMap<String, ToolRequestUserInputAnswer>,
}
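// Illustrative client-side sketch (not part of this diff): answering a
// request_user_input prompt by mapping each question id to the selected
// option labels. The id and label here are made-up example data.
fn example_user_input_response() -> ToolRequestUserInputResponse {
    let mut answers = HashMap::new();
    answers.insert(
        "q1".to_string(),
        ToolRequestUserInputAnswer { answers: vec!["Yes".to_string()] },
    );
    ToolRequestUserInputResponse { answers }
}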
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2773,24 +2317,6 @@ pub struct DeprecationNoticeNotification {
pub details: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextPosition {
/// 1-based line number.
pub line: usize,
/// 1-based column number (in Unicode scalar values).
pub column: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextRange {
pub start: TextPosition,
pub end: TextPosition,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2799,14 +2325,6 @@ pub struct ConfigWarningNotification {
pub summary: String,
/// Optional extra guidance or error details.
pub details: Option<String>,
/// Optional path to the config file that triggered the warning.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub path: Option<String>,
/// Optional range for the error location inside the config file.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub range: Option<TextRange>,
}
#[cfg(test)]
@@ -2818,7 +2336,6 @@ mod tests {
use codex_protocol::items::TurnItem;
use codex_protocol::items::UserMessageItem;
use codex_protocol::items::WebSearchItem;
use codex_protocol::models::WebSearchAction;
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
use codex_protocol::user_input::UserInput as CoreUserInput;
use pretty_assertions::assert_eq;
@@ -2862,10 +2379,6 @@ mod tests {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
CoreUserInput::Mention {
name: "Demo App".to_string(),
path: "app://demo-app".to_string(),
},
],
});
@@ -2888,10 +2401,6 @@ mod tests {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
UserInput::Mention {
name: "Demo App".to_string(),
path: "app://demo-app".to_string(),
},
],
}
);
@@ -2934,9 +2443,6 @@ mod tests {
let search_item = TurnItem::WebSearch(WebSearchItem {
id: "search-1".to_string(),
query: "docs".to_string(),
action: WebSearchAction::Search {
query: Some("docs".to_string()),
},
});
assert_eq!(


@@ -258,7 +258,7 @@ fn send_message_v2_with_policies(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: user_message,
// Test client sends plain text without UI element ranges.
// Plain text conversion has no UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -292,7 +292,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: first_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -305,7 +304,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: follow_up_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
@@ -479,7 +477,6 @@ impl CodexClient {
conversation_id: *conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
},
@@ -842,9 +839,6 @@ impl CodexClient {
turn_id,
item_id,
reason,
command,
cwd,
command_actions,
proposed_execpolicy_amendment,
} = params;
@@ -854,17 +848,6 @@ impl CodexClient {
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(command) = command.as_deref() {
println!("< command: {command}");
}
if let Some(cwd) = cwd.as_ref() {
println!("< cwd: {}", cwd.display());
}
if let Some(command_actions) = command_actions.as_ref()
&& !command_actions.is_empty()
{
println!("< command actions: {command_actions:?}");
}
if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
}


@@ -17,14 +17,11 @@ workspace = true
[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-chatgpt = { workspace = true }
codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
@@ -37,7 +34,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
mcp-types = { workspace = true }
tempfile = { workspace = true }
time = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
@@ -52,21 +48,11 @@ uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
app_test_support = { workspace = true }
axum = { workspace = true, default-features = false, features = [
"http1",
"json",
"tokio",
] }
base64 = { workspace = true }
codex-execpolicy = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
rmcp = { workspace = true, default-features = false, features = [
"server",
"transport-streamable-http-server",
] }
serial_test = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }


@@ -13,7 +13,6 @@
- [Events](#events)
- [Approvals](#approvals)
- [Skills](#skills)
- [Apps](#apps)
- [Auth endpoints](#auth-endpoints)
## Protocol
@@ -80,22 +79,15 @@ Example (from OpenAI's official VSCode extension):
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; emits `thread/started` and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `thread/name/set` — set or update a thread's user-facing name; returns `{}` on success. Thread names are not required to be unique; name lookups resolve to the most recently updated thread.
- `thread/unarchive` — move an archived rollout file back into the sessions directory; returns the restored `thread` on success.
- `thread/rollback` — drop the last N turns from the agent's in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
- `app/list` — list available apps.
- `skills/config/write` — write user-level skill config by path.
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `tool/requestUserInput` — prompt the user with 1–3 short questions for a tool call and return their answers (experimental).
- `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server.
- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
@@ -103,7 +95,7 @@ Example (from OpenAI's official VSCode extension):
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
- `configRequirements/read` — fetch the loaded requirements allow-lists and `enforceResidency` from `requirements.toml` and/or MDM (or `null` if none are configured).
- `configRequirements/read` — fetch the loaded requirements allow-lists from `requirements.toml` and/or MDM (or `null` if none are configured).
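As a sketch of the write path (the params shape here is an assumption based on the one-key/one-value description above, not a documented schema), a client request could be assembled with `serde_json::json!`:

```rust
// Hypothetical config/value/write payload; the key and value are illustrative.
let request = serde_json::json!({
    "method": "config/value/write",
    "id": 40,
    "params": { "key": "model", "value": "gpt-5.1-codex" }
});
```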
### Example: Start or resume a thread
@@ -117,20 +109,6 @@ Start a fresh thread when you need a new Codex conversation.
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
"personality": "friendly",
"dynamicTools": [
{
"name": "lookup_ticket",
"description": "Fetch a ticket by id",
"inputSchema": {
"type": "object",
"properties": {
"id": { "type": "string" }
},
"required": ["id"]
}
}
],
} }
{ "id": 10, "result": {
"thread": {
@@ -143,13 +121,10 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
```json
{ "method": "thread/resume", "id": 11, "params": {
"threadId": "thr_123",
"personality": "friendly"
} }
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
@@ -163,14 +138,11 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c
### Example: List threads (with pagination & filters)
`thread/list` lets you render a history UI. Results default to `createdAt` descending (newest first). Pass any combination of:
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
- `limit` — server defaults to a reasonable page size if unset.
- `sortKey``created_at` (default) or `updated_at`.
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
- `sourceKinds` — restrict results to specific sources; omit or pass `[]` for interactive sessions only (`cli`, `vscode`).
- `archived` — when `true`, list archived threads only. When `false` or `null`, list non-archived threads (default).
Example:
@@ -178,12 +150,11 @@ Example:
{ "method": "thread/list", "id": 20, "params": {
"cursor": null,
"limit": 25,
"sortKey": "created_at"
} }
{ "id": 20, "result": {
"data": [
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111, "updatedAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000, "updatedAt": 1730750000 }
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000 }
],
"nextCursor": "opaque-token-or-null"
} }
@@ -202,20 +173,6 @@ When `nextCursor` is `null`, you've reached the final page.
} }
```
### Example: Read a thread
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the rollout history loaded into `thread.turns`.
```json
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
{ "id": 22, "result": { "thread": { "id": "thr_123", "turns": [] } } }
```
```json
{ "method": "thread/read", "id": 23, "params": { "threadId": "thr_123", "includeTurns": true } }
{ "id": 23, "result": { "thread": { "id": "thr_123", "turns": [ ... ] } } }
```
### Example: Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
@@ -225,16 +182,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
{ "id": 21, "result": {} }
```
An archived thread will not appear in `thread/list` unless `archived` is set to `true`.
### Example: Unarchive a thread
Use `thread/unarchive` to move an archived rollout back into the sessions directory.
```json
{ "method": "thread/unarchive", "id": 24, "params": { "threadId": "thr_b" } }
{ "id": 24, "result": { "thread": { "id": "thr_b" } } }
```
An archived thread will not appear in future calls to `thread/list`.
### Example: Start a turn (send user input)
@@ -261,7 +209,6 @@ You can optionally specify config overrides on the new turn. If specified, these
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise",
"personality": "friendly",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
@@ -298,26 +245,6 @@ Invoke a skill explicitly by including `$<skill-name>` in the text input and add
} } }
```
### Example: Start a turn (invoke an app)
Invoke an app by including `$<app-slug>` in the text input and adding a `mention` input item with the app id in `app://<connector-id>` form.
```json
{ "method": "turn/start", "id": 34, "params": {
"threadId": "thr_123",
"input": [
{ "type": "text", "text": "$demo-app Summarize the latest updates." },
{ "type": "mention", "name": "Demo App", "path": "app://demo-app" }
]
} }
{ "id": 34, "result": { "turn": {
"id": "turn_458",
"status": "inProgress",
"items": [],
"error": null
} } }
```
### Example: Interrupt an active turn
You can cancel a running Turn with `turn/interrupt`.
@@ -444,7 +371,6 @@ Today both notifications carry an empty `items` array even when item events were
- `userMessage``{id, content}` where `content` is a list of user inputs (`text`, `image`, or `localImage`).
- `agentMessage``{id, text}` containing the accumulated agent reply.
- `plan``{id, text}` emitted for plan-mode turns; plan text can stream via `item/plan/delta` (experimental).
- `reasoning` — `{id, summary, content}` where `summary` holds streamed reasoning summaries (applicable to most OpenAI models) and `content` holds raw reasoning blocks (applicable to e.g. open-source models).
- `commandExecution``{id, command, cwd, status, commandActions, aggregatedOutput?, exitCode?, durationMs?}` for sandboxed commands; `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `fileChange``{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
@@ -454,8 +380,7 @@ Today both notifications carry an empty `items` array even when item events were
- `imageView``{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode``{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
- `exitedReviewMode` — `{id, review}` emitted when the reviewer finishes; `review` is the full plain-text review (usually overall notes plus bullet-point findings).
- `contextCompaction` — `{id}` emitted when codex compacts the conversation history. This can happen automatically.
- `compacted` — `{threadId, turnId}` when codex compacts the conversation history. This can happen automatically. **Deprecated:** Use `contextCompaction` instead.
- `compacted` — `{threadId, turnId}` when codex compacts the conversation history. This can happen automatically.
All items emit two shared lifecycle events:
@@ -468,10 +393,6 @@ There are additional item-specific events:
- `item/agentMessage/delta` — appends streamed text for the agent message; concatenate `delta` values for the same `itemId` in order to reconstruct the full reply.
#### plan
- `item/plan/delta` — streams proposed plan content for plan items (experimental); concatenate `delta` values for the same plan `itemId`. These deltas correspond to the `<proposed_plan>` block.
#### reasoning
- `item/reasoning/summaryTextDelta` — streams readable reasoning summaries; `summaryIndex` increments when a new summary section opens.
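To make the concatenation contract concrete, here is a minimal sketch (ours, not from the repo) that folds streamed deltas into full per-item buffers; it applies equally to `item/agentMessage/delta` and the other delta events keyed by `itemId`:

```rust
use std::collections::HashMap;

// Deltas must be appended in arrival order; the buffer keyed by item id
// converges to the full text once the item completes.
fn apply_delta(buffers: &mut HashMap<String, String>, item_id: &str, delta: &str) {
    buffers.entry(item_id.to_string()).or_default().push_str(delta);
}
```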
@@ -519,7 +440,7 @@ Certain actions (shell commands or modifying files) may require explicit user ap
Order of messages:
1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason`, plus `command`, `cwd`, and `commandActions` for friendly display.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
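For step 3, the client's reply is an ordinary JSON-RPC result addressed to the request's `id`; a minimal sketch (the `id` value is made-up) using `serde_json::json!`:

```rust
// Accept once without persisting the approval for the session; `id` must echo
// the server's item/commandExecution/requestApproval request id.
let accept = serde_json::json!({
    "id": 73,
    "result": { "decision": "accept", "acceptSettings": { "forSession": false } }
});
// Or decline the command outright.
let decline = serde_json::json!({ "id": 73, "result": { "decision": "decline" } });
```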
@@ -545,15 +466,8 @@ Invoke a skill by including `$<skill-name>` in the text input. Add a `skill` inp
"params": {
"threadId": "thread-1",
"input": [
{
"type": "text",
"text": "$skill-creator Add a new skill for triaging flaky CI."
},
{
"type": "skill",
"name": "skill-creator",
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md"
}
{ "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI." },
{ "type": "skill", "name": "skill-creator", "path": "/Users/me/.codex/skills/skill-creator/SKILL.md" }
]
}
}
@@ -567,115 +481,28 @@ Example:
$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage.
```
Use `skills/list` to fetch the available skills (optionally scoped by `cwds`, with `forceReload`).
Use `skills/list` to fetch the available skills (optionally scoped by `cwd` and/or with `forceReload`).
```json
{ "method": "skills/list", "id": 25, "params": {
"cwds": ["/Users/me/project"],
"cwd": "/Users/me/project",
"forceReload": false
} }
{ "id": 25, "result": {
"data": [{
"cwd": "/Users/me/project",
"skills": [
{
"name": "skill-creator",
"description": "Create or update a Codex skill",
"enabled": true,
"interface": {
"displayName": "Skill Creator",
"shortDescription": "Create or update a Codex skill",
"iconSmall": "icon.svg",
"iconLarge": "icon-large.svg",
"brandColor": "#111111",
"defaultPrompt": "Add a new skill for triaging flaky CI."
}
}
],
"errors": []
}]
} }
```
To enable or disable a skill by path:
```json
{
"method": "skills/config/write",
"id": 26,
"params": {
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md",
"enabled": false
}
}
```
## Apps
Use `app/list` to fetch available apps (connectors). Each entry includes metadata like the app `id`, display `name`, `installUrl`, and whether it is currently accessible.
```json
{ "method": "app/list", "id": 50, "params": {
"cursor": null,
"limit": 50
} }
{ "id": 50, "result": {
"data": [
{
"id": "demo-app",
"name": "Demo App",
"description": "Example connector for documentation.",
"logoUrl": "https://example.com/demo-app.png",
"logoUrlDark": null,
"distributionChannel": null,
"installUrl": "https://chatgpt.com/apps/demo-app/demo-app",
"isAccessible": true
}
],
"nextCursor": null
} }
```
Invoke an app by inserting `$<app-slug>` in the text input. The slug is derived from the app name: lowercase it and replace non-alphanumeric characters with `-` (for example, "Demo App" becomes `$demo-app`). Add a `mention` input item (recommended) so the server uses the exact `app://<connector-id>` path rather than guessing by name.
Example:
```
$demo-app Pull the latest updates from the team.
```
```json
{
"method": "turn/start",
"id": 51,
"params": {
"threadId": "thread-1",
"input": [
{
"type": "text",
"text": "$demo-app Pull the latest updates from the team."
},
{ "type": "mention", "name": "Demo App", "path": "app://demo-app" }
"skills": [
{ "name": "skill-creator", "description": "Create or update a Codex skill" }
]
}
}
} }
```
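A sketch of that slug rule (our own illustration, not code from the repo); how runs of separators or leading/trailing punctuation are handled is unspecified here, so this keeps the simplest per-character replacement:

```rust
// app_slug("Demo App") == "demo-app"
fn app_slug(name: &str) -> String {
    name.to_lowercase()
        .chars()
        .map(|c| if c.is_ascii_alphanumeric() { c } else { '-' })
        .collect()
}
```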
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.
### Authentication modes
Codex supports these authentication modes. The current mode is surfaced in `account/updated` (`authMode`) and can be inferred from `account/read`.
- **API key (`apiKey`)**: Caller supplies an OpenAI API key via `account/login/start` with `type: "apiKey"`. The API key is saved and used for API requests.
- **ChatGPT managed (`chatgpt`)** (recommended): Codex owns the ChatGPT OAuth flow and refresh tokens. Start via `account/login/start` with `type: "chatgpt"`; Codex persists tokens to disk and refreshes them automatically.
### API Overview
- `account/read` — fetch current account info; optionally refresh tokens.
- `account/login/start` — begin login (`apiKey`, `chatgpt`).
- `account/login/start` — begin login (`apiKey` or `chatgpt`).
- `account/login/completed` (notify) — emitted when a login attempt finishes (success or error).
- `account/login/cancel` — cancel a pending ChatGPT login by `loginId`.
- `account/logout` — sign out; triggers `account/updated`.
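As a sketch (the `id` is arbitrary; the `type` values come from the list above), starting a ChatGPT-managed login looks like:

```rust
// Begin a ChatGPT login; the server later emits account/login/completed
// (and account/updated) once the browser flow finishes.
let request = serde_json::json!({
    "method": "account/login/start",
    "id": 60,
    "params": { "type": "chatgpt" }
});
```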


@@ -25,7 +25,6 @@ use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::DeprecationNoticeNotification;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
@@ -44,7 +43,6 @@ use codex_app_server_protocol::McpToolCallResult;
use codex_app_server_protocol::McpToolCallStatus;
use codex_app_server_protocol::PatchApplyStatus;
use codex_app_server_protocol::PatchChangeKind as V2PatchChangeKind;
use codex_app_server_protocol::PlanDeltaNotification;
use codex_app_server_protocol::RawResponseItemCompletedNotification;
use codex_app_server_protocol::ReasoningSummaryPartAddedNotification;
use codex_app_server_protocol::ReasoningSummaryTextDeltaNotification;
@@ -53,14 +51,9 @@ use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_app_server_protocol::TerminalInteractionNotification;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadNameUpdatedNotification;
use codex_app_server_protocol::ThreadRollbackResponse;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
use codex_app_server_protocol::ToolRequestUserInputOption;
use codex_app_server_protocol::ToolRequestUserInputParams;
use codex_app_server_protocol::ToolRequestUserInputQuestion;
use codex_app_server_protocol::ToolRequestUserInputResponse;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnDiffUpdatedNotification;
@@ -88,11 +81,8 @@ use codex_core::protocol::TurnDiffEvent;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ThreadId;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::ReviewOutputEvent;
use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer;
use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::PathBuf;
@@ -119,7 +109,6 @@ pub(crate) async fn apply_bespoke_event_handling(
msg,
} = event;
match msg {
EventMsg::TurnStarted(_) => {}
EventMsg::TurnComplete(_ev) => {
handle_turn_complete(
conversation_id,
@@ -246,9 +235,6 @@ pub(crate) async fn apply_bespoke_event_handling(
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
item_id: item_id.clone(),
reason,
command: Some(command_string.clone()),
cwd: Some(cwd.clone()),
command_actions: Some(command_actions.clone()),
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
};
let rx = outgoing
@@ -272,93 +258,6 @@ pub(crate) async fn apply_bespoke_event_handling(
});
}
},
EventMsg::RequestUserInput(request) => {
if matches!(api_version, ApiVersion::V2) {
let questions = request
.questions
.into_iter()
.map(|question| ToolRequestUserInputQuestion {
id: question.id,
header: question.header,
question: question.question,
is_other: question.is_other,
is_secret: question.is_secret,
options: question.options.map(|options| {
options
.into_iter()
.map(|option| ToolRequestUserInputOption {
label: option.label,
description: option.description,
})
.collect()
}),
})
.collect();
let params = ToolRequestUserInputParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
item_id: request.call_id,
questions,
};
let rx = outgoing
.send_request(ServerRequestPayload::ToolRequestUserInput(params))
.await;
tokio::spawn(async move {
on_request_user_input_response(event_turn_id, rx, conversation).await;
});
} else {
error!(
"request_user_input is only supported on api v2 (call_id: {})",
request.call_id
);
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
}
EventMsg::DynamicToolCallRequest(request) => {
if matches!(api_version, ApiVersion::V2) {
let call_id = request.call_id;
let params = DynamicToolCallParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
call_id: call_id.clone(),
tool: request.tool,
arguments: request.arguments,
};
let rx = outgoing
.send_request(ServerRequestPayload::DynamicToolCall(params))
.await;
tokio::spawn(async move {
crate::dynamic_tools::on_call_response(call_id, rx, conversation).await;
});
} else {
error!(
"dynamic tool calls are only supported on api v2 (call_id: {})",
request.call_id
);
let call_id = request.call_id;
let _ = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: CoreDynamicToolResponse {
call_id,
output: "dynamic tool calls require api v2".to_string(),
success: false,
},
})
.await;
}
}
// TODO(celia): properly construct McpToolCall TurnItem in core.
EventMsg::McpToolCallBegin(begin_event) => {
let notification = construct_mcp_tool_call_notification(
@@ -595,27 +494,14 @@ pub(crate) async fn apply_bespoke_event_handling(
.await;
}
EventMsg::AgentMessageContentDelta(event) => {
let codex_protocol::protocol::AgentMessageContentDeltaEvent { item_id, delta, .. } =
event;
let notification = AgentMessageDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id,
delta,
};
outgoing
.send_server_notification(ServerNotification::AgentMessageDelta(notification))
.await;
}
EventMsg::PlanDelta(event) => {
let notification = PlanDeltaNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item_id: event.item_id,
delta: event.delta,
};
outgoing
.send_server_notification(ServerNotification::PlanDelta(notification))
.send_server_notification(ServerNotification::AgentMessageDelta(notification))
.await;
}
EventMsg::ContextCompacted(..) => {
@@ -1060,15 +946,7 @@ pub(crate) async fn apply_bespoke_event_handling(
};
if let Some(request_id) = pending {
let Some(rollout_path) = conversation.rollout_path() else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "thread has no persisted rollout".to_string(),
data: None,
};
outgoing.send_error(request_id, error).await;
return;
};
let rollout_path = conversation.rollout_path();
let response = match read_summary_from_rollout(
rollout_path.as_path(),
fallback_model_provider.as_str(),
@@ -1113,17 +991,6 @@ pub(crate) async fn apply_bespoke_event_handling(
outgoing.send_response(request_id, response).await;
}
}
EventMsg::ThreadNameUpdated(thread_name_event) => {
if let ApiVersion::V2 = api_version {
let notification = ThreadNameUpdatedNotification {
thread_id: thread_name_event.thread_id.to_string(),
thread_name: thread_name_event.thread_name,
};
outgoing
.send_server_notification(ServerNotification::ThreadNameUpdated(notification))
.await;
}
}
EventMsg::TurnDiff(turn_diff_event) => {
handle_turn_diff(
conversation_id,
@@ -1175,7 +1042,6 @@ async fn handle_turn_plan_update(
api_version: ApiVersion,
outgoing: &OutgoingMessageSender,
) {
// `update_plan` is a todo/checklist tool; it is not related to plan-mode updates
if let ApiVersion::V2 = api_version {
let notification = TurnPlanUpdatedNotification {
thread_id: conversation_id.to_string(),
@@ -1481,65 +1347,6 @@ async fn on_exec_approval_response(
}
}
async fn on_request_user_input_response(
event_turn_id: String,
receiver: oneshot::Receiver<JsonValue>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
return;
}
};
let response =
serde_json::from_value::<ToolRequestUserInputResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize ToolRequestUserInputResponse: {err}");
ToolRequestUserInputResponse {
answers: HashMap::new(),
}
});
let response = CoreRequestUserInputResponse {
answers: response
.answers
.into_iter()
.map(|(id, answer)| {
(
id,
CoreRequestUserInputAnswer {
answers: answer.answers,
},
)
})
.collect(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response.";
fn render_review_output_text(output: &ReviewOutputEvent) -> String {

File diff suppressed because it is too large

@@ -12,10 +12,8 @@ use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::SandboxMode;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::ResidencyRequirement as CoreResidencyRequirement;
use codex_core::config_loader::SandboxModeRequirement as CoreSandboxModeRequirement;
use serde_json::json;
use std::path::PathBuf;
@@ -31,15 +29,9 @@ impl ConfigApi {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
cloud_requirements: CloudRequirementsLoader,
) -> Self {
Self {
service: ConfigService::new(
codex_home,
cli_overrides,
loader_overrides,
cloud_requirements,
),
service: ConfigService::new(codex_home, cli_overrides, loader_overrides),
}
}
@@ -92,9 +84,6 @@ fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigR
.filter_map(map_sandbox_mode_requirement_to_api)
.collect()
}),
enforce_residency: requirements
.enforce_residency
.map(map_residency_requirement_to_api),
}
}
@@ -107,14 +96,6 @@ fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Opti
}
}
fn map_residency_requirement_to_api(
residency: CoreResidencyRequirement,
) -> codex_app_server_protocol::ResidencyRequirement {
match residency {
CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
}
}
fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
@@ -155,8 +136,6 @@ mod tests {
CoreSandboxModeRequirement::ExternalSandbox,
]),
mcp_servers: None,
rules: None,
enforce_residency: Some(CoreResidencyRequirement::Us),
};
let mapped = map_requirements_toml_to_api(requirements);
@@ -172,9 +151,5 @@ mod tests {
mapped.allowed_sandbox_modes,
Some(vec![SandboxMode::ReadOnly]),
);
assert_eq!(
mapped.enforce_residency,
Some(codex_app_server_protocol::ResidencyRequirement::Us),
);
}
}


@@ -1,58 +0,0 @@
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_core::CodexThread;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::protocol::Op;
use std::sync::Arc;
use tokio::sync::oneshot;
use tracing::error;
pub(crate) async fn on_call_response(
call_id: String,
receiver: oneshot::Receiver<serde_json::Value>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let fallback = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: "dynamic tool request failed".to_string(),
success: false,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: fallback,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
return;
}
};
let response = serde_json::from_value::<DynamicToolCallResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize DynamicToolCallResponse: {err}");
DynamicToolCallResponse {
output: "dynamic tool response was invalid".to_string(),
success: false,
}
});
let response = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: response.output,
success: response.success,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id,
response,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
}


@@ -1,155 +0,0 @@
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
pub(crate) fn compute_source_filters(
source_kinds: Option<Vec<ThreadSourceKind>>,
) -> (Vec<CoreSessionSource>, Option<Vec<ThreadSourceKind>>) {
let Some(source_kinds) = source_kinds else {
return (INTERACTIVE_SESSION_SOURCES.to_vec(), None);
};
if source_kinds.is_empty() {
return (INTERACTIVE_SESSION_SOURCES.to_vec(), None);
}
let requires_post_filter = source_kinds.iter().any(|kind| {
matches!(
kind,
ThreadSourceKind::Exec
| ThreadSourceKind::AppServer
| ThreadSourceKind::SubAgent
| ThreadSourceKind::SubAgentReview
| ThreadSourceKind::SubAgentCompact
| ThreadSourceKind::SubAgentThreadSpawn
| ThreadSourceKind::SubAgentOther
| ThreadSourceKind::Unknown
)
});
if requires_post_filter {
(Vec::new(), Some(source_kinds))
} else {
let interactive_sources = source_kinds
.iter()
.filter_map(|kind| match kind {
ThreadSourceKind::Cli => Some(CoreSessionSource::Cli),
ThreadSourceKind::VsCode => Some(CoreSessionSource::VSCode),
ThreadSourceKind::Exec
| ThreadSourceKind::AppServer
| ThreadSourceKind::SubAgent
| ThreadSourceKind::SubAgentReview
| ThreadSourceKind::SubAgentCompact
| ThreadSourceKind::SubAgentThreadSpawn
| ThreadSourceKind::SubAgentOther
| ThreadSourceKind::Unknown => None,
})
.collect::<Vec<_>>();
(interactive_sources, Some(source_kinds))
}
}
pub(crate) fn source_kind_matches(source: &CoreSessionSource, filter: &[ThreadSourceKind]) -> bool {
filter.iter().any(|kind| match kind {
ThreadSourceKind::Cli => matches!(source, CoreSessionSource::Cli),
ThreadSourceKind::VsCode => matches!(source, CoreSessionSource::VSCode),
ThreadSourceKind::Exec => matches!(source, CoreSessionSource::Exec),
ThreadSourceKind::AppServer => matches!(source, CoreSessionSource::Mcp),
ThreadSourceKind::SubAgent => matches!(source, CoreSessionSource::SubAgent(_)),
ThreadSourceKind::SubAgentReview => {
matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Review)
)
}
ThreadSourceKind::SubAgentCompact => {
matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Compact)
)
}
ThreadSourceKind::SubAgentThreadSpawn => matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::ThreadSpawn { .. })
),
ThreadSourceKind::SubAgentOther => matches!(
source,
CoreSessionSource::SubAgent(CoreSubAgentSource::Other(_))
),
ThreadSourceKind::Unknown => matches!(source, CoreSessionSource::Unknown),
})
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::ThreadId;
use pretty_assertions::assert_eq;
use uuid::Uuid;
#[test]
fn compute_source_filters_defaults_to_interactive_sources() {
let (allowed_sources, filter) = compute_source_filters(None);
assert_eq!(allowed_sources, INTERACTIVE_SESSION_SOURCES.to_vec());
assert_eq!(filter, None);
}
#[test]
fn compute_source_filters_empty_means_interactive_sources() {
let (allowed_sources, filter) = compute_source_filters(Some(Vec::new()));
assert_eq!(allowed_sources, INTERACTIVE_SESSION_SOURCES.to_vec());
assert_eq!(filter, None);
}
#[test]
fn compute_source_filters_interactive_only_skips_post_filtering() {
let source_kinds = vec![ThreadSourceKind::Cli, ThreadSourceKind::VsCode];
let (allowed_sources, filter) = compute_source_filters(Some(source_kinds.clone()));
assert_eq!(
allowed_sources,
vec![CoreSessionSource::Cli, CoreSessionSource::VSCode]
);
assert_eq!(filter, Some(source_kinds));
}
#[test]
fn compute_source_filters_subagent_variant_requires_post_filtering() {
let source_kinds = vec![ThreadSourceKind::SubAgentReview];
let (allowed_sources, filter) = compute_source_filters(Some(source_kinds.clone()));
assert_eq!(allowed_sources, Vec::new());
assert_eq!(filter, Some(source_kinds));
}
#[test]
fn source_kind_matches_distinguishes_subagent_variants() {
let parent_thread_id =
ThreadId::from_string(&Uuid::new_v4().to_string()).expect("valid thread id");
let review = CoreSessionSource::SubAgent(CoreSubAgentSource::Review);
let spawn = CoreSessionSource::SubAgent(CoreSubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
});
assert!(source_kind_matches(
&review,
&[ThreadSourceKind::SubAgentReview]
));
assert!(!source_kind_matches(
&review,
&[ThreadSourceKind::SubAgentThreadSpawn]
));
assert!(source_kind_matches(
&spawn,
&[ThreadSourceKind::SubAgentThreadSpawn]
));
assert!(!source_kind_matches(
&spawn,
&[ThreadSourceKind::SubAgentReview]
));
}
}


@@ -1,14 +1,17 @@
use std::num::NonZero;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use codex_app_server_protocol::FuzzyFileSearchResult;
use codex_file_search as file_search;
use tokio::task::JoinSet;
use tracing::warn;
const MATCH_LIMIT: usize = 50;
const LIMIT_PER_ROOT: usize = 50;
const MAX_THREADS: usize = 12;
const COMPUTE_INDICES: bool = true;
pub(crate) async fn run_fuzzy_file_search(
query: String,
@@ -20,54 +23,64 @@ pub(crate) async fn run_fuzzy_file_search(
     }
     #[expect(clippy::expect_used)]
-    let limit = NonZero::new(MATCH_LIMIT).expect("MATCH_LIMIT should be a valid non-zero usize");
+    let limit_per_root =
+        NonZero::new(LIMIT_PER_ROOT).expect("LIMIT_PER_ROOT should be a valid non-zero usize");
     let cores = std::thread::available_parallelism()
         .map(std::num::NonZero::get)
         .unwrap_or(1);
     let threads = cores.min(MAX_THREADS);
-    #[expect(clippy::expect_used)]
-    let threads = NonZero::new(threads.max(1)).expect("threads should be non-zero");
-    let search_dirs: Vec<PathBuf> = roots.iter().map(PathBuf::from).collect();
+    let threads_per_root = (threads / roots.len()).max(1);
+    let threads = NonZero::new(threads_per_root).unwrap_or(NonZeroUsize::MIN);
-    let mut files = match tokio::task::spawn_blocking(move || {
-        file_search::run(
-            query.as_str(),
-            search_dirs,
-            file_search::FileSearchOptions {
-                limit,
-                threads,
-                compute_indices: true,
-                ..Default::default()
-            },
-            Some(cancellation_flag),
-        )
-    })
-    .await
-    {
-        Ok(Ok(res)) => res
-            .matches
-            .into_iter()
-            .map(|m| {
-                let file_name = m.path.file_name().unwrap_or_default();
-                FuzzyFileSearchResult {
-                    root: m.root.to_string_lossy().to_string(),
-                    path: m.path.to_string_lossy().to_string(),
-                    file_name: file_name.to_string_lossy().to_string(),
-                    score: m.score,
-                    indices: m.indices,
-                }
-            })
-            .collect::<Vec<_>>(),
-        Ok(Err(err)) => {
-            warn!("fuzzy-file-search failed: {err}");
-            Vec::new()
-        }
-        Err(err) => {
-            warn!("fuzzy-file-search join failed: {err}");
-            Vec::new()
-        }
-    };
+    let mut files: Vec<FuzzyFileSearchResult> = Vec::new();
+    let mut join_set = JoinSet::new();
+    for root in roots {
+        let search_dir = PathBuf::from(&root);
+        let query = query.clone();
+        let cancel_flag = cancellation_flag.clone();
+        join_set.spawn_blocking(move || {
+            match file_search::run(
+                query.as_str(),
+                limit_per_root,
+                &search_dir,
+                Vec::new(),
+                threads,
+                cancel_flag,
+                COMPUTE_INDICES,
+                true,
+            ) {
+                Ok(res) => Ok((root, res)),
+                Err(err) => Err((root, err)),
+            }
+        });
+    }
+    while let Some(res) = join_set.join_next().await {
+        match res {
+            Ok(Ok((root, res))) => {
+                for m in res.matches {
+                    let path = m.path;
+                    let file_name = file_search::file_name_from_path(&path);
+                    let result = FuzzyFileSearchResult {
+                        root: root.clone(),
+                        path,
+                        file_name,
+                        score: m.score,
+                        indices: m.indices,
+                    };
+                    files.push(result);
+                }
+            }
+            Ok(Err((root, err))) => {
+                warn!("fuzzy-file-search in dir '{root}' failed: {err}");
+            }
+            Err(err) => {
+                warn!("fuzzy-file-search join_next failed: {err}");
+            }
+        }
+    }
     files.sort_by(file_search::cmp_by_score_desc_then_path_asc::<
         FuzzyFileSearchResult,
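
A quick worked example of the per-root thread budget in the new version above: the worker count is the available parallelism capped at MAX_THREADS, split evenly across the search roots, and floored at one thread per root. As a standalone sketch (function name hypothetical):

    fn threads_per_root(cores: usize, max_threads: usize, roots: usize) -> usize {
        (cores.min(max_threads) / roots.max(1)).max(1)
    }

    fn main() {
        assert_eq!(threads_per_root(8, 12, 3), 2); // 8 cores, 3 roots -> 2 threads each
        assert_eq!(threads_per_root(4, 12, 8), 1); // more roots than cores -> floor of 1
    }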

View File

@@ -1,30 +1,19 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_cloud_requirements::cloud_requirements_loader;
use codex_common::CliConfigOverrides;
use codex_core::AuthManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::message_processor::MessageProcessorArgs;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ConfigLayerSource;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::TextPosition as AppTextPosition;
use codex_app_server_protocol::TextRange as AppTextRange;
use codex_core::ExecPolicyError;
use codex_core::check_execpolicy_for_warnings;
use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::TextRange as CoreTextRange;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
@@ -44,9 +33,7 @@ use tracing_subscriber::util::SubscriberInitExt;
mod bespoke_event_handling;
mod codex_message_processor;
mod config_api;
mod dynamic_tools;
mod error_code;
mod filters;
mod fuzzy_file_search;
mod message_processor;
mod models;
@@ -57,116 +44,6 @@ mod outgoing_message;
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;
fn config_warning_from_error(
summary: impl Into<String>,
err: &std::io::Error,
) -> ConfigWarningNotification {
let (path, range) = match config_error_location(err) {
Some((path, range)) => (Some(path), Some(range)),
None => (None, None),
};
ConfigWarningNotification {
summary: summary.into(),
details: Some(err.to_string()),
path,
range,
}
}
fn config_error_location(err: &std::io::Error) -> Option<(String, AppTextRange)> {
err.get_ref()
.and_then(|err| err.downcast_ref::<ConfigLoadError>())
.map(|err| {
let config_error = err.config_error();
(
config_error.path.to_string_lossy().to_string(),
app_text_range(&config_error.range),
)
})
}
fn exec_policy_warning_location(err: &ExecPolicyError) -> (Option<String>, Option<AppTextRange>) {
match err {
ExecPolicyError::ParsePolicy { path, source } => {
if let Some(location) = source.location() {
let range = AppTextRange {
start: AppTextPosition {
line: location.range.start.line,
column: location.range.start.column,
},
end: AppTextPosition {
line: location.range.end.line,
column: location.range.end.column,
},
};
return (Some(location.path), Some(range));
}
(Some(path.clone()), None)
}
_ => (None, None),
}
}
fn app_text_range(range: &CoreTextRange) -> AppTextRange {
AppTextRange {
start: AppTextPosition {
line: range.start.line,
column: range.start.column,
},
end: AppTextPosition {
line: range.end.line,
column: range.end.column,
},
}
}
fn project_config_warning(config: &Config) -> Option<ConfigWarningNotification> {
let mut disabled_folders = Vec::new();
for layer in config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
{
if !matches!(layer.name, ConfigLayerSource::Project { .. })
|| layer.disabled_reason.is_none()
{
continue;
}
if let ConfigLayerSource::Project { dot_codex_folder } = &layer.name {
disabled_folders.push((
dot_codex_folder.as_path().display().to_string(),
layer
.disabled_reason
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| "config.toml is disabled.".to_string()),
));
}
}
if disabled_folders.is_empty() {
return None;
}
let mut message = concat!(
"Project config.toml files are disabled in the following folders. ",
"Settings in those files are ignored, but skills and exec policies still load.\n",
)
.to_string();
for (index, (folder, reason)) in disabled_folders.iter().enumerate() {
let display_index = index + 1;
message.push_str(&format!(" {display_index}. {folder}\n"));
message.push_str(&format!(" {reason}\n"));
}
Some(ConfigWarningNotification {
summary: message,
details: None,
path: None,
range: None,
})
}
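
For reference, with a single disabled folder the summary built by the removed project_config_warning would read roughly as follows (folder path and exact whitespace illustrative):

    Project config.toml files are disabled in the following folders. Settings in those files are ignored, but skills and exec policies still load.
     1. /path/to/project/.codex
     config.toml is disabled.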
pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
@@ -208,38 +85,20 @@ pub async fn run_main(
format!("error parsing -c overrides: {e}"),
)
})?;
let cloud_requirements = match ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides.clone())
.build()
.await
{
Ok(config) => {
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
config.cli_auth_credentials_store_mode,
);
cloud_requirements_loader(auth_manager, config.chatgpt_base_url)
}
Err(err) => {
warn!(error = %err, "Failed to preload config for cloud requirements");
// TODO(gt): Make cloud requirements preload failures blocking once we can fail-closed.
CloudRequirementsLoader::default()
}
};
let loader_overrides_for_config_api = loader_overrides.clone();
let mut config_warnings = Vec::new();
let config = match ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides)
.cloud_requirements(cloud_requirements.clone())
.build()
.await
{
Ok(config) => config,
Err(err) => {
let message = config_warning_from_error("Invalid configuration; using defaults.", &err);
let message = ConfigWarningNotification {
summary: "Invalid configuration; using defaults.".to_string(),
details: Some(err.to_string()),
};
config_warnings.push(message);
Config::load_default_with_cli_overrides(cli_kv_overrides.clone()).map_err(|e| {
std::io::Error::new(
@@ -253,20 +112,13 @@ pub async fn run_main(
     if let Ok(Some(err)) =
         check_execpolicy_for_warnings(&config.features, &config.config_layer_stack).await
     {
-        let (path, range) = exec_policy_warning_location(&err);
         let message = ConfigWarningNotification {
             summary: "Error parsing rules; custom rules not applied.".to_string(),
             details: Some(err.to_string()),
-            path,
-            range,
         };
         config_warnings.push(message);
     }
-    if let Some(warning) = project_config_warning(&config) {
-        config_warnings.push(warning);
-    }
let feedback = CodexFeedback::new();
let otel = codex_core::otel_init::build_provider(
@@ -315,16 +167,15 @@ pub async fn run_main(
     let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
     let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
     let loader_overrides = loader_overrides_for_config_api;
-    let mut processor = MessageProcessor::new(MessageProcessorArgs {
-        outgoing: outgoing_message_sender,
+    let mut processor = MessageProcessor::new(
+        outgoing_message_sender,
         codex_linux_sandbox_exe,
-        config: std::sync::Arc::new(config),
+        std::sync::Arc::new(config),
         cli_overrides,
         loader_overrides,
-        cloud_requirements: cloud_requirements.clone(),
-        feedback: feedback.clone(),
+        feedback.clone(),
         config_warnings,
-    });
+    );
     let mut thread_created_rx = processor.thread_created_receiver();
     async move {
         let mut listen_for_threads = true;
@@ -338,7 +189,7 @@ pub async fn run_main(
                     JSONRPCMessage::Request(r) => processor.process_request(r).await,
                     JSONRPCMessage::Response(r) => processor.process_response(r).await,
                     JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
-                    JSONRPCMessage::Error(e) => processor.process_error(e).await,
+                    JSONRPCMessage::Error(e) => processor.process_error(e),
                 }
             }
             created = thread_created_rx.recv(), if listen_for_threads => {

View File

@@ -2,14 +2,9 @@ use std::path::PathBuf;
use std::sync::Arc;
use crate::codex_message_processor::CodexMessageProcessor;
use crate::codex_message_processor::CodexMessageProcessorArgs;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
use async_trait::async_trait;
use codex_app_server_protocol::ChatgptAuthTokensRefreshParams;
use codex_app_server_protocol::ChatgptAuthTokensRefreshReason;
use codex_app_server_protocol::ChatgptAuthTokensRefreshResponse;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConfigBatchWriteParams;
@@ -24,154 +19,66 @@ use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequestPayload;
use codex_core::AuthManager;
use codex_core::ThreadManager;
use codex_core::auth::ExternalAuthRefreshContext;
use codex_core::auth::ExternalAuthRefreshReason;
use codex_core::auth::ExternalAuthRefresher;
use codex_core::auth::ExternalAuthTokens;
use codex_core::config::Config;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::SetOriginatorError;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::default_client::set_default_originator;
use codex_feedback::CodexFeedback;
use codex_protocol::ThreadId;
use codex_protocol::protocol::SessionSource;
use tokio::sync::broadcast;
use tokio::time::Duration;
use tokio::time::timeout;
use toml::Value as TomlValue;
const EXTERNAL_AUTH_REFRESH_TIMEOUT: Duration = Duration::from_secs(10);
#[derive(Clone)]
struct ExternalAuthRefreshBridge {
outgoing: Arc<OutgoingMessageSender>,
}
impl ExternalAuthRefreshBridge {
fn map_reason(reason: ExternalAuthRefreshReason) -> ChatgptAuthTokensRefreshReason {
match reason {
ExternalAuthRefreshReason::Unauthorized => ChatgptAuthTokensRefreshReason::Unauthorized,
}
}
}
#[async_trait]
impl ExternalAuthRefresher for ExternalAuthRefreshBridge {
async fn refresh(
&self,
context: ExternalAuthRefreshContext,
) -> std::io::Result<ExternalAuthTokens> {
let params = ChatgptAuthTokensRefreshParams {
reason: Self::map_reason(context.reason),
previous_account_id: context.previous_account_id,
};
let (request_id, rx) = self
.outgoing
.send_request_with_id(ServerRequestPayload::ChatgptAuthTokensRefresh(params))
.await;
let result = match timeout(EXTERNAL_AUTH_REFRESH_TIMEOUT, rx).await {
Ok(result) => result.map_err(|err| {
std::io::Error::other(format!("auth refresh request canceled: {err}"))
})?,
Err(_) => {
let _canceled = self.outgoing.cancel_request(&request_id).await;
return Err(std::io::Error::other(format!(
"auth refresh request timed out after {}s",
EXTERNAL_AUTH_REFRESH_TIMEOUT.as_secs()
)));
}
};
let response: ChatgptAuthTokensRefreshResponse =
serde_json::from_value(result).map_err(std::io::Error::other)?;
Ok(ExternalAuthTokens {
access_token: response.access_token,
id_token: response.id_token,
})
}
}
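
The removed bridge above follows a request/response pattern over the outgoing channel: the server sends a JSON-RPC request to the client, awaits the reply on a oneshot receiver, and converts a missed deadline into an io::Error after cancelling the pending request. A minimal sketch of the wait-with-deadline step, assuming a String reply for simplicity:

    use tokio::sync::oneshot;
    use tokio::time::{Duration, timeout};

    async fn wait_for_reply(rx: oneshot::Receiver<String>) -> std::io::Result<String> {
        match timeout(Duration::from_secs(10), rx).await {
            // The sender was dropped: treat the request as canceled.
            Ok(reply) => reply.map_err(|err| std::io::Error::other(format!("request canceled: {err}"))),
            // The deadline elapsed first (cancelling the outstanding request
            // is elided in this sketch).
            Err(_) => Err(std::io::Error::other("request timed out after 10s")),
        }
    }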
pub(crate) struct MessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_message_processor: CodexMessageProcessor,
config_api: ConfigApi,
config: Arc<Config>,
initialized: bool,
config_warnings: Vec<ConfigWarningNotification>,
}
pub(crate) struct MessageProcessorArgs {
pub(crate) outgoing: OutgoingMessageSender,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) config: Arc<Config>,
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
pub(crate) loader_overrides: LoaderOverrides,
pub(crate) cloud_requirements: CloudRequirementsLoader,
pub(crate) feedback: CodexFeedback,
pub(crate) config_warnings: Vec<ConfigWarningNotification>,
}
 impl MessageProcessor {
     /// Create a new `MessageProcessor`, retaining a handle to the outgoing
     /// `Sender` so handlers can enqueue messages to be written to stdout.
-    pub(crate) fn new(args: MessageProcessorArgs) -> Self {
-        let MessageProcessorArgs {
-            outgoing,
-            codex_linux_sandbox_exe,
-            config,
-            cli_overrides,
-            loader_overrides,
-            cloud_requirements,
-            feedback,
-            config_warnings,
-        } = args;
+    pub(crate) fn new(
+        outgoing: OutgoingMessageSender,
+        codex_linux_sandbox_exe: Option<PathBuf>,
+        config: Arc<Config>,
+        cli_overrides: Vec<(String, TomlValue)>,
+        loader_overrides: LoaderOverrides,
+        feedback: CodexFeedback,
+        config_warnings: Vec<ConfigWarningNotification>,
+    ) -> Self {
         let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
config.cli_auth_credentials_store_mode,
);
auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id.clone());
auth_manager.set_external_auth_refresher(Arc::new(ExternalAuthRefreshBridge {
outgoing: outgoing.clone(),
}));
let thread_manager = Arc::new(ThreadManager::new(
config.codex_home.clone(),
auth_manager.clone(),
SessionSource::VSCode,
));
-    let codex_message_processor = CodexMessageProcessor::new(CodexMessageProcessorArgs {
+    let codex_message_processor = CodexMessageProcessor::new(
         auth_manager,
         thread_manager,
-        outgoing: outgoing.clone(),
+        outgoing.clone(),
         codex_linux_sandbox_exe,
-        config: Arc::clone(&config),
-        cli_overrides: cli_overrides.clone(),
-        cloud_requirements: cloud_requirements.clone(),
+        Arc::clone(&config),
+        cli_overrides.clone(),
         feedback,
-    });
-    let config_api = ConfigApi::new(
-        config.codex_home.clone(),
-        cli_overrides,
-        loader_overrides,
-        cloud_requirements,
     );
-    let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides, loader_overrides);
+    let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides, loader_overrides);
Self {
outgoing,
codex_message_processor,
config_api,
config,
initialized: false,
config_warnings,
}
@@ -244,7 +151,6 @@ impl MessageProcessor {
}
}
}
set_default_client_residency_requirement(self.config.enforce_residency.value());
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
@@ -330,9 +236,8 @@ impl MessageProcessor {
}
     /// Handle an error object received from the peer.
-    pub(crate) async fn process_error(&mut self, err: JSONRPCError) {
+    pub(crate) fn process_error(&mut self, err: JSONRPCError) {
         tracing::error!("<- error: {:?}", err);
-        self.outgoing.notify_client_error(err.id, err.error).await;
     }
async fn handle_config_read(&self, request_id: RequestId, params: ConfigReadParams) {

View File

@@ -28,7 +28,6 @@ fn model_from_preset(preset: ModelPreset) -> Model {
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
supports_personality: preset.supports_personality,
is_default: preset.is_default,
}
}

View File

@@ -39,14 +39,6 @@ impl OutgoingMessageSender {
         &self,
         request: ServerRequestPayload,
     ) -> oneshot::Receiver<Result> {
-        let (_id, rx) = self.send_request_with_id(request).await;
-        rx
-    }
-    pub(crate) async fn send_request_with_id(
-        &self,
-        request: ServerRequestPayload,
-    ) -> (RequestId, oneshot::Receiver<Result>) {
         let id = RequestId::Integer(self.next_request_id.fetch_add(1, Ordering::Relaxed));
         let outgoing_message_id = id.clone();
         let (tx_approve, rx_approve) = oneshot::channel();
@@ -62,7 +54,7 @@ impl OutgoingMessageSender {
             let mut request_id_to_callback = self.request_id_to_callback.lock().await;
             request_id_to_callback.remove(&outgoing_message_id);
         }
-        (outgoing_message_id, rx_approve)
+        rx_approve
     }
pub(crate) async fn notify_client_response(&self, id: RequestId, result: Result) {
@@ -83,30 +75,6 @@ impl OutgoingMessageSender {
}
}
pub(crate) async fn notify_client_error(&self, id: RequestId, error: JSONRPCErrorError) {
let entry = {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove_entry(&id)
};
match entry {
Some((id, _sender)) => {
warn!("client responded with error for {id:?}: {error:?}");
}
None => {
warn!("could not find callback for {id:?}");
}
}
}
pub(crate) async fn cancel_request(&self, id: &RequestId) -> bool {
let entry = {
let mut request_id_to_callback = self.request_id_to_callback.lock().await;
request_id_to_callback.remove_entry(id)
};
entry.is_some()
}
pub(crate) async fn send_response<T: Serialize>(&self, id: RequestId, response: T) {
match serde_json::to_value(response) {
Ok(result) => {
@@ -318,8 +286,6 @@ mod tests {
let notification = ServerNotification::ConfigWarning(ConfigWarningNotification {
summary: "Config error: using defaults".to_string(),
details: Some("error loading config: bad config".to_string()),
path: None,
range: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);

View File

@@ -6,7 +6,6 @@ use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::AuthMode;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::AuthDotJson;
use codex_core::auth::save_auth;
@@ -50,16 +49,6 @@ impl ChatGptAuthFixture {
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.claims.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.claims.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.claims.email = Some(email.into());
self
@@ -80,8 +69,6 @@ impl ChatGptAuthFixture {
pub struct ChatGptIdTokenClaims {
pub email: Option<String>,
pub plan_type: Option<String>,
pub chatgpt_user_id: Option<String>,
pub chatgpt_account_id: Option<String>,
}
impl ChatGptIdTokenClaims {
@@ -98,16 +85,6 @@ impl ChatGptIdTokenClaims {
self.plan_type = Some(plan_type.into());
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
}
pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
@@ -116,20 +93,10 @@ pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
     if let Some(email) = &claims.email {
         payload.insert("email".to_string(), json!(email));
     }
-    let mut auth_payload = serde_json::Map::new();
     if let Some(plan_type) = &claims.plan_type {
-        auth_payload.insert("chatgpt_plan_type".to_string(), json!(plan_type));
-    }
-    if let Some(chatgpt_user_id) = &claims.chatgpt_user_id {
-        auth_payload.insert("chatgpt_user_id".to_string(), json!(chatgpt_user_id));
-    }
-    if let Some(chatgpt_account_id) = &claims.chatgpt_account_id {
-        auth_payload.insert("chatgpt_account_id".to_string(), json!(chatgpt_account_id));
-    }
-    if !auth_payload.is_empty() {
         payload.insert(
             "https://api.openai.com/auth".to_string(),
-            serde_json::Value::Object(auth_payload),
+            json!({ "chatgpt_plan_type": plan_type }),
         );
     }
     let payload = serde_json::Value::Object(payload);
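
For reference, an id token encoded from a fixture with an email and plan type carries a payload roughly like the following before base64url encoding (values taken from the tests below); the removed branches additionally folded chatgpt_user_id and chatgpt_account_id into the same auth object:

    {
      "email": "embedded@example.com",
      "https://api.openai.com/auth": {
        "chatgpt_plan_type": "pro"
      }
    }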
@@ -159,7 +126,6 @@ pub fn write_chatgpt_auth(
let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now()));
let auth = AuthDotJson {
auth_mode: Some(AuthMode::Chatgpt),
openai_api_key: None,
tokens: Some(tokens),
last_refresh,

View File

@@ -1,72 +0,0 @@
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use std::collections::BTreeMap;
use std::path::Path;
pub fn write_mock_responses_config_toml(
codex_home: &Path,
server_uri: &str,
feature_flags: &BTreeMap<Feature, bool>,
auto_compact_limit: i64,
requires_openai_auth: Option<bool>,
model_provider_id: &str,
compact_prompt: &str,
) -> std::io::Result<()> {
// Phase 1: build the features block for config.toml.
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
// Phase 2: build provider-specific config bits.
let requires_line = match requires_openai_auth {
Some(true) => "requires_openai_auth = true\n".to_string(),
Some(false) | None => String::new(),
};
let provider_block = if model_provider_id == "openai" {
String::new()
} else {
format!(
r#"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
{requires_line}
"#
)
};
// Phase 3: write the final config file.
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
compact_prompt = "{compact_prompt}"
model_auto_compact_token_limit = {auto_compact_limit}
model_provider = "{model_provider_id}"
[features]
{feature_entries}
{provider_block}
"#
),
)
}
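
For illustration, a call to the removed helper with a non-"openai" provider id and an auto-compact limit of 1000 would write a config.toml along these lines (the server URI, compact prompt, and the [features] key emitted for Feature::RemoteModels are placeholders):

    model = "mock-model"
    approval_policy = "never"
    sandbox_mode = "read-only"
    compact_prompt = "<compact prompt>"
    model_auto_compact_token_limit = 1000
    model_provider = "mock_provider"

    [features]
    <remote_models feature key> = false

    [model_providers.mock_provider]
    name = "Mock provider for test"
    base_url = "<server uri>/v1"
    wire_api = "responses"
    request_max_retries = 0
    stream_max_retries = 0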

View File

@@ -1,5 +1,4 @@
mod auth_fixtures;
mod config;
mod mcp_process;
mod mock_model_server;
mod models_cache;
@@ -11,7 +10,6 @@ pub use auth_fixtures::ChatGptIdTokenClaims;
pub use auth_fixtures::encode_id_token;
pub use auth_fixtures::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCResponse;
pub use config::write_mock_responses_config_toml;
pub use core_test_support::format_with_current_shell;
pub use core_test_support::format_with_current_shell_display;
pub use core_test_support::format_with_current_shell_display_non_login;
@@ -29,12 +27,9 @@ pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_request_user_input_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
pub use rollout::create_fake_rollout_with_source;
pub use rollout::create_fake_rollout_with_text_elements;
pub use rollout::rollout_path;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {

View File

@@ -12,13 +12,11 @@ use tokio::process::ChildStdout;
use anyhow::Context;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
@@ -29,13 +27,11 @@ use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InterruptConversationParams;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::ListConversationsParams;
use codex_app_server_protocol::LoginAccountParams;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::NewConversationParams;
@@ -51,14 +47,11 @@ use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadForkParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadLoadedListParams;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadRollbackParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadUnarchiveParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_core::default_client::CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR;
use tokio::process::Command;
pub struct McpProcess {
@@ -98,7 +91,6 @@ impl McpProcess {
cmd.stderr(Stdio::piped());
cmd.env("CODEX_HOME", codex_home);
cmd.env("RUST_LOG", "debug");
cmd.env_remove(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR);
for (k, v) in env_overrides {
match v {
@@ -300,20 +292,6 @@ impl McpProcess {
self.send_request("account/read", params).await
}
/// Send an `account/login/start` JSON-RPC request with ChatGPT auth tokens.
pub async fn send_chatgpt_auth_tokens_login_request(
&mut self,
id_token: String,
access_token: String,
) -> anyhow::Result<i64> {
let params = LoginAccountParams::ChatgptAuthTokens {
id_token,
access_token,
};
let params = Some(serde_json::to_value(params)?);
self.send_request("account/login/start", params).await
}
/// Send a `feedback/upload` JSON-RPC request.
pub async fn send_feedback_upload_request(
&mut self,
@@ -382,15 +360,6 @@ impl McpProcess {
self.send_request("thread/archive", params).await
}
/// Send a `thread/unarchive` JSON-RPC request.
pub async fn send_thread_unarchive_request(
&mut self,
params: ThreadUnarchiveParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/unarchive", params).await
}
/// Send a `thread/rollback` JSON-RPC request.
pub async fn send_thread_rollback_request(
&mut self,
@@ -418,15 +387,6 @@ impl McpProcess {
self.send_request("thread/loaded/list", params).await
}
/// Send a `thread/read` JSON-RPC request.
pub async fn send_thread_read_request(
&mut self,
params: ThreadReadParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/read", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
@@ -436,21 +396,6 @@ impl McpProcess {
self.send_request("model/list", params).await
}
/// Send an `app/list` JSON-RPC request.
pub async fn send_apps_list_request(&mut self, params: AppsListParams) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("app/list", params).await
}
/// Send a `collaborationMode/list` JSON-RPC request.
pub async fn send_list_collaboration_modes_request(
&mut self,
params: CollaborationModeListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("collaborationMode/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,
@@ -624,15 +569,6 @@ impl McpProcess {
.await
}
pub async fn send_error(
&mut self,
id: RequestId,
error: JSONRPCErrorError,
) -> anyhow::Result<()> {
self.send_jsonrpc_message(JSONRPCMessage::Error(JSONRPCError { id, error }))
.await
}
pub async fn send_notification(
&mut self,
notification: ClientNotification,
@@ -736,10 +672,6 @@ impl McpProcess {
Ok(notification)
}
pub async fn read_next_message(&mut self) -> anyhow::Result<JSONRPCMessage> {
self.read_stream_until_message(|_| true).await
}
/// Clears any buffered messages so future reads only consider new stream items.
///
/// We call this when e.g. we want to validate against the next turn and no longer care about

View File

@@ -27,7 +27,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.into()),
base_instructions: "base instructions".to_string(),
model_messages: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,

View File

@@ -60,26 +60,3 @@ pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String>
responses::ev_completed("resp-1"),
]))
}
pub fn create_request_user_input_sse_response(call_id: &str) -> anyhow::Result<String> {
let tool_call_arguments = serde_json::to_string(&json!({
"questions": [{
"id": "confirm_path",
"header": "Confirm",
"question": "Proceed with the plan?",
"options": [{
"label": "Yes (Recommended)",
"description": "Continue the current plan."
}, {
"label": "No",
"description": "Stop and revisit the approach."
}]
}]
}))?;
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "request_user_input", &tool_call_arguments),
responses::ev_completed("resp-1"),
]))
}

View File

@@ -6,23 +6,10 @@ use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
pub fn rollout_path(codex_home: &Path, filename_ts: &str, thread_id: &str) -> PathBuf {
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
codex_home
.join("sessions")
.join(year)
.join(month)
.join(day)
.join(format!("rollout-{filename_ts}-{thread_id}.jsonl"))
}
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
@@ -38,50 +25,30 @@ pub fn create_fake_rollout(
     preview: &str,
     model_provider: Option<&str>,
     git_info: Option<GitInfo>,
 ) -> Result<String> {
-    create_fake_rollout_with_source(
-        codex_home,
-        filename_ts,
-        meta_rfc3339,
-        preview,
-        model_provider,
-        git_info,
-        SessionSource::Cli,
-    )
-}
-/// Create a minimal rollout file with an explicit session source.
-pub fn create_fake_rollout_with_source(
-    codex_home: &Path,
-    filename_ts: &str,
-    meta_rfc3339: &str,
-    preview: &str,
-    model_provider: Option<&str>,
-    git_info: Option<GitInfo>,
-    source: SessionSource,
-) -> Result<String> {
     let uuid = Uuid::new_v4();
     let uuid_str = uuid.to_string();
     let conversation_id = ThreadId::from_string(&uuid_str)?;
-    let file_path = rollout_path(codex_home, filename_ts, &uuid_str);
-    let dir = file_path
-        .parent()
-        .ok_or_else(|| anyhow::anyhow!("missing rollout parent directory"))?;
-    fs::create_dir_all(dir)?;
+    // sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
+    let year = &filename_ts[0..4];
+    let month = &filename_ts[5..7];
+    let day = &filename_ts[8..10];
+    let dir = codex_home.join("sessions").join(year).join(month).join(day);
+    fs::create_dir_all(&dir)?;
+    let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
     // Build JSONL lines
     let meta = SessionMeta {
         id: conversation_id,
-        forked_from_id: None,
         timestamp: meta_rfc3339.to_string(),
         cwd: PathBuf::from("/"),
         originator: "codex".to_string(),
         cli_version: "0.0.0".to_string(),
-        source,
         instructions: None,
+        source: SessionSource::Cli,
         model_provider: model_provider.map(str::to_string),
         base_instructions: None,
-        dynamic_tools: None,
     };
     let payload = serde_json::to_value(SessionMetaLine {
         meta,
@@ -117,13 +84,7 @@ pub fn create_fake_rollout_with_source(
         .to_string(),
     ];
-    fs::write(&file_path, lines.join("\n") + "\n")?;
-    let parsed = chrono::DateTime::parse_from_rfc3339(meta_rfc3339)?.with_timezone(&chrono::Utc);
-    let times = FileTimes::new().set_modified(parsed.into());
-    std::fs::OpenOptions::new()
-        .append(true)
-        .open(&file_path)?
-        .set_times(times)?;
+    fs::write(file_path, lines.join("\n") + "\n")?;
     Ok(uuid_str)
 }
@@ -152,15 +113,13 @@ pub fn create_fake_rollout_with_text_elements(
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
forked_from_id: None,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
dynamic_tools: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
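
The session layout used in this file: a rollout written with filename_ts = "2025-01-02T03-04-05" lands at CODEX_HOME/sessions/2025/01/02/rollout-2025-01-02T03-04-05-<uuid>.jsonl. A self-contained sketch of the path derivation (mirroring the removed rollout_path helper and the inline computation above; function name hypothetical):

    fn rollout_path_for(codex_home: &std::path::Path, filename_ts: &str, uuid: &str) -> std::path::PathBuf {
        // YYYY-MM-DDThh-mm-ss -> sessions/YYYY/MM/DD
        let (year, month, day) = (&filename_ts[0..4], &filename_ts[5..7], &filename_ts[8..10]);
        codex_home
            .join("sessions")
            .join(year)
            .join(month)
            .join(day)
            .join(format!("rollout-{filename_ts}-{uuid}.jsonl"))
    }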

View File

@@ -108,10 +108,6 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
     let AddConversationSubscriptionResponse { subscription_id } =
         to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;
-    // Drop any buffered events from conversation setup to avoid
-    // matching an earlier task_complete.
-    mcp.clear_message_buffer();
     // 3) sendUserMessage (should trigger notifications; we only validate an OK response)
     let send_user_id = mcp
         .send_send_user_message_request(SendUserMessageParams {
@@ -129,38 +125,13 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
     .await??;
     let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;
-    let task_started_notification: JSONRPCNotification = timeout(
+    // Verify the task_finished notification is received.
+    // Note this also ensures that the final request to the server was made.
+    let task_finished_notification: JSONRPCNotification = timeout(
         DEFAULT_READ_TIMEOUT,
-        mcp.read_stream_until_notification_message("codex/event/task_started"),
+        mcp.read_stream_until_notification_message("codex/event/task_complete"),
     )
     .await??;
-    let task_started_event: Event = serde_json::from_value(
-        task_started_notification
-            .params
-            .clone()
-            .expect("task_started should have params"),
-    )
-    .expect("task_started should deserialize to Event");
-    // Verify the task_finished notification for this turn is received.
-    // Note this also ensures that the final request to the server was made.
-    let task_finished_notification: JSONRPCNotification = loop {
-        let notification: JSONRPCNotification = timeout(
-            DEFAULT_READ_TIMEOUT,
-            mcp.read_stream_until_notification_message("codex/event/task_complete"),
-        )
-        .await??;
-        let event: Event = serde_json::from_value(
-            notification
-                .params
-                .clone()
-                .expect("task_complete should have params"),
-        )
-        .expect("task_complete should deserialize to Event");
-        if event.id == task_started_event.id {
-            break notification;
-        }
-    };
     let serde_json::Value::Object(map) = task_finished_notification
         .params
         .expect("notification should have params")

View File

@@ -48,7 +48,8 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
     .await??;
     let value = resp.result;
-    let expected_score = 72;
+    // The path separator on Windows affects the score.
+    let expected_score = if cfg!(windows) { 69 } else { 72 };
     assert_eq!(
         value,
@@ -58,9 +59,16 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
"root": root_path.clone(),
"path": "abexy",
"file_name": "abexy",
"score": 84,
"score": 88,
"indices": [0, 1, 2],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 74,
"indices": [0, 1, 4],
},
{
"root": root_path.clone(),
"path": sub_abce_rel,
@@ -68,13 +76,6 @@ async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> {
"score": expected_score,
"indices": [4, 5, 7],
},
{
"root": root_path.clone(),
"path": "abcde",
"file_name": "abcde",
"score": 71,
"indices": [0, 1, 4],
},
]
})
);

View File

@@ -307,7 +307,6 @@ async fn test_list_and_resume_conversations() -> Result<()> {
content: vec![ContentItem::InputText {
text: fork_history_text.to_string(),
}],
end_turn: None,
}];
let resume_with_history_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {

View File

@@ -11,7 +11,6 @@ use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_execpolicy::Policy;
use codex_protocol::ThreadId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
@@ -359,8 +358,6 @@ fn assert_permissions_message(item: &ResponseItem) {
let expected = DeveloperInstructions::from_policy(
&SandboxPolicy::DangerFullAccess,
AskForApproval::Never,
&Policy::empty(),
false,
&PathBuf::from("/tmp"),
)
.into_text();

View File

@@ -4,43 +4,28 @@ use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::ChatGptAuthFixture;
use app_test_support::ChatGptIdTokenClaims;
use app_test_support::encode_id_token;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_models_cache;
use codex_app_server_protocol::Account;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::CancelLoginAccountStatus;
use codex_app_server_protocol::ChatgptAuthTokensRefreshReason;
use codex_app_server_protocol::ChatgptAuthTokensRefreshResponse;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAccountResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use codex_protocol::account::PlanType as AccountPlanType;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::json;
use serial_test::serial;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -50,14 +35,10 @@ struct CreateConfigTomlParams {
forced_method: Option<String>,
forced_workspace_id: Option<String>,
     requires_openai_auth: Option<bool>,
-    base_url: Option<String>,
 }
 fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
     let config_toml = codex_home.join("config.toml");
-    let base_url = params
-        .base_url
-        .unwrap_or_else(|| "http://127.0.0.1:0/v1".to_string());
let forced_line = if let Some(method) = params.forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
@@ -85,7 +66,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{base_url}"
base_url = "http://127.0.0.1:0/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
@@ -152,627 +133,6 @@ async fn logout_account_removes_auth_and_notifies() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn set_auth_token_updates_account_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
let access_token = "access-embedded".to_string();
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token.clone(), access_token)
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert_eq!(payload.auth_mode, Some(AuthMode::ChatgptAuthTokens));
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: false,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(
account,
GetAccountResponse {
account: Some(Account::Chatgpt {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
}
);
Ok(())
}
#[tokio::test]
async fn account_read_refresh_token_is_noop_in_external_mode() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token, "access-embedded".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let get_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: true,
})
.await?;
let get_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(get_id)),
)
.await??;
let account: GetAccountResponse = to_response(get_resp)?;
assert_eq!(
account,
GetAccountResponse {
account: Some(Account::Chatgpt {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
requires_openai_auth: true,
}
);
let refresh_request = timeout(
Duration::from_millis(250),
mcp.read_stream_until_request_message(),
)
.await;
assert!(
refresh_request.is_err(),
"external mode should not emit account/chatgptAuthTokens/refresh for refreshToken=true"
);
Ok(())
}
async fn respond_to_refresh_request(
mcp: &mut McpProcess,
access_token: &str,
id_token: &str,
) -> Result<()> {
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, params } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
assert_eq!(params.reason, ChatgptAuthTokensRefreshReason::Unauthorized);
let response = ChatgptAuthTokensRefreshResponse {
access_token: access_token.to_string(),
id_token: id_token.to_string(),
};
mcp.send_response(request_id, serde_json::to_value(response)?)
.await?;
Ok(())
}
#[tokio::test]
// 401 response triggers account/chatgptAuthTokens/refresh and retries with new tokens.
async fn external_auth_refreshes_on_unauthorized() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let success_sse = responses::sse(vec![
responses::ev_response_created("resp-turn"),
responses::ev_assistant_message("msg-turn", "turn ok"),
responses::ev_completed("resp-turn"),
]);
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let responses_mock = responses::mount_response_sequence(
&mock_server,
vec![unauthorized, responses::sse_response(success_sse)],
)
.await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let refreshed_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("refreshed@example.com")
.plan_type("pro")
.chatgpt_account_id("org-refreshed"),
)?;
let initial_access_token = "access-initial".to_string();
let refreshed_access_token = "access-refreshed".to_string();
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(
initial_id_token.clone(),
initial_access_token.clone(),
)
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id,
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
respond_to_refresh_request(&mut mcp, &refreshed_access_token, &refreshed_id_token).await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn_completed = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let requests = responses_mock.requests();
assert_eq!(requests.len(), 2);
assert_eq!(
requests[0].header("authorization"),
Some(format!("Bearer {initial_access_token}"))
);
assert_eq!(
requests[1].header("authorization"),
Some(format!("Bearer {refreshed_access_token}"))
);
Ok(())
}
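
The refresh protocol exercised by this test, end to end (message names taken from the assertions above):

    client (IDE)                 app-server                      model API
        |                            |---- POST /responses ---------->|
        |                            |<---------------------- 401 ----|
        |<--- account/chatgptAuthTokens/refresh (JSON-RPC request) ---|
        |---- { access_token, id_token } ---------------------------->|
        |                            |---- POST /responses, retried   |
        |                            |      with the new bearer ----->|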
#[tokio::test]
// Client returns JSON-RPC error to refresh; turn fails.
async fn external_auth_refresh_error_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_error(
request_id,
JSONRPCErrorError {
code: -32_000,
message: "refresh failed".to_string(),
data: None,
},
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
// Refresh returns tokens for the wrong workspace; turn fails.
async fn external_auth_refresh_mismatched_workspace_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
forced_workspace_id: Some("org-expected".to_string()),
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-expected"),
)?;
let refreshed_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("refreshed@example.com")
.plan_type("pro")
.chatgpt_account_id("org-other"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_response(
request_id,
serde_json::to_value(ChatgptAuthTokensRefreshResponse {
access_token: "access-refreshed".to_string(),
id_token: refreshed_id_token,
})?,
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
// Refresh returns a malformed id_token; turn fails.
async fn external_auth_refresh_invalid_id_token_fails_turn() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
let unauthorized = ResponseTemplate::new(401).set_body_json(json!({
"error": { "message": "unauthorized" }
}));
let _responses_mock =
responses::mount_response_sequence(&mock_server, vec![unauthorized]).await;
let initial_id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("initial@example.com")
.plan_type("pro")
.chatgpt_account_id("org-initial"),
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(initial_id_token, "access-initial".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let thread_req = mcp
.send_thread_start_request(codex_app_server_protocol::ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<codex_app_server_protocol::ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(codex_app_server_protocol::TurnStartParams {
thread_id: thread.thread.id.clone(),
input: vec![codex_app_server_protocol::UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let refresh_req: ServerRequest = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ChatgptAuthTokensRefresh { request_id, .. } = refresh_req else {
bail!("expected account/chatgptAuthTokens/refresh request, got {refresh_req:?}");
};
mcp.send_response(
request_id,
serde_json::to_value(ChatgptAuthTokensRefreshResponse {
access_token: "access-refreshed".to_string(),
id_token: "not-a-jwt".to_string(),
})?,
)
.await?;
let _turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let completed_notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification = serde_json::from_value(
completed_notif
.params
.expect("turn/completed params must be present"),
)?;
assert_eq!(completed.turn.status, TurnStatus::Failed);
assert!(completed.turn.error.is_some());
Ok(())
}
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -944,71 +304,6 @@ async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> {
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]
async fn set_auth_token_cancels_active_chatgpt_login() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Initiate the ChatGPT login flow
let request_id = mcp.send_login_account_chatgpt_request().await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let login: LoginAccountResponse = to_response(resp)?;
let LoginAccountResponse::Chatgpt { login_id, .. } = login else {
bail!("unexpected login response: {login:?}");
};
let id_token = encode_id_token(
&ChatGptIdTokenClaims::new()
.email("embedded@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
)?;
// Set an external auth token instead of completing the ChatGPT login flow.
// This should cancel the active login attempt.
let set_id = mcp
.send_chatgpt_auth_tokens_login_request(id_token, "access-embedded".to_string())
.await?;
let set_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(set_id)),
)
.await??;
let response: LoginAccountResponse = to_response(set_resp)?;
assert_eq!(response, LoginAccountResponse::ChatgptAuthTokens {});
let _updated = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
// Verify that the active login attempt was cancelled.
// We check this by trying to cancel it and expecting a not found error.
let cancel_id = mcp
.send_cancel_login_account_request(CancelLoginAccountParams {
login_id: login_id.clone(),
})
.await?;
let cancel_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)),
)
.await??;
let cancel: CancelLoginAccountResponse = to_response(cancel_resp)?;
assert_eq!(cancel.status, CancelLoginAccountStatus::NotFound);
Ok(())
}
#[tokio::test]
// Serialize tests that launch the login server since it binds to a fixed port.
#[serial(login_port)]

View File

@@ -1,400 +0,0 @@
use std::borrow::Cow;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use axum::Json;
use axum::Router;
use axum::extract::State;
use axum::http::HeaderMap;
use axum::http::StatusCode;
use axum::http::header::AUTHORIZATION;
use axum::routing::get;
use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use pretty_assertions::assert_eq;
use rmcp::handler::server::ServerHandler;
use rmcp::model::JsonObject;
use rmcp::model::ListToolsResult;
use rmcp::model::Meta;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use rmcp::model::ToolAnnotations;
use rmcp::transport::StreamableHttpServerConfig;
use rmcp::transport::StreamableHttpService;
use rmcp::transport::streamable_http_server::session::local::LocalSessionManager;
use serde_json::json;
use tempfile::TempDir;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test]
async fn list_apps_returns_empty_when_connectors_disabled() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: Some(50),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
assert!(data.is_empty());
assert!(next_cursor.is_none());
Ok(())
}
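/// Connectors with a matching MCP tool are reported as accessible and sorted
/// first; the tool's `connector_name` metadata overrides the directory name.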
#[tokio::test]
async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
let connectors = vec![
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
AppInfo {
id: "beta".to_string(),
name: "beta".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: None,
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
let expected = vec![
AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
},
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
},
];
assert_eq!(data, expected);
assert!(next_cursor.is_none());
server_handle.abort();
Ok(())
}
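/// Walks the list with `limit: 1`: the first page yields one app plus a
/// cursor, and the second page returns the rest with no further cursor.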
#[tokio::test]
async fn list_apps_paginates_results() -> Result<()> {
let connectors = vec![
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
AppInfo {
id: "beta".to_string(),
name: "beta".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let AppsListResponse {
data: first_page,
next_cursor: first_cursor,
} = to_response(first_response)?;
let expected_first = vec![AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
}];
assert_eq!(first_page, expected_first);
let next_cursor = first_cursor.ok_or_else(|| anyhow::anyhow!("missing cursor"))?;
let second_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: Some(next_cursor),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let AppsListResponse {
data: second_page,
next_cursor: second_cursor,
} = to_response(second_response)?;
let expected_second = vec![AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
}];
assert_eq!(second_page, expected_second);
assert!(second_cursor.is_none());
server_handle.abort();
Ok(())
}
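/// State shared with the mock ChatGPT backend: the exact `Authorization` and
/// `chatgpt-account-id` header values it accepts, plus the canned directory
/// response it returns.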
#[derive(Clone)]
struct AppsServerState {
expected_bearer: String,
expected_account_id: String,
response: serde_json::Value,
}
#[derive(Clone)]
struct AppListMcpServer {
tools: Arc<Vec<Tool>>,
}
impl AppListMcpServer {
fn new(tools: Arc<Vec<Tool>>) -> Self {
Self { tools }
}
}
impl ServerHandler for AppListMcpServer {
fn get_info(&self) -> ServerInfo {
ServerInfo {
capabilities: ServerCapabilities::builder().enable_tools().build(),
..ServerInfo::default()
}
}
fn list_tools(
&self,
_request: Option<rmcp::model::PaginatedRequestParam>,
_context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
) -> impl std::future::Future<Output = Result<ListToolsResult, rmcp::ErrorData>> + Send + '_
{
let tools = self.tools.clone();
async move {
Ok(ListToolsResult {
tools: (*tools).clone(),
next_cursor: None,
meta: None,
})
}
}
}
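/// Starts an in-process mock of the apps backend: connector directory routes
/// plus a streamable-HTTP MCP service exposing the given tools. Returns the
/// base URL and the serve task's handle.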
async fn start_apps_server(
connectors: Vec<AppInfo>,
tools: Vec<Tool>,
) -> Result<(String, JoinHandle<()>)> {
let state = AppsServerState {
expected_bearer: "Bearer chatgpt-token".to_string(),
expected_account_id: "account-123".to_string(),
response: json!({ "apps": connectors, "next_token": null }),
};
let state = Arc::new(state);
let tools = Arc::new(tools);
let listener = TcpListener::bind("127.0.0.1:0").await?;
let addr = listener.local_addr()?;
let mcp_service = StreamableHttpService::new(
{
let tools = tools.clone();
move || Ok(AppListMcpServer::new(tools.clone()))
},
Arc::new(LocalSessionManager::default()),
StreamableHttpServerConfig::default(),
);
let router = Router::new()
.route("/connectors/directory/list", get(list_directory_connectors))
.route(
"/connectors/directory/list_workspace",
get(list_directory_connectors),
)
.with_state(state)
.nest_service("/api/codex/apps", mcp_service);
let handle = tokio::spawn(async move {
let _ = axum::serve(listener, router).await;
});
Ok((format!("http://{addr}"), handle))
}
async fn list_directory_connectors(
State(state): State<Arc<AppsServerState>>,
headers: HeaderMap,
) -> Result<impl axum::response::IntoResponse, StatusCode> {
let bearer_ok = headers
.get(AUTHORIZATION)
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_bearer);
let account_ok = headers
.get("chatgpt-account-id")
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_account_id);
if bearer_ok && account_ok {
Ok(Json(state.response.clone()))
} else {
Err(StatusCode::UNAUTHORIZED)
}
}
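/// Builds an MCP tool whose meta carries `connector_id` and `connector_name`;
/// the tests rely on the app server using this metadata to match tools to
/// directory connectors (and to override the connector's display name).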
fn connector_tool(connector_id: &str, connector_name: &str) -> Result<Tool> {
let schema: JsonObject = serde_json::from_value(json!({
"type": "object",
"additionalProperties": false
}))?;
let mut tool = Tool::new(
Cow::Owned(format!("connector_{connector_id}")),
Cow::Borrowed("Connector test tool"),
Arc::new(schema),
);
tool.annotations = Some(ToolAnnotations::new().read_only(true));
let mut meta = Meta::new();
meta.0
.insert("connector_id".to_string(), json!(connector_id));
meta.0
.insert("connector_name".to_string(), json!(connector_name));
tool.meta = Some(meta);
Ok(tool)
}
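/// Points `chatgpt_base_url` at the mock server and turns on the connectors
/// feature.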
fn write_connectors_config(codex_home: &std::path::Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
chatgpt_base_url = "{base_url}"
[features]
connectors = true
"#
),
)
}

View File

@@ -1,111 +0,0 @@
//! Validates that the collaboration mode list endpoint returns the expected default presets.
//!
//! The test drives the app server through the MCP harness and asserts that the list response
//! includes the plan, code, pair programming, and execute modes with their default model and reasoning
//! effort settings, which keeps the API contract visible in one place.
#![allow(clippy::unwrap_used)]
use std::time::Duration;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::CollaborationModeListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::models_manager::test_builtin_collaboration_mode_presets;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
/// Confirms the server returns the default collaboration mode presets in a stable order.
#[tokio::test]
async fn list_collaboration_modes_returns_presets() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_collaboration_modes_request(CollaborationModeListParams {})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let CollaborationModeListResponse { data: items } =
to_response::<CollaborationModeListResponse>(response)?;
let expected = [
plan_preset(),
code_preset(),
pair_programming_preset(),
execute_preset(),
];
assert_eq!(expected.len(), items.len());
for (expected_mask, actual_mask) in expected.iter().zip(items.iter()) {
assert_eq!(expected_mask.name, actual_mask.name);
assert_eq!(expected_mask.mode, actual_mask.mode);
assert_eq!(expected_mask.model, actual_mask.model);
assert_eq!(expected_mask.reasoning_effort, actual_mask.reasoning_effort);
assert_eq!(
expected_mask.developer_instructions,
actual_mask.developer_instructions
);
}
Ok(())
}
/// Builds the plan preset that the list response is expected to return.
///
/// If the defaults change in the app server, this helper must be updated alongside the
/// contract; otherwise the test fails in a way that falsely suggests an API regression.
fn plan_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Plan))
.unwrap()
}
/// Builds the pair programming preset that the list response is expected to return.
///
/// The helper keeps the expected model and reasoning defaults co-located with the test
/// so that mismatches point directly at the API contract being exercised.
fn pair_programming_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::PairProgramming))
.unwrap()
}
/// Builds the code preset that the list response is expected to return.
fn code_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Code))
.unwrap()
}
/// Builds the execute preset that the list response is expected to return.
///
/// The execute preset uses a different reasoning effort to capture the higher-effort
/// execution contract the server currently exposes.
fn execute_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Execute))
.unwrap()
}

View File

@@ -1,282 +0,0 @@
//! End-to-end compaction flow tests.
//!
//! Phases:
//! 1) Arrange: mock responses/compact endpoints + config.
//! 2) Act: start a thread and submit multiple turns to trigger auto-compaction.
//! 3) Assert: verify item/started + item/completed notifications for context compaction.
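//!
//! A compaction surfaces as a paired item (a sketch; only what the assertions
//! below check is guaranteed):
//!   item/started   { thread_id, item: ContextCompaction { id } }
//!   item/completed { thread_id, item: ContextCompaction { id } }  // same id as started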
#![expect(clippy::expect_used)]
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_mock_responses_config_toml;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::features::Feature;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const AUTO_COMPACT_LIMIT: i64 = 1_000;
const COMPACT_PROMPT: &str = "Summarize the conversation.";
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compaction_local_emits_started_and_completed_items() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let sse1 = responses::sse(vec![
responses::ev_assistant_message("m1", "FIRST_REPLY"),
responses::ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = responses::sse(vec![
responses::ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = responses::sse(vec![
responses::ev_assistant_message("m3", "LOCAL_SUMMARY"),
responses::ev_completed_with_tokens("r3", 200),
]);
let sse4 = responses::sse(vec![
responses::ev_assistant_message("m4", "FINAL_REPLY"),
responses::ev_completed_with_tokens("r4", 120),
]);
responses::mount_sse_sequence(&server, vec![sse1, sse2, sse3, sse4]).await;
let codex_home = TempDir::new()?;
write_mock_responses_config_toml(
codex_home.path(),
&server.uri(),
&BTreeMap::default(),
AUTO_COMPACT_LIMIT,
None,
"mock_provider",
COMPACT_PROMPT,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;
for message in ["first", "second", "third"] {
send_turn_and_wait(&mut mcp, &thread_id, message).await?;
}
let started = wait_for_context_compaction_started(&mut mcp).await?;
let completed = wait_for_context_compaction_completed(&mut mcp).await?;
let ThreadItem::ContextCompaction { id: started_id } = started.item else {
unreachable!("started item should be context compaction");
};
let ThreadItem::ContextCompaction { id: completed_id } = completed.item else {
unreachable!("completed item should be context compaction");
};
assert_eq!(started.thread_id, thread_id);
assert_eq!(completed.thread_id, thread_id);
assert_eq!(started_id, completed_id);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compaction_remote_emits_started_and_completed_items() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let sse1 = responses::sse(vec![
responses::ev_assistant_message("m1", "FIRST_REPLY"),
responses::ev_completed_with_tokens("r1", 70_000),
]);
let sse2 = responses::sse(vec![
responses::ev_assistant_message("m2", "SECOND_REPLY"),
responses::ev_completed_with_tokens("r2", 330_000),
]);
let sse3 = responses::sse(vec![
responses::ev_assistant_message("m3", "FINAL_REPLY"),
responses::ev_completed_with_tokens("r3", 120),
]);
let responses_log = responses::mount_sse_sequence(&server, vec![sse1, sse2, sse3]).await;
let compacted_history = vec![
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: "REMOTE_COMPACT_SUMMARY".to_string(),
}],
end_turn: None,
},
ResponseItem::Compaction {
encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(),
},
];
let compact_mock = responses::mount_compact_json_once(
&server,
serde_json::json!({ "output": compacted_history }),
)
.await;
let codex_home = TempDir::new()?;
let mut features = BTreeMap::default();
features.insert(Feature::RemoteCompaction, true);
write_mock_responses_config_toml(
codex_home.path(),
&server.uri(),
&features,
AUTO_COMPACT_LIMIT,
Some(true),
"openai",
COMPACT_PROMPT,
)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt").plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
let server_base_url = format!("{}/v1", server.uri());
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[
("OPENAI_BASE_URL", Some(server_base_url.as_str())),
("OPENAI_API_KEY", None),
],
)
.await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;
for message in ["first", "second", "third"] {
send_turn_and_wait(&mut mcp, &thread_id, message).await?;
}
let started = wait_for_context_compaction_started(&mut mcp).await?;
let completed = wait_for_context_compaction_completed(&mut mcp).await?;
let ThreadItem::ContextCompaction { id: started_id } = started.item else {
unreachable!("started item should be context compaction");
};
let ThreadItem::ContextCompaction { id: completed_id } = completed.item else {
unreachable!("completed item should be context compaction");
};
assert_eq!(started.thread_id, thread_id);
assert_eq!(completed.thread_id, thread_id);
assert_eq!(started_id, completed_id);
let compact_requests = compact_mock.requests();
assert_eq!(compact_requests.len(), 1);
assert_eq!(compact_requests[0].path(), "/v1/responses/compact");
let response_requests = responses_log.requests();
assert_eq!(response_requests.len(), 3);
Ok(())
}
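/// Starts a thread against the mock model and returns its id.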
async fn start_thread(mcp: &mut McpProcess) -> Result<String> {
let thread_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
Ok(thread.id)
}
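/// Sends one user turn and waits for its turn/completed notification before
/// returning the turn id.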
async fn send_turn_and_wait(mcp: &mut McpProcess, thread_id: &str, text: &str) -> Result<String> {
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread_id.to_string(),
input: vec![V2UserInput::Text {
text: text.to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
wait_for_turn_completed(mcp, &turn.id).await?;
Ok(turn.id)
}
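/// Reads turn/completed notifications until the one for `turn_id` arrives;
/// completions for other turns are skipped.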
async fn wait_for_turn_completed(mcp: &mut McpProcess, turn_id: &str) -> Result<()> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let completed: TurnCompletedNotification =
serde_json::from_value(notification.params.clone().expect("turn/completed params"))?;
if completed.turn.id == turn_id {
return Ok(());
}
}
}
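/// Reads item/started notifications until a context compaction item appears.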
async fn wait_for_context_compaction_started(
mcp: &mut McpProcess,
) -> Result<ItemStartedNotification> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/started"),
)
.await??;
let started: ItemStartedNotification =
serde_json::from_value(notification.params.clone().expect("item/started params"))?;
if let ThreadItem::ContextCompaction { .. } = started.item {
return Ok(started);
}
}
}
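/// Reads item/completed notifications until a context compaction item appears.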
async fn wait_for_context_compaction_completed(
mcp: &mut McpProcess,
) -> Result<ItemCompletedNotification> {
loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("item/completed"),
)
.await??;
let completed: ItemCompletedNotification =
serde_json::from_value(notification.params.clone().expect("item/completed params"))?;
if let ThreadItem::ContextCompaction { .. } = completed.item {
return Ok(completed);
}
}
}

View File

@@ -18,10 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::set_project_trust_level;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -56,7 +53,6 @@ sandbox_mode = "workspace-write"
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -105,7 +101,6 @@ view_image = false
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -146,52 +141,6 @@ view_image = false
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_project_layers_for_cwd() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(&codex_home, r#"model = "gpt-user""#)?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let project_config = AbsolutePathBuf::try_from(project_config_dir)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: Some(workspace.path().to_string_lossy().into_owned()),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config, origins, ..
} = to_response(resp)?;
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
assert_eq!(
origins.get("model_reasoning_effort").expect("origin").name,
ConfigLayerSource::Project {
dot_codex_folder: project_config
}
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -246,7 +195,6 @@ writable_roots = [{}]
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -333,7 +281,6 @@ model = "gpt-old"
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
@@ -368,7 +315,6 @@ model = "gpt-old"
let verify_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let verify_resp: JSONRPCResponse = timeout(
@@ -465,7 +411,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(

View File

@@ -1,286 +0,0 @@
use anyhow::Context;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_app_server_protocol::DynamicToolSpec;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
/// Ensures dynamic tool specs are serialized into the model request payload.
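/// In the captured request body the tool should appear roughly as
/// `{ "name": "demo_tool", "description": "Demo dynamic tool", "parameters": <input schema> }`
/// (a sketch; only those three fields are asserted below).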
#[tokio::test]
async fn thread_start_injects_dynamic_tools_into_model_requests() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Use a minimal JSON schema so we can assert the tool payload round-trips.
let input_schema = json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
});
let dynamic_tool = DynamicToolSpec {
name: "demo_tool".to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: input_schema.clone(),
};
// Thread start injects dynamic tools into the thread's tool registry.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool.clone()]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so a model request is issued.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
// Inspect the captured model request to assert the tool spec made it through.
let bodies = responses_bodies(&server).await?;
let body = bodies
.first()
.context("expected at least one responses request")?;
let tool = find_tool(body, &dynamic_tool.name)
.context("expected dynamic tool to be injected into request")?;
assert_eq!(
tool.get("description"),
Some(&Value::String(dynamic_tool.description.clone()))
);
assert_eq!(tool.get("parameters"), Some(&input_schema));
Ok(())
}
/// Exercises the full dynamic tool call path (server request, client response, model output).
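/// Flow: the model emits a `function_call`, the app server surfaces it as a
/// `DynamicToolCall` server request, the client answers with output/success,
/// and the follow-up model request carries the matching `function_call_output`.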
#[tokio::test]
async fn dynamic_tool_call_round_trip_sends_output_to_model() -> Result<()> {
let call_id = "dyn-call-1";
let tool_name = "demo_tool";
let tool_args = json!({ "city": "Paris" });
let tool_call_arguments = serde_json::to_string(&tool_args)?;
// First response triggers a dynamic tool call, second closes the turn.
let responses = vec![
responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, tool_name, &tool_call_arguments),
responses::ev_completed("resp-1"),
]),
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let dynamic_tool = DynamicToolSpec {
name: tool_name.to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
}),
};
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so the tool call is emitted.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Run the tool".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Read the tool call request from the app server.
let request = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let (request_id, params) = match request {
ServerRequest::DynamicToolCall { request_id, params } => (request_id, params),
other => panic!("expected DynamicToolCall request, got {other:?}"),
};
let expected = DynamicToolCallParams {
thread_id: thread.id,
turn_id: turn.id,
call_id: call_id.to_string(),
tool: tool_name.to_string(),
arguments: tool_args.clone(),
};
assert_eq!(params, expected);
// Respond to the tool call so the model receives a function_call_output.
let response = DynamicToolCallResponse {
output: "dynamic-ok".to_string(),
success: true,
};
mcp.send_response(request_id, serde_json::to_value(response)?)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let bodies = responses_bodies(&server).await?;
let output = bodies
.iter()
.find_map(|body| function_call_output_text(body, call_id))
.context("expected function_call_output in follow-up request")?;
assert_eq!(output, "dynamic-ok");
Ok(())
}
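/// Collects the JSON bodies of every request the mock server received on a
/// `/responses` path.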
async fn responses_bodies(server: &MockServer) -> Result<Vec<Value>> {
let requests = server
.received_requests()
.await
.context("failed to fetch received requests")?;
requests
.into_iter()
.filter(|req| req.url.path().ends_with("/responses"))
.map(|req| {
req.body_json::<Value>()
.context("request body should be JSON")
})
.collect()
}
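/// Finds a tool entry by name in the request body's `tools` array.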
fn find_tool<'a>(body: &'a Value, name: &str) -> Option<&'a Value> {
body.get("tools")
.and_then(Value::as_array)
.and_then(|tools| {
tools
.iter()
.find(|tool| tool.get("name").and_then(Value::as_str) == Some(name))
})
}
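/// Extracts the `output` string of the `function_call_output` item matching
/// `call_id`, if the body contains one.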
fn function_call_output_text(body: &Value, call_id: &str) -> Option<String> {
body.get("input")
.and_then(Value::as_array)
.and_then(|items| {
items.iter().find(|item| {
item.get("type").and_then(Value::as_str) == Some("function_call_output")
&& item.get("call_id").and_then(Value::as_str) == Some(call_id)
})
})
.and_then(|item| item.get("output"))
.and_then(Value::as_str)
.map(str::to_string)
}
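/// Writes a minimal config that routes the mock model through the local mock
/// provider with retries disabled.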
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,25 +1,17 @@
mod account;
mod analytics;
mod app_list;
mod collaboration_mode_list;
mod compaction;
mod config_rpc;
mod dynamic_tools;
mod initialize;
mod model_list;
mod output_schema;
mod plan_item;
mod rate_limits;
mod request_user_input;
mod review;
mod thread_archive;
mod thread_fork;
mod thread_list;
mod thread_loaded_list;
mod thread_read;
mod thread_resume;
mod thread_rollback;
mod thread_start;
mod thread_unarchive;
mod turn_interrupt;
mod turn_start;

View File

@@ -72,7 +72,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: true,
},
Model {
@@ -100,7 +99,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -120,7 +118,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -154,7 +151,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
];

View File

@@ -1,257 +0,0 @@
use anyhow::Result;
use anyhow::anyhow;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::to_response;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::PlanDeltaNotification;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Settings;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn plan_mode_uses_proposed_plan_block_for_plan_item() -> Result<()> {
skip_if_no_network!(Ok(()));
let plan_block = "<proposed_plan>\n# Final plan\n- first\n- second\n</proposed_plan>\n";
let full_message = format!("Preface\n{plan_block}Postscript");
let responses = vec![responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_message_item_added("msg-1", ""),
responses::ev_output_text_delta(&full_message),
responses::ev_assistant_message("msg-1", &full_message),
responses::ev_completed("resp-1"),
])];
let server = create_mock_responses_server_sequence(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let turn = start_plan_mode_turn(&mut mcp).await?;
let (_, completed_items, plan_deltas, turn_completed) =
collect_turn_notifications(&mut mcp).await?;
assert_eq!(turn_completed.turn.id, turn.id);
assert_eq!(turn_completed.turn.status, TurnStatus::Completed);
let expected_plan_id = format!("{}-plan", turn.id);
let expected_plan = ThreadItem::Plan {
id: expected_plan_id.clone(),
text: "# Final plan\n- first\n- second\n".to_string(),
};
let streamed_plan = plan_deltas
.iter()
.map(|delta| delta.delta.as_str())
.collect::<String>();
assert_eq!(streamed_plan, "# Final plan\n- first\n- second\n");
assert!(
plan_deltas
.iter()
.all(|delta| delta.item_id == expected_plan_id)
);
let plan_items = completed_items
.iter()
.filter_map(|item| match item {
ThreadItem::Plan { .. } => Some(item.clone()),
_ => None,
})
.collect::<Vec<_>>();
assert_eq!(plan_items, vec![expected_plan]);
assert!(
completed_items
.iter()
.any(|item| matches!(item, ThreadItem::AgentMessage { .. })),
"agent message items should still be emitted alongside the plan item"
);
Ok(())
}
#[tokio::test]
async fn plan_mode_without_proposed_plan_does_not_emit_plan_item() -> Result<()> {
skip_if_no_network!(Ok(()));
let responses = vec![responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
])];
let server = create_mock_responses_server_sequence(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let _turn = start_plan_mode_turn(&mut mcp).await?;
let (_, completed_items, plan_deltas, _) = collect_turn_notifications(&mut mcp).await?;
let has_plan_item = completed_items
.iter()
.any(|item| matches!(item, ThreadItem::Plan { .. }));
assert!(!has_plan_item);
assert!(plan_deltas.is_empty());
Ok(())
}
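/// Starts a thread, then a plan-mode turn on it, returning the turn under test.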
async fn start_plan_mode_turn(mcp: &mut McpProcess) -> Result<codex_app_server_protocol::Turn> {
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let thread = to_response::<ThreadStartResponse>(thread_resp)?.thread;
let collaboration_mode = CollaborationMode {
mode: ModeKind::Plan,
settings: Settings {
model: "mock-model".to_string(),
reasoning_effort: None,
developer_instructions: None,
},
};
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id,
input: vec![V2UserInput::Text {
text: "Plan this".to_string(),
text_elements: Vec::new(),
}],
collaboration_mode: Some(collaboration_mode),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
Ok(to_response::<TurnStartResponse>(turn_resp)?.turn)
}
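/// Buffers item/started, item/completed, and item/plan/delta notifications
/// until turn/completed arrives, then returns everything collected.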
async fn collect_turn_notifications(
mcp: &mut McpProcess,
) -> Result<(
Vec<ThreadItem>,
Vec<ThreadItem>,
Vec<PlanDeltaNotification>,
TurnCompletedNotification,
)> {
let mut started_items = Vec::new();
let mut completed_items = Vec::new();
let mut plan_deltas = Vec::new();
loop {
let message = timeout(DEFAULT_READ_TIMEOUT, mcp.read_next_message()).await??;
let JSONRPCMessage::Notification(notification) = message else {
continue;
};
match notification.method.as_str() {
"item/started" => {
let params = notification
.params
.ok_or_else(|| anyhow!("item/started notifications must include params"))?;
let payload: ItemStartedNotification = serde_json::from_value(params)?;
started_items.push(payload.item);
}
"item/completed" => {
let params = notification
.params
.ok_or_else(|| anyhow!("item/completed notifications must include params"))?;
let payload: ItemCompletedNotification = serde_json::from_value(params)?;
completed_items.push(payload.item);
}
"item/plan/delta" => {
let params = notification
.params
.ok_or_else(|| anyhow!("item/plan/delta notifications must include params"))?;
let payload: PlanDeltaNotification = serde_json::from_value(params)?;
plan_deltas.push(payload);
}
"turn/completed" => {
let params = notification
.params
.ok_or_else(|| anyhow!("turn/completed notifications must include params"))?;
let payload: TurnCompletedNotification = serde_json::from_value(params)?;
return Ok((started_items, completed_items, plan_deltas, payload));
}
_ => {}
}
}
}
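/// Writes a config pinned to the mock provider with remote models disabled and
/// collaboration modes enabled.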
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let features = BTreeMap::from([
(Feature::RemoteModels, false),
(Feature::CollaborationModes, true),
]);
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
{feature_entries}
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,138 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_request_user_input_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn request_user_input_round_trip() -> Result<()> {
let codex_home = tempfile::TempDir::new()?;
let responses = vec![
create_request_user_input_sse_response("call1")?,
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?;
let turn_start_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "ask something".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
collaboration_mode: Some(CollaborationMode {
mode: ModeKind::Plan,
settings: Settings {
model: "mock-model".to_string(),
reasoning_effort: Some(ReasoningEffort::Medium),
developer_instructions: None,
},
}),
..Default::default()
})
.await?;
let turn_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_start_id)),
)
.await??;
let TurnStartResponse { turn, .. } = to_response(turn_start_resp)?;
let server_req = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ToolRequestUserInput { request_id, params } = server_req else {
panic!("expected ToolRequestUserInput request, got: {server_req:?}");
};
assert_eq!(params.thread_id, thread.id);
assert_eq!(params.turn_id, turn.id);
assert_eq!(params.item_id, "call1");
assert_eq!(params.questions.len(), 1);
mcp.send_response(
request_id,
serde_json::json!({
"answers": {
"confirm_path": { "answers": ["yes"] }
}
}),
)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
Ok(())
}
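/// Writes a config pinned to the mock provider with collaboration modes
/// enabled and an `untrusted` approval policy.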
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "untrusted"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
collaboration_modes = true
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -77,9 +77,8 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> {
assert_ne!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
let thread_path = thread.path.clone().expect("thread path");
assert!(thread_path.is_absolute());
assert_ne!(thread_path, original_path);
assert!(thread.path.is_absolute());
assert_ne!(thread.path, original_path);
assert!(thread.cwd.is_absolute());
assert_eq!(thread.source, SessionSource::VsCode);

View File

@@ -1,34 +1,17 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::create_fake_rollout_with_source;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadSortKey;
use codex_app_server_protocol::ThreadSourceKind;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::ThreadId;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SubAgentSource;
use pretty_assertions::assert_eq;
use std::cmp::Reverse;
use std::fs;
use std::fs::FileTimes;
use std::fs::OpenOptions;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -43,29 +26,12 @@ async fn list_threads(
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
source_kinds: Option<Vec<ThreadSourceKind>>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
list_threads_with_sort(mcp, cursor, limit, providers, source_kinds, None, archived).await
}
async fn list_threads_with_sort(
mcp: &mut McpProcess,
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
source_kinds: Option<Vec<ThreadSourceKind>>,
sort_key: Option<ThreadSortKey>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor,
limit,
sort_key,
model_providers: providers,
source_kinds,
archived,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -116,16 +82,6 @@ fn timestamp_at(
)
}
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -138,8 +94,6 @@ async fn thread_list_basic_empty() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert!(data.is_empty());
@@ -202,8 +156,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data1.len(), 2);
@@ -211,7 +163,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -228,8 +179,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert!(data2.len() <= 2);
@@ -237,7 +186,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -279,8 +227,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
None,
Some(10),
Some(vec!["other_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data.len(), 1);
@@ -290,7 +236,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
assert_eq!(thread.model_provider, "other_provider");
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_ts);
assert_eq!(thread.updated_at, expected_ts);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -299,207 +244,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_empty_source_kinds_defaults_to_interactive_only() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let cli_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"CLI",
Some("mock_provider"),
None,
)?;
let exec_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Exec",
Some("mock_provider"),
None,
CoreSessionSource::Exec,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(Vec::new()),
None,
)
.await?;
assert_eq!(next_cursor, None);
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![cli_id.as_str()]);
assert_ne!(cli_id, exec_id);
assert_eq!(data[0].source, SessionSource::Cli);
Ok(())
}
#[tokio::test]
async fn thread_list_filters_by_source_kind_subagent_thread_spawn() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let cli_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"CLI",
Some("mock_provider"),
None,
)?;
let parent_thread_id = ThreadId::from_string(&Uuid::new_v4().to_string())?;
let subagent_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"SubAgent",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
}),
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, next_cursor } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentThreadSpawn]),
None,
)
.await?;
assert_eq!(next_cursor, None);
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![subagent_id.as_str()]);
assert_ne!(cli_id, subagent_id);
assert!(matches!(data[0].source, SessionSource::SubAgent(_)));
Ok(())
}
#[tokio::test]
async fn thread_list_filters_by_subagent_variant() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let parent_thread_id = ThreadId::from_string(&Uuid::new_v4().to_string())?;
let review_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T09-00-00",
"2025-02-02T09:00:00Z",
"Review",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Review),
)?;
let compact_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T10-00-00",
"2025-02-02T10:00:00Z",
"Compact",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Compact),
)?;
let spawn_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T11-00-00",
"2025-02-02T11:00:00Z",
"Spawn",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::ThreadSpawn {
parent_thread_id,
depth: 1,
}),
)?;
let other_id = create_fake_rollout_with_source(
codex_home.path(),
"2025-02-02T12-00-00",
"2025-02-02T12:00:00Z",
"Other",
Some("mock_provider"),
None,
CoreSessionSource::SubAgent(SubAgentSource::Other("custom".to_string())),
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let review = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentReview]),
None,
)
.await?;
let review_ids: Vec<_> = review
.data
.iter()
.map(|thread| thread.id.as_str())
.collect();
assert_eq!(review_ids, vec![review_id.as_str()]);
let compact = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentCompact]),
None,
)
.await?;
let compact_ids: Vec<_> = compact
.data
.iter()
.map(|thread| thread.id.as_str())
.collect();
assert_eq!(compact_ids, vec![compact_id.as_str()]);
let spawn = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentThreadSpawn]),
None,
)
.await?;
let spawn_ids: Vec<_> = spawn.data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(spawn_ids, vec![spawn_id.as_str()]);
let other = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(vec![ThreadSourceKind::SubAgentOther]),
None,
)
.await?;
let other_ids: Vec<_> = other.data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(other_ids, vec![other_id.as_str()]);
Ok(())
}
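The four variant assertions above together imply a one-to-one mapping from stored `SessionSource` values to `ThreadSourceKind` filters. A sketch of that matching, with enum shapes mirrored from how the tests construct sources (`ThreadId` simplified to `String`); the real definitions live in the protocol crates and may differ:

#[derive(Debug, Clone, PartialEq)]
enum SubAgentSource {
    Review,
    Compact,
    ThreadSpawn { parent_thread_id: String, depth: u32 },
    Other(String),
}

#[derive(Debug, Clone, PartialEq)]
enum SessionSource {
    Cli,
    Exec,
    SubAgent(SubAgentSource),
}

// Returns true when a stored session source satisfies the requested kind.
fn matches_kind(source: &SessionSource, kind: ThreadSourceKind) -> bool {
    match (source, kind) {
        (SessionSource::SubAgent(SubAgentSource::Review), ThreadSourceKind::SubAgentReview) => true,
        (SessionSource::SubAgent(SubAgentSource::Compact), ThreadSourceKind::SubAgentCompact) => true,
        (
            SessionSource::SubAgent(SubAgentSource::ThreadSpawn { .. }),
            ThreadSourceKind::SubAgentThreadSpawn,
        ) => true,
        (SessionSource::SubAgent(SubAgentSource::Other(_)), ThreadSourceKind::SubAgentOther) => true,
        _ => false,
    }
}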
#[tokio::test]
async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -531,8 +275,6 @@ async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
None,
Some(8),
Some(vec!["target_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -577,8 +319,6 @@ async fn thread_list_enforces_max_limit() -> Result<()> {
None,
Some(200),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -624,8 +364,6 @@ async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()
None,
Some(10),
Some(vec!["target_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(
@@ -672,8 +410,6 @@ async fn thread_list_includes_git_info() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
let thread = data
@@ -693,428 +429,3 @@ async fn thread_list_includes_git_info() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_default_sorts_by_created_at() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_a.as_str(), id_b.as_str(), id_c.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_sort_updated_at_orders_by_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_old = create_fake_rollout(
codex_home.path(),
"2025-01-01T10-00-00",
"2025-01-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_mid = create_fake_rollout(
codex_home.path(),
"2025-01-01T11-00-00",
"2025-01-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_new = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T10-00-00", &id_old).as_path(),
"2025-01-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T11-00-00", &id_mid).as_path(),
"2025-01-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T12-00-00", &id_new).as_path(),
"2025-01-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_old.as_str(), id_mid.as_str(), id_new.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_paginates_with_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-02-01T12-00-00",
"2025-02-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
"2025-02-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
"2025-02-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T12-00-00", &id_c).as_path(),
"2025-02-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse {
data: page1,
next_cursor: cursor1,
} = list_threads_with_sort(
&mut mcp,
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page1: Vec<_> = page1.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page1, vec![id_a.as_str(), id_b.as_str()]);
let cursor1 = cursor1.expect("expected nextCursor on first page");
let ThreadListResponse {
data: page2,
next_cursor: cursor2,
} = list_threads_with_sort(
&mut mcp,
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page2: Vec<_> = page2.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page2, vec![id_c.as_str()]);
assert_eq!(cursor2, None);
Ok(())
}
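The two-page walk above generalizes to the usual drain-the-cursor loop: keep requesting pages until `next_cursor` comes back `None`. A sketch built on the same test helpers, assuming `list_threads_with_sort` has the signature the calls above use:

async fn collect_all_ids(mcp: &mut McpProcess) -> Result<Vec<String>> {
    let mut ids = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let ThreadListResponse { data, next_cursor } = list_threads_with_sort(
            mcp,
            cursor.take(),
            Some(2),
            Some(vec!["mock_provider".to_string()]),
            None,
            Some(ThreadSortKey::UpdatedAt),
            None,
        )
        .await?;
        // Accumulate ids from this page before deciding whether to continue.
        ids.extend(data.into_iter().map(|thread| thread.id));
        match next_cursor {
            Some(next) => cursor = Some(next),
            None => break,
        }
    }
    Ok(ids)
}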
#[tokio::test]
async fn thread_list_created_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let updated_at = "2025-02-03T00:00:00Z";
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
updated_at,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
updated_at,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
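Taken together, the two tie-break tests fix the ordering contract: sort by timestamp descending, then break ties by UUID descending. A minimal sketch of that comparator over illustrative (timestamp, id) pairs:

use std::cmp::Reverse;
use uuid::Uuid;

fn sort_thread_keys(keys: &mut [(i64, Uuid)]) {
    // Newest timestamp first; for equal timestamps, larger UUID first.
    keys.sort_by_key(|&(ts, id)| (Reverse(ts), Reverse(id)));
}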
#[tokio::test]
async fn thread_list_updated_at_uses_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let thread_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &thread_id).as_path(),
"2025-02-05T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let thread = data
.iter()
.find(|item| item.id == thread_id)
.expect("expected a thread for the created rollout");
let expected_created =
chrono::DateTime::parse_from_rfc3339("2025-02-01T10:00:00Z")?.timestamp();
let expected_updated =
chrono::DateTime::parse_from_rfc3339("2025-02-05T00:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_created);
assert_eq!(thread.updated_at, expected_updated);
Ok(())
}
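This test asserts that `updated_at` is derived from the rollout file's mtime rather than from anything recorded inside the rollout. A sketch of that derivation, with error handling reduced to a pre-epoch clamp:

use std::path::Path;
use std::time::UNIX_EPOCH;

fn updated_at_from_mtime(path: &Path) -> std::io::Result<i64> {
    let modified = std::fs::metadata(path)?.modified()?;
    // Clamp mtimes before the Unix epoch to 0 for simplicity.
    let secs = modified
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs() as i64)
        .unwrap_or(0);
    Ok(secs)
}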
#[tokio::test]
async fn thread_list_archived_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let active_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T10-00-00",
"2025-03-01T10:00:00Z",
"Active",
Some("mock_provider"),
None,
)?;
let archived_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T09-00-00",
"2025-03-01T09:00:00Z",
"Archived",
Some("mock_provider"),
None,
)?;
let archived_dir = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
fs::create_dir_all(&archived_dir)?;
let archived_source = rollout_path(codex_home.path(), "2025-03-01T09-00-00", &archived_id);
let archived_dest = archived_dir.join(
archived_source
.file_name()
.expect("archived rollout should have a file name"),
);
fs::rename(&archived_source, &archived_dest)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, active_id);
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
Some(true),
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, archived_id);
Ok(())
}
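The archived filter works purely on file location: the test archives a rollout by renaming it into the archived-sessions subdirectory, keeping the file name. A sketch of that move as a helper, with the subdirectory name passed in rather than hard-coded:

use std::path::{Path, PathBuf};

fn archive_rollout(codex_home: &Path, rollout: &Path, archived_subdir: &str) -> std::io::Result<PathBuf> {
    let archived_dir = codex_home.join(archived_subdir);
    std::fs::create_dir_all(&archived_dir)?;
    let file_name = rollout
        .file_name()
        .expect("rollout should have a file name");
    let dest = archived_dir.join(file_name);
    // Rename rather than copy so the rollout appears in exactly one listing.
    std::fs::rename(rollout, &dest)?;
    Ok(dest)
}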
#[tokio::test]
async fn thread_list_invalid_cursor_returns_error() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = init_mcp(codex_home.path()).await?;
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor: Some("not-a-cursor".to_string()),
limit: Some(2),
sort_key: None,
model_providers: Some(vec!["mock_provider".to_string()]),
source_kinds: None,
archived: None,
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, -32600);
assert_eq!(error.error.message, "invalid cursor: not-a-cursor");
Ok(())
}
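The invalid-cursor test fixes the error contract: JSON-RPC code -32600 (invalid request) with the offending cursor echoed back in the message. A one-line sketch of that mapping, with an assumed helper name:

fn invalid_cursor_error(raw: &str) -> (i64, String) {
    // -32600 is the JSON-RPC "invalid request" code.
    (-32600, format!("invalid cursor: {raw}"))
}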

View File

@@ -1,159 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadReadResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use pretty_assertions::assert_eq;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_read_returns_summary_without_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = [TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: false,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
assert_eq!(thread.turns.len(), 0);
Ok(())
}
#[tokio::test]
async fn thread_read_can_include_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: true,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.turns.len(), 1);
let turn = &thread.turns[0];
assert_eq!(turn.status, TurnStatus::Completed);
assert_eq!(turn.items.len(), 1, "expected user message item");
match &turn.items[0] {
ThreadItem::UserMessage { content, .. } => {
assert_eq!(
content,
&vec![UserInput::Text {
text: preview.to_string(),
text_elements: text_elements.clone().into_iter().map(Into::into).collect(),
}]
);
}
other => panic!("expected user message item, got {other:?}"),
}
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -2,9 +2,7 @@ use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::Utc;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
@@ -13,25 +11,18 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::config_types::Personality;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT: &str = "You are Codex, a coding agent based on GPT-5. You and the user share the same workspace and collaborate to achieve the user's goals.";
#[tokio::test]
async fn thread_resume_returns_original_thread() -> Result<()> {
@@ -71,9 +62,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
@@ -85,10 +74,10 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let text_elements = vec![TextElement {
byte_range: ByteRange { start: 0, end: 5 },
placeholder: Some("<note>".into()),
}];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
@@ -121,7 +110,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert!(thread.path.is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -151,116 +140,6 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_without_overrides_does_not_change_updated_at_or_mtime() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let thread_id = rollout.conversation_id.clone();
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread_id.clone(),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_with_overrides_defers_updated_at_until_turn_start() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
let rollout = setup_rollout_fixture(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: rollout.conversation_id.clone(),
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?;
assert_eq!(thread.updated_at, rollout.expected_updated_at);
let after_resume_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert_eq!(after_resume_modified, rollout.before_modified);
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: rollout.conversation_id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let after_turn_modified = std::fs::metadata(&rollout.rollout_file_path)?.modified()?;
assert!(after_turn_modified > rollout.before_modified);
Ok(())
}
#[tokio::test]
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
@@ -283,7 +162,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let thread_path = thread.path.clone().expect("thread path");
let thread_path = thread.path.clone();
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: "not-a-valid-thread-id".to_string(),
@@ -300,9 +179,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
@@ -337,7 +214,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
content: vec![ContentItem::InputText {
text: history_text.to_string(),
}],
end_turn: None,
}];
// Resume with explicit history and override the model.
@@ -367,91 +243,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_accepts_personality_override() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
model: Some("gpt-5.2-codex".to_string()),
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let _resume: ThreadResumeResponse = to_response::<ThreadResumeResponse>(resume_resp)?;
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
assert!(
developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected a personality update message in developer input, got {developer_texts:?}"
);
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT),
"expected default base instructions from history, got {instructions_text:?}"
);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
@@ -459,16 +250,12 @@ fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io
config_toml,
format!(
r#"
model = "gpt-5.2-codex"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
remote_models = false
personality = true
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
@@ -479,51 +266,3 @@ stream_max_retries = 0
),
)
}
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = chrono::DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
std::fs::OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
struct RolloutFixture {
conversation_id: String,
rollout_file_path: PathBuf,
before_modified: std::time::SystemTime,
expected_updated_at: i64,
}
fn setup_rollout_fixture(codex_home: &Path, server_uri: &str) -> Result<RolloutFixture> {
create_config_toml(codex_home, server_uri)?;
let preview = "Saved user message";
let filename_ts = "2025-01-05T12-00-00";
let meta_rfc3339 = "2025-01-05T12:00:00Z";
let expected_updated_at_rfc3339 = "2025-01-07T00:00:00Z";
let conversation_id = create_fake_rollout_with_text_elements(
codex_home,
filename_ts,
meta_rfc3339,
preview,
Vec::new(),
Some("mock_provider"),
None,
)?;
let rollout_file_path = rollout_path(codex_home, filename_ts, &conversation_id);
set_rollout_mtime(rollout_file_path.as_path(), expected_updated_at_rfc3339)?;
let before_modified = std::fs::metadata(&rollout_file_path)?.modified()?;
let expected_updated_at = chrono::DateTime::parse_from_rfc3339(expected_updated_at_rfc3339)?
.with_timezone(&Utc)
.timestamp();
Ok(RolloutFixture {
conversation_id,
rollout_file_path,
before_modified,
expected_updated_at,
})
}

View File

@@ -8,9 +8,6 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_core::config::set_project_trust_level;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -72,47 +69,6 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_start_respects_project_config_from_cwd() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
cwd: Some(workspace.path().to_string_lossy().into_owned()),
..Default::default()
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse {
reasoning_effort, ..
} = to_response::<ThreadStartResponse>(resp)?;
assert_eq!(reasoning_effort, Some(ReasoningEffort::High));
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");

View File

@@ -1,101 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadUnarchiveParams;
use codex_app_server_protocol::ThreadUnarchiveResponse;
use codex_core::find_archived_thread_path_by_id_str;
use codex_core::find_thread_path_by_id_str;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
#[tokio::test]
async fn thread_unarchive_moves_rollout_back_into_sessions_directory() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let rollout_path = find_thread_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected rollout path for thread id to exist");
let archive_id = mcp
.send_thread_archive_request(ThreadArchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let archive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(archive_id)),
)
.await??;
let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?;
let archived_path = find_archived_thread_path_by_id_str(codex_home.path(), &thread.id)
.await?
.expect("expected archived rollout path for thread id to exist");
let archived_path_display = archived_path.display();
assert!(
archived_path.exists(),
"expected {archived_path_display} to exist"
);
let unarchive_id = mcp
.send_thread_unarchive_request(ThreadUnarchiveParams {
thread_id: thread.id.clone(),
})
.await?;
let unarchive_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(unarchive_id)),
)
.await??;
let _: ThreadUnarchiveResponse = to_response::<ThreadUnarchiveResponse>(unarchive_resp)?;
let rollout_path_display = rollout_path.display();
assert!(
rollout_path.exists(),
"expected rollout path {rollout_path_display} to be restored"
);
assert!(
!archived_path.exists(),
"expected archived rollout path {archived_path_display} to be moved"
);
Ok(())
}
fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, config_contents())
}
fn config_contents() -> &'static str {
r#"model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
"#
}

View File

@@ -34,18 +34,10 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -59,12 +51,7 @@ async fn turn_start_sends_originator_header() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(
@@ -134,12 +121,7 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -157,10 +139,10 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".to_string()),
)];
let text_elements = vec![TextElement {
byte_range: ByteRange { start: 0, end: 5 },
placeholder: Some("<note>".to_string()),
}];
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
@@ -226,12 +208,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -328,162 +305,6 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let collaboration_mode = CollaborationMode {
mode: ModeKind::Custom,
settings: Settings {
model: "mock-model-collab".to_string(),
reasoning_effort: Some(ReasoningEffort::High),
developer_instructions: None,
},
};
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model-override".to_string()),
effort: Some(ReasoningEffort::Low),
summary: Some(ReasoningSummary::Auto),
output_schema: None,
collaboration_mode: Some(collaboration_mode),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let payload = request.body_json();
assert_eq!(payload["model"].as_str(), Some("mock-model-collab"));
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("exp-codex-personality".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
if developer_texts.is_empty() {
eprintln!("request body: {}", request.body_json());
}
assert!(
developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected personality update message in developer input, got {developer_texts:?}"
);
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
@@ -496,12 +317,7 @@ async fn turn_start_accepts_local_image_input() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -576,12 +392,7 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
// Default approval is untrusted to force elicitation on first turn.
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -706,12 +517,7 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -858,12 +664,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -901,9 +702,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -932,9 +731,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -1003,12 +800,7 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1185,12 +977,7 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
create_final_assistant_message_sse_response("patch 2 applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1366,12 +1153,7 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch declined")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1511,12 +1293,16 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::UnifiedExec, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let config_toml = codex_home.path().join("config.toml");
let mut config_contents = std::fs::read_to_string(&config_toml)?;
config_contents.push_str(
r#"
[features]
unified_exec = true
"#,
);
std::fs::write(&config_toml, config_contents)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1609,18 +1395,8 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
unreachable!("loop ensures we break on command execution items");
};
assert_eq!(completed_id, "uexec-1");
assert!(
matches!(
completed_status,
CommandExecutionStatus::Completed | CommandExecutionStatus::Failed
),
"unexpected command execution status: {completed_status:?}"
);
if completed_status == CommandExecutionStatus::Completed {
assert_eq!(exit_code, Some(0));
} else {
assert!(exit_code.is_some(), "expected exit_code for failed command");
}
assert_eq!(completed_status, CommandExecutionStatus::Completed);
assert_eq!(exit_code, Some(0));
assert_eq!(
completed_process_id.as_deref(),
Some(started_process_id.as_str())
@@ -1640,24 +1416,7 @@ fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
feature_flags: &BTreeMap<Feature, bool>,
) -> std::io::Result<()> {
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -1669,9 +1428,6 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
{feature_entries}
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"

View File

@@ -1,5 +1,4 @@
use crate::types::CodeTaskDetailsResponse;
use crate::types::ConfigFileResponse;
use crate::types::CreditStatusDetails;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
@@ -245,20 +244,6 @@ impl Client {
self.decode_json::<TurnAttemptsSiblingTurnsResponse>(&url, &ct, &body)
}
/// Fetch the managed requirements file from codex-backend.
///
/// `GET /api/codex/config/requirements` (Codex API style) or
/// `GET /wham/config/requirements` (ChatGPT backend-api style).
pub async fn get_config_requirements_file(&self) -> Result<ConfigFileResponse> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/config/requirements", self.base_url),
PathStyle::ChatGptApi => format!("{}/wham/config/requirements", self.base_url),
};
let req = self.http.get(&url).headers(self.headers());
let (body, ct) = self.exec_request(req, "GET", &url).await?;
self.decode_json::<ConfigFileResponse>(&url, &ct, &body)
}
/// Create a new task (user turn) by POSTing to the appropriate backend path
/// based on `path_style`. Returns the created task id.
pub async fn create_task(&self, request_body: serde_json::Value) -> Result<String> {
@@ -351,7 +336,6 @@ impl Client {
fn map_plan_type(plan_type: crate::types::PlanType) -> AccountPlanType {
match plan_type {
crate::types::PlanType::Free => AccountPlanType::Free,
crate::types::PlanType::Go => AccountPlanType::Go,
crate::types::PlanType::Plus => AccountPlanType::Plus,
crate::types::PlanType::Pro => AccountPlanType::Pro,
crate::types::PlanType::Team => AccountPlanType::Team,
@@ -359,6 +343,7 @@ impl Client {
crate::types::PlanType::Enterprise => AccountPlanType::Enterprise,
crate::types::PlanType::Edu | crate::types::PlanType::Education => AccountPlanType::Edu,
crate::types::PlanType::Guest
| crate::types::PlanType::Go
| crate::types::PlanType::FreeWorkspace
| crate::types::PlanType::Quorum
| crate::types::PlanType::K12 => AccountPlanType::Unknown,

View File

@@ -4,7 +4,6 @@ pub mod types;
pub use client::Client;
pub use types::CodeTaskDetailsResponse;
pub use types::CodeTaskDetailsResponseExt;
pub use types::ConfigFileResponse;
pub use types::PaginatedListTaskListItem;
pub use types::TaskListItem;
pub use types::TurnAttemptsSiblingTurnsResponse;

View File

@@ -1,4 +1,3 @@
pub use codex_backend_openapi_models::models::ConfigFileResponse;
pub use codex_backend_openapi_models::models::CreditStatusDetails;
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
pub use codex_backend_openapi_models::models::PlanType;

View File

@@ -17,8 +17,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
codex-git = { workspace = true }
urlencoding = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

View File

@@ -6,20 +6,11 @@ use crate::chatgpt_token::init_chatgpt_token_from_auth;
use anyhow::Context;
use serde::de::DeserializeOwned;
use std::time::Duration;
/// Make a GET request to the ChatGPT backend API.
pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
config: &Config,
path: String,
) -> anyhow::Result<T> {
chatgpt_get_request_with_timeout(config, path, None).await
}
pub(crate) async fn chatgpt_get_request_with_timeout<T: DeserializeOwned>(
config: &Config,
path: String,
timeout: Option<Duration>,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
@@ -36,17 +27,14 @@ pub(crate) async fn chatgpt_get_request_with_timeout<T: DeserializeOwned>(
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
});
let mut request = client
let response = client
.get(&url)
.bearer_auth(&token.access_token)
.header("chatgpt-account-id", account_id?)
.header("Content-Type", "application/json");
if let Some(timeout) = timeout {
request = request.timeout(timeout);
}
let response = request.send().await.context("Failed to send request")?;
.header("Content-Type", "application/json")
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response

View File

@@ -1,309 +0,0 @@
use std::collections::HashMap;
use codex_core::config::Config;
use codex_core::features::Feature;
use serde::Deserialize;
use std::time::Duration;
use crate::chatgpt_client::chatgpt_get_request_with_timeout;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
pub use codex_core::connectors::AppInfo;
pub use codex_core::connectors::connector_display_label;
use codex_core::connectors::connector_install_url;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools;
use codex_core::connectors::merge_connectors;
#[derive(Debug, Deserialize)]
struct DirectoryListResponse {
apps: Vec<DirectoryApp>,
#[serde(alias = "nextToken")]
next_token: Option<String>,
}
#[derive(Debug, Deserialize, Clone)]
struct DirectoryApp {
id: String,
name: String,
description: Option<String>,
#[serde(alias = "logoUrl")]
logo_url: Option<String>,
#[serde(alias = "logoUrlDark")]
logo_url_dark: Option<String>,
#[serde(alias = "distributionChannel")]
distribution_channel: Option<String>,
visibility: Option<String>,
}
const DIRECTORY_CONNECTORS_TIMEOUT: Duration = Duration::from_secs(60);
pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
if !config.features.enabled(Feature::Apps) {
return Ok(Vec::new());
}
let (connectors_result, accessible_result) = tokio::join!(
list_all_connectors(config),
list_accessible_connectors_from_mcp_tools(config),
);
let connectors = connectors_result?;
let accessible = accessible_result?;
let merged = merge_connectors(connectors, accessible);
Ok(filter_disallowed_connectors(merged))
}
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<AppInfo>> {
if !config.features.enabled(Feature::Apps) {
return Ok(Vec::new());
}
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
.await?;
let token_data =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let mut apps = list_directory_connectors(config).await?;
if token_data.id_token.is_workspace_account() {
apps.extend(list_workspace_connectors(config).await?);
}
let mut connectors = merge_directory_apps(apps)
.into_iter()
.map(directory_app_to_app_info)
.collect::<Vec<_>>();
for connector in &mut connectors {
let install_url = match connector.install_url.take() {
Some(install_url) => install_url,
None => connector_install_url(&connector.name, &connector.id),
};
connector.name = normalize_connector_name(&connector.name, &connector.id);
connector.description = normalize_connector_value(connector.description.as_deref());
connector.install_url = Some(install_url);
connector.is_accessible = false;
}
connectors.sort_by(|left, right| {
left.name
.cmp(&right.name)
.then_with(|| left.id.cmp(&right.id))
});
Ok(connectors)
}
async fn list_directory_connectors(config: &Config) -> anyhow::Result<Vec<DirectoryApp>> {
let mut apps = Vec::new();
let mut next_token: Option<String> = None;
loop {
let path = match next_token.as_deref() {
Some(token) => {
let encoded_token = urlencoding::encode(token);
format!("/connectors/directory/list?tier=categorized&token={encoded_token}")
}
None => "/connectors/directory/list?tier=categorized".to_string(),
};
let response: DirectoryListResponse =
chatgpt_get_request_with_timeout(config, path, Some(DIRECTORY_CONNECTORS_TIMEOUT))
.await?;
apps.extend(
response
.apps
.into_iter()
.filter(|app| !is_hidden_directory_app(app)),
);
next_token = response
.next_token
.map(|token| token.trim().to_string())
.filter(|token| !token.is_empty());
if next_token.is_none() {
break;
}
}
Ok(apps)
}
async fn list_workspace_connectors(config: &Config) -> anyhow::Result<Vec<DirectoryApp>> {
let response: anyhow::Result<DirectoryListResponse> = chatgpt_get_request_with_timeout(
config,
"/connectors/directory/list_workspace".to_string(),
Some(DIRECTORY_CONNECTORS_TIMEOUT),
)
.await;
match response {
Ok(response) => Ok(response
.apps
.into_iter()
.filter(|app| !is_hidden_directory_app(app))
.collect()),
Err(_) => Ok(Vec::new()),
}
}
fn merge_directory_apps(apps: Vec<DirectoryApp>) -> Vec<DirectoryApp> {
let mut merged: HashMap<String, DirectoryApp> = HashMap::new();
for app in apps {
if let Some(existing) = merged.get_mut(&app.id) {
merge_directory_app(existing, app);
} else {
merged.insert(app.id.clone(), app);
}
}
merged.into_values().collect()
}
fn merge_directory_app(existing: &mut DirectoryApp, incoming: DirectoryApp) {
let DirectoryApp {
id: _,
name,
description,
logo_url,
logo_url_dark,
distribution_channel,
visibility: _,
} = incoming;
let incoming_name_is_empty = name.trim().is_empty();
if existing.name.trim().is_empty() && !incoming_name_is_empty {
existing.name = name;
}
let incoming_description_present = description
.as_deref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
let existing_description_present = existing
.description
.as_deref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
if !existing_description_present && incoming_description_present {
existing.description = description;
}
if existing.logo_url.is_none() && logo_url.is_some() {
existing.logo_url = logo_url;
}
if existing.logo_url_dark.is_none() && logo_url_dark.is_some() {
existing.logo_url_dark = logo_url_dark;
}
if existing.distribution_channel.is_none() && distribution_channel.is_some() {
existing.distribution_channel = distribution_channel;
}
}
fn is_hidden_directory_app(app: &DirectoryApp) -> bool {
matches!(app.visibility.as_deref(), Some("HIDDEN"))
}
fn directory_app_to_app_info(app: DirectoryApp) -> AppInfo {
AppInfo {
id: app.id,
name: app.name,
description: app.description,
logo_url: app.logo_url,
logo_url_dark: app.logo_url_dark,
distribution_channel: app.distribution_channel,
install_url: None,
is_accessible: false,
}
}
fn normalize_connector_name(name: &str, connector_id: &str) -> String {
let trimmed = name.trim();
if trimmed.is_empty() {
connector_id.to_string()
} else {
trimmed.to_string()
}
}
fn normalize_connector_value(value: Option<&str>) -> Option<String> {
value
.map(str::trim)
.filter(|value| !value.is_empty())
.map(str::to_string)
}
const ALLOWED_APPS_SDK_APPS: &[&str] = &["asdk_app_69781557cc1481919cf5e9824fa2e792"];
const DISALLOWED_CONNECTOR_IDS: &[&str] = &[
"asdk_app_6938a94a61d881918ef32cb999ff937c",
"connector_2b0a9009c9c64bf9933a3dae3f2b1254",
"connector_68de829bf7648191acd70a907364c67c",
];
const DISALLOWED_CONNECTOR_PREFIX: &str = "connector_openai_";
fn filter_disallowed_connectors(connectors: Vec<AppInfo>) -> Vec<AppInfo> {
// TODO: Support Apps SDK connectors.
connectors
.into_iter()
.filter(is_connector_allowed)
.collect()
}
fn is_connector_allowed(connector: &AppInfo) -> bool {
let connector_id = connector.id.as_str();
if connector_id.starts_with(DISALLOWED_CONNECTOR_PREFIX)
|| DISALLOWED_CONNECTOR_IDS.contains(&connector_id)
{
return false;
}
if connector_id.starts_with("asdk_app_") {
return ALLOWED_APPS_SDK_APPS.contains(&connector_id);
}
true
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
fn app(id: &str) -> AppInfo {
AppInfo {
id: id.to_string(),
name: id.to_string(),
description: None,
logo_url: None,
logo_url_dark: None,
distribution_channel: None,
install_url: None,
is_accessible: false,
}
}
#[test]
fn filters_internal_asdk_connectors() {
let filtered = filter_disallowed_connectors(vec![app("asdk_app_hidden"), app("alpha")]);
assert_eq!(filtered, vec![app("alpha")]);
}
#[test]
fn allows_whitelisted_asdk_connectors() {
let filtered = filter_disallowed_connectors(vec![
app("asdk_app_69781557cc1481919cf5e9824fa2e792"),
app("beta"),
]);
assert_eq!(
filtered,
vec![
app("asdk_app_69781557cc1481919cf5e9824fa2e792"),
app("beta")
]
);
}
#[test]
fn filters_openai_connectors() {
let filtered = filter_disallowed_connectors(vec![
app("connector_openai_foo"),
app("connector_openai_bar"),
app("gamma"),
]);
assert_eq!(filtered, vec![app("gamma")]);
}
#[test]
fn filters_disallowed_connector_ids() {
let filtered = filter_disallowed_connectors(vec![
app("asdk_app_6938a94a61d881918ef32cb999ff937c"),
app("delta"),
]);
assert_eq!(filtered, vec![app("delta")]);
}
}

View File

@@ -1,5 +1,4 @@
pub mod apply_command;
mod chatgpt_client;
mod chatgpt_token;
pub mod connectors;
pub mod get_task;

View File

@@ -35,6 +35,8 @@ codex-responses-api-proxy = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
codex-tui2 = { workspace = true }
codex-utils-absolute-path = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }
regex-lite = { workspace = true }

View File

@@ -136,8 +136,7 @@ async fn run_command_under_sandbox(
if let SandboxType::Windows = sandbox_type {
#[cfg(target_os = "windows")]
{
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
use codex_protocol::config_types::WindowsSandboxLevel;
use codex_core::features::Feature;
use codex_windows_sandbox::run_windows_sandbox_capture;
use codex_windows_sandbox::run_windows_sandbox_capture_elevated;
@@ -148,10 +147,8 @@ async fn run_command_under_sandbox(
let env_map = env.clone();
let command_vec = command.clone();
let base_dir = config.codex_home.clone();
let use_elevated = matches!(
WindowsSandboxLevel::from_config(&config),
WindowsSandboxLevel::Elevated
);
let use_elevated = config.features.enabled(Feature::WindowsSandbox)
&& config.features.enabled(Feature::WindowsSandboxElevated);
// Preflight audit is invoked elsewhere at the appropriate times.
let res = tokio::task::spawn_blocking(move || {

View File

@@ -225,7 +225,7 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides).await;
match CodexAuth::from_auth_storage(&config.codex_home, config.cli_auth_credentials_store_mode) {
Ok(Some(auth)) => match auth.api_auth_mode() {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token() {
Ok(api_key) => {
eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
@@ -236,14 +236,10 @@ pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
std::process::exit(1);
}
},
AuthMode::Chatgpt => {
AuthMode::ChatGPT => {
eprintln!("Logged in using ChatGPT");
std::process::exit(0);
}
AuthMode::ChatgptAuthTokens => {
eprintln!("Logged in using ChatGPT (external tokens)");
std::process::exit(0);
}
},
Ok(None) => {
eprintln!("Not logged in");

View File

@@ -26,8 +26,8 @@ use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::ExitReason;
use codex_tui::update_action::UpdateAction;
use codex_tui2 as tui2;
use owo_colors::OwoColorize;
use std::io::IsTerminal;
use std::path::PathBuf;
use supports_color::Stream;
@@ -39,11 +39,13 @@ use crate::mcp_cmd::McpCli;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::find_codex_home;
use codex_core::features::Stage;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::features::Feature;
use codex_core::features::FeatureOverrides;
use codex_core::features::Features;
use codex_core::features::is_known_feature_key;
use codex_core::terminal::TerminalName;
use codex_utils_absolute_path::AbsolutePathBuf;
/// Codex CLI
///
@@ -144,13 +146,13 @@ struct CompletionCommand {
#[derive(Debug, Parser)]
struct ResumeCommand {
/// Conversation/session id (UUID) or thread name. UUIDs take precedence when the value parses as one.
/// Conversation/session id (UUID). When provided, resumes this session.
/// If omitted, use --last to pick the most recent recorded session.
#[arg(value_name = "SESSION_ID")]
session_id: Option<String>,
/// Continue the most recent session without showing the picker.
#[arg(long = "last", default_value_t = false)]
#[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
last: bool,
/// Show all sessions (disables cwd filtering and shows CWD column).
@@ -323,7 +325,6 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
let AppExitInfo {
token_usage,
thread_id: conversation_id,
thread_name,
..
} = exit_info;
@@ -336,9 +337,8 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri
codex_core::protocol::FinalOutput::from(token_usage)
)];
if let Some(resume_cmd) =
codex_core::util::resume_command(thread_name.as_deref(), conversation_id)
{
if let Some(session_id) = conversation_id {
let resume_cmd = format!("codex resume {session_id}");
let command = if color_enabled {
resume_cmd.cyan().to_string()
} else {
@@ -401,7 +401,8 @@ fn run_update_action(action: UpdateAction) -> anyhow::Result<()> {
if !status.success() {
anyhow::bail!("`{cmd_str}` failed with status {status}");
}
println!("\n🎉 Update ran successfully! Please restart Codex.");
println!();
println!("🎉 Update ran successfully! Please restart Codex.");
Ok(())
}
@@ -453,23 +454,13 @@ struct FeaturesCli {
enum FeaturesSubcommand {
/// List known features with their stage and effective state.
List,
/// Enable a feature in config.toml.
Enable(FeatureSetArgs),
/// Disable a feature in config.toml.
Disable(FeatureSetArgs),
}
#[derive(Debug, Parser)]
struct FeatureSetArgs {
/// Feature key to update (for example: unified_exec).
feature: String,
}
fn stage_str(stage: codex_core::features::Stage) -> &'static str {
use codex_core::features::Stage;
match stage {
Stage::UnderDevelopment => "under development",
Stage::Experimental { .. } => "experimental",
Stage::Experimental => "experimental",
Stage::Beta { .. } => "beta",
Stage::Stable => "stable",
Stage::Deprecated => "deprecated",
Stage::Removed => "removed",
@@ -710,27 +701,12 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
overrides,
)
.await?;
let mut rows = Vec::with_capacity(codex_core::features::FEATURES.len());
let mut name_width = 0;
let mut stage_width = 0;
for def in codex_core::features::FEATURES.iter() {
let name = def.key;
let stage = stage_str(def.stage);
let enabled = config.features.enabled(def.id);
name_width = name_width.max(name.len());
stage_width = stage_width.max(stage.len());
rows.push((name, stage, enabled));
println!("{name}\t{stage}\t{enabled}");
}
for (name, stage, enabled) in rows {
println!("{name:<name_width$} {stage:<stage_width$} {enabled}");
}
}
FeaturesSubcommand::Enable(FeatureSetArgs { feature }) => {
enable_feature_in_config(&interactive, &feature).await?;
}
FeaturesSubcommand::Disable(FeatureSetArgs { feature }) => {
disable_feature_in_config(&interactive, &feature).await?;
}
},
}
@@ -738,57 +714,6 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
Ok(())
}
async fn enable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyhow::Result<()> {
FeatureToggles::validate_feature(feature)?;
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, true)
.apply()
.await?;
println!("Enabled feature `{feature}` in config.toml.");
maybe_print_under_development_feature_warning(&codex_home, interactive, feature);
Ok(())
}
async fn disable_feature_in_config(interactive: &TuiCli, feature: &str) -> anyhow::Result<()> {
FeatureToggles::validate_feature(feature)?;
let codex_home = find_codex_home()?;
ConfigEditsBuilder::new(&codex_home)
.with_profile(interactive.config_profile.as_deref())
.set_feature_enabled(feature, false)
.apply()
.await?;
println!("Disabled feature `{feature}` in config.toml.");
Ok(())
}
fn maybe_print_under_development_feature_warning(
codex_home: &std::path::Path,
interactive: &TuiCli,
feature: &str,
) {
if interactive.config_profile.is_some() {
return;
}
let Some(spec) = codex_core::features::FEATURES
.iter()
.find(|spec| spec.key == feature)
else {
return;
};
if !matches!(spec.stage, Stage::UnderDevelopment) {
return;
}
let config_path = codex_home.join(codex_core::config::CONFIG_TOML_FILE);
eprintln!(
"Under-development features enabled: {feature}. Under-development features are incomplete and may behave unpredictably. To suppress this warning, set `suppress_unstable_features_warning = true` in {}.",
config_path.display()
);
}
/// Prepend root-level overrides so they have lower precedence than
/// CLI-specific ones specified after the subcommand (if any).
fn prepend_config_flags(
@@ -800,43 +725,44 @@ fn prepend_config_flags(
.splice(0..0, cli_config_overrides.raw_overrides);
}
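
// A minimal, self-contained sketch (hypothetical values, not the real
// override keys) of why prepending via `splice(0..0, ..)` yields lower
// precedence when overrides are applied in order with last-one-wins
// semantics:
fn splice_precedence_sketch() {
    let mut overrides = vec!["model=o3".to_string()]; // subcommand-level
    let root = vec!["model=o1".to_string()]; // root-level
    overrides.splice(0..0, root); // root-level values now come first...
    let model = overrides
        .iter()
        .filter_map(|kv| kv.strip_prefix("model="))
        .last(); // ...so the later, subcommand-level value wins.
    assert_eq!(model, Some("o3"));
}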
/// Run the interactive Codex TUI, dispatching to either the legacy implementation or the
/// experimental TUI v2 shim based on feature flags resolved from config.
async fn run_interactive_tui(
mut interactive: TuiCli,
interactive: TuiCli,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> std::io::Result<AppExitInfo> {
if let Some(prompt) = interactive.prompt.take() {
// Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state.
interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n"));
if is_tui2_enabled(&interactive).await? {
let result = tui2::run_main(interactive.into(), codex_linux_sandbox_exe).await?;
Ok(result.into())
} else {
codex_tui::run_main(interactive, codex_linux_sandbox_exe).await
}
let terminal_info = codex_core::terminal::terminal_info();
if terminal_info.name == TerminalName::Dumb {
if !(std::io::stdin().is_terminal() && std::io::stderr().is_terminal()) {
return Ok(AppExitInfo::fatal(
"TERM is set to \"dumb\". Refusing to start the interactive TUI because no terminal is available for a confirmation prompt (stdin/stderr is not a TTY). Run in a supported terminal or unset TERM.",
));
}
eprintln!(
"WARNING: TERM is set to \"dumb\". Codex's interactive TUI may not work in this terminal."
);
if !confirm("Continue anyway? [y/N]: ")? {
return Ok(AppExitInfo::fatal(
"Refusing to start the interactive TUI because TERM is set to \"dumb\". Run in a supported terminal or unset TERM.",
));
}
}
codex_tui::run_main(interactive, codex_linux_sandbox_exe).await
}
fn confirm(prompt: &str) -> std::io::Result<bool> {
eprintln!("{prompt}");
/// Returns `Ok(true)` when the resolved configuration enables the `tui2` feature flag.
///
/// This performs a lightweight config load (honoring the same precedence as the lower-level TUI
/// bootstrap: `$CODEX_HOME`, config.toml, profile, and CLI `-c` overrides) solely to decide which
/// TUI frontend to launch. The full configuration is still loaded later by the interactive TUI.
async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> {
let raw_overrides = cli.config_overrides.raw_overrides.clone();
let overrides_cli = codex_common::CliConfigOverrides { raw_overrides };
let cli_kv_overrides = overrides_cli
.parse_overrides()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
let answer = input.trim();
Ok(answer.eq_ignore_ascii_case("y") || answer.eq_ignore_ascii_case("yes"))
let codex_home = find_codex_home()?;
let cwd = cli.cwd.clone();
let config_cwd = match cwd.as_deref() {
Some(path) => AbsolutePathBuf::from_absolute_path(path)?,
None => AbsolutePathBuf::current_dir()?,
};
let config_toml =
load_config_as_toml_with_cli_overrides(&codex_home, &config_cwd, cli_kv_overrides).await?;
let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?;
let overrides = FeatureOverrides::default();
let features = Features::from_config(&config_toml, &config_profile, overrides);
Ok(features.enabled(Feature::Tui2))
}
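
// A minimal sketch (all names and values hypothetical) of the layered
// precedence described in the doc comment above: built-in defaults, then
// config.toml, then the active profile, then CLI `-c` overrides, with later
// layers winning.
fn layered_flag_sketch() {
    let default = Some(false); // built-in default
    let config_toml = Some(true); // e.g. config.toml: [features] tui2 = true
    let profile = None; // the active profile does not set the flag
    let cli_override = None; // no `-c features.tui2=...` on the command line
    let enabled = [default, config_toml, profile, cli_override]
        .into_iter()
        .flatten()
        .last()
        .unwrap_or(false);
    assert!(enabled);
}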
/// Build the final `TuiCli` for a `codex resume` invocation.
@@ -929,8 +855,7 @@ fn merge_interactive_cli_flags(interactive: &mut TuiCli, subcommand_cli: TuiCli)
interactive.add_dir.extend(subcommand_cli.add_dir);
}
if let Some(prompt) = subcommand_cli.prompt {
// Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state.
interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n"));
interactive.prompt = Some(prompt);
}
interactive
@@ -1004,24 +929,6 @@ mod tests {
finalize_fork_interactive(interactive, root_overrides, session_id, last, all, fork_cli)
}
#[test]
fn exec_resume_last_accepts_prompt_positional() {
let cli =
MultitoolCli::try_parse_from(["codex", "exec", "--json", "resume", "--last", "2+2"])
.expect("parse should succeed");
let Some(Subcommand::Exec(exec)) = cli.subcommand else {
panic!("expected exec subcommand");
};
let Some(codex_exec::Command::Resume(args)) = exec.command else {
panic!("expected exec resume");
};
assert!(args.last);
assert_eq!(args.session_id, None);
assert_eq!(args.prompt.as_deref(), Some("2+2"));
}
fn app_server_from_args(args: &[&str]) -> AppServerCommand {
let cli = MultitoolCli::try_parse_from(args).expect("parse");
let Subcommand::AppServer(app_server) = cli.subcommand.expect("app-server present") else {
@@ -1030,7 +937,7 @@ mod tests {
app_server
}
fn sample_exit_info(conversation_id: Option<&str>, thread_name: Option<&str>) -> AppExitInfo {
fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo {
let token_usage = TokenUsage {
output_tokens: 2,
total_tokens: 2,
@@ -1038,10 +945,7 @@ mod tests {
};
AppExitInfo {
token_usage,
thread_id: conversation_id
.map(ThreadId::from_string)
.map(Result::unwrap),
thread_name: thread_name.map(str::to_string),
thread_id: conversation.map(ThreadId::from_string).map(Result::unwrap),
update_action: None,
exit_reason: ExitReason::UserRequested,
}
@@ -1052,7 +956,6 @@ mod tests {
let exit_info = AppExitInfo {
token_usage: TokenUsage::default(),
thread_id: None,
thread_name: None,
update_action: None,
exit_reason: ExitReason::UserRequested,
};
@@ -1062,7 +965,7 @@ mod tests {
#[test]
fn format_exit_messages_includes_resume_hint_without_color() {
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"), None);
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
let lines = format_exit_messages(exit_info, false);
assert_eq!(
lines,
@@ -1076,28 +979,12 @@ mod tests {
#[test]
fn format_exit_messages_applies_color_when_enabled() {
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"), None);
let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000"));
let lines = format_exit_messages(exit_info, true);
assert_eq!(lines.len(), 2);
assert!(lines[1].contains("\u{1b}[36m"));
}
#[test]
fn format_exit_messages_prefers_thread_name() {
let exit_info = sample_exit_info(
Some("123e4567-e89b-12d3-a456-426614174000"),
Some("my-thread"),
);
let lines = format_exit_messages(exit_info, false);
assert_eq!(
lines,
vec![
"Token usage: total=2 input=0 output=2".to_string(),
"To continue this session, run codex resume my-thread".to_string(),
]
);
}
#[test]
fn resume_model_flag_applies_when_no_root_flags() {
let interactive =
@@ -1263,32 +1150,6 @@ mod tests {
assert!(app_server.analytics_default_enabled);
}
#[test]
fn features_enable_parses_feature_name() {
let cli = MultitoolCli::try_parse_from(["codex", "features", "enable", "unified_exec"])
.expect("parse should succeed");
let Some(Subcommand::Features(FeaturesCli { sub })) = cli.subcommand else {
panic!("expected features subcommand");
};
let FeaturesSubcommand::Enable(FeatureSetArgs { feature }) = sub else {
panic!("expected features enable");
};
assert_eq!(feature, "unified_exec");
}
#[test]
fn features_disable_parses_feature_name() {
let cli = MultitoolCli::try_parse_from(["codex", "features", "disable", "shell_tool"])
.expect("parse should succeed");
let Some(Subcommand::Features(FeaturesCli { sub })) = cli.subcommand else {
panic!("expected features subcommand");
};
let FeaturesSubcommand::Disable(FeatureSetArgs { feature }) = sub else {
panic!("expected features disable");
};
assert_eq!(feature, "shell_tool");
}
#[test]
fn feature_toggles_known_features_generate_overrides() {
let toggles = FeatureToggles {

View File

@@ -13,20 +13,18 @@ use codex_core::config::find_codex_home;
use codex_core::config::load_global_mcp_servers;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::mcp::auth::McpOAuthLoginSupport;
use codex_core::mcp::auth::compute_auth_statuses;
use codex_core::mcp::auth::oauth_login_support;
use codex_core::protocol::McpAuthStatus;
use codex_rmcp_client::delete_oauth_tokens;
use codex_rmcp_client::perform_oauth_login;
use codex_rmcp_client::supports_oauth_login;
/// Subcommands:
/// - `serve` — run the MCP server on stdio
/// - `list` — list configured servers (with `--json`)
/// - `get` — show a single server (with `--json`)
/// - `add` — add a server launcher entry to `~/.codex/config.toml`
/// - `remove` — delete a server entry
/// - `login` — authenticate with MCP server using OAuth
/// - `logout` — remove OAuth credentials for MCP server
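//
// For orientation, a trimmed sketch (hypothetical, two variants only) of a
// clap subcommand layout like the one listed above:
//
//     #[derive(clap::Subcommand)]
//     enum McpSubcommand {
//         /// List configured servers (with `--json`).
//         List {
//             #[arg(long)]
//             json: bool,
//         },
//         /// Authenticate with an MCP server using OAuth.
//         Login { name: String },
//     }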
#[derive(Debug, clap::Parser)]
pub struct McpCli {
#[clap(flatten)]
@@ -248,7 +246,6 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
scopes: None,
};
servers.insert(name.clone(), new_entry);
@@ -261,25 +258,33 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
println!("Added global MCP server '{name}'.");
match oauth_login_support(&transport).await {
McpOAuthLoginSupport::Supported(oauth_config) => {
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&oauth_config.url,
config.mcp_oauth_credentials_store_mode,
oauth_config.http_headers,
oauth_config.env_http_headers,
&Vec::new(),
config.mcp_oauth_callback_port,
)
.await?;
println!("Successfully logged in.");
if let McpServerTransportConfig::StreamableHttp {
url,
bearer_token_env_var: None,
http_headers,
env_http_headers,
} = transport
{
match supports_oauth_login(&url).await {
Ok(true) => {
println!("Detected OAuth support. Starting OAuth flow…");
perform_oauth_login(
&name,
&url,
config.mcp_oauth_credentials_store_mode,
http_headers.clone(),
env_http_headers.clone(),
&Vec::new(),
config.mcp_oauth_callback_port,
)
.await?;
println!("Successfully logged in.");
}
Ok(false) => {}
Err(_) => println!(
"MCP server may or may not require login. Run `codex mcp login {name}` to login."
),
}
McpOAuthLoginSupport::Unsupported => {}
McpOAuthLoginSupport::Unknown(_) => println!(
"MCP server may or may not require login. Run `codex mcp login {name}` to login."
),
}
Ok(())
@@ -342,11 +347,6 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
_ => bail!("OAuth login is only supported for streamable HTTP servers."),
};
let mut scopes = scopes;
if scopes.is_empty() {
scopes = server.scopes.clone().unwrap_or_default();
}
perform_oauth_login(
&name,
&url,

View File

@@ -1,58 +0,0 @@
use std::path::Path;
use anyhow::Result;
use predicates::str::contains;
use tempfile::TempDir;
fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> {
let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codex")?);
cmd.env("CODEX_HOME", codex_home);
Ok(cmd)
}
#[tokio::test]
async fn features_enable_writes_feature_flag_to_config() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "enable", "unified_exec"])
.assert()
.success()
.stdout(contains("Enabled feature `unified_exec` in config.toml."));
let config = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
assert!(config.contains("[features]"));
assert!(config.contains("unified_exec = true"));
Ok(())
}
#[tokio::test]
async fn features_disable_writes_feature_flag_to_config() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "disable", "shell_tool"])
.assert()
.success()
.stdout(contains("Disabled feature `shell_tool` in config.toml."));
let config = std::fs::read_to_string(codex_home.path().join("config.toml"))?;
assert!(config.contains("[features]"));
assert!(config.contains("shell_tool = false"));
Ok(())
}
#[tokio::test]
async fn features_enable_under_development_feature_prints_warning() -> Result<()> {
let codex_home = TempDir::new()?;
let mut cmd = codex_command(codex_home.path())?;
cmd.args(["features", "enable", "sqlite"])
.assert()
.success()
.stderr(contains("Under-development features enabled: sqlite."));
Ok(())
}

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "cloud-requirements",
crate_name = "codex_cloud_requirements",
)

View File

@@ -1,27 +0,0 @@
[package]
name = "codex-cloud-requirements"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
async-trait = { workspace = true }
codex-backend-client = { workspace = true }
codex-core = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
tokio = { workspace = true, features = ["sync", "time"] }
thiserror = { workspace = true }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
base64 = { workspace = true }
pretty_assertions = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "test-util", "time"] }

View File

@@ -1,459 +0,0 @@
//! Cloud-hosted config requirements for Codex.
//!
//! This crate fetches `requirements.toml` data from the backend as an alternative to loading it
//! from the local filesystem. It only applies to Enterprise ChatGPT customers.
//!
//! Enterprise ChatGPT customers must successfully fetch these requirements before Codex will run.
use async_trait::async_trait;
use codex_backend_client::Client as BackendClient;
use codex_core::AuthManager;
use codex_core::auth::CodexAuth;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_protocol::account::PlanType;
use std::io;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use thiserror::Error;
use tokio::time::timeout;
/// This blocks Codex startup, so it must be short.
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(5);
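// The service below bounds the fetch with `tokio::time::timeout`; a minimal
// sketch of that pattern (names hypothetical):
//
//     match timeout(CLOUD_REQUIREMENTS_TIMEOUT, fetch_future).await {
//         Ok(result) => { /* completed within the budget */ }
//         Err(_elapsed) => { /* give up instead of blocking startup */ }
//     }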
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsError {
#[error("cloud requirements user error: {0}")]
User(CloudRequirementsUserError),
#[error("cloud requirements network error: {0}")]
Network(CloudRequirementsNetworkError),
}
impl From<CloudRequirementsUserError> for CloudRequirementsError {
fn from(err: CloudRequirementsUserError) -> Self {
CloudRequirementsError::User(err)
}
}
impl From<CloudRequirementsNetworkError> for CloudRequirementsError {
fn from(err: CloudRequirementsNetworkError) -> Self {
CloudRequirementsError::Network(err)
}
}
impl From<CloudRequirementsError> for io::Error {
fn from(err: CloudRequirementsError) -> Self {
let kind = match &err {
CloudRequirementsError::User(_) => io::ErrorKind::InvalidData,
CloudRequirementsError::Network(CloudRequirementsNetworkError::Timeout { .. }) => {
io::ErrorKind::TimedOut
}
CloudRequirementsError::Network(_) => io::ErrorKind::Other,
};
io::Error::new(kind, err)
}
}
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsUserError {
#[error("failed to parse requirements TOML: {message}")]
InvalidToml { message: String },
}
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsNetworkError {
#[error("backend client initialization failed: {message}")]
BackendClient { message: String },
#[error("request failed: {message}")]
Request { message: String },
#[error("cloud requirements response missing contents")]
MissingContents,
#[error("timed out after {timeout_ms}ms")]
Timeout { timeout_ms: u64 },
#[error("cloud requirements task failed: {message}")]
Task { message: String },
}
#[async_trait]
trait RequirementsFetcher: Send + Sync {
/// Returns requirements as a TOML string.
async fn fetch_requirements(&self, auth: &CodexAuth) -> Result<String, CloudRequirementsError>;
}
struct BackendRequirementsFetcher {
base_url: String,
}
impl BackendRequirementsFetcher {
fn new(base_url: String) -> Self {
Self { base_url }
}
}
#[async_trait]
impl RequirementsFetcher for BackendRequirementsFetcher {
async fn fetch_requirements(&self, auth: &CodexAuth) -> Result<String, CloudRequirementsError> {
let client = BackendClient::from_auth(self.base_url.clone(), auth)
.inspect_err(|err| {
tracing::warn!(
error = %err,
"Failed to construct backend client for cloud requirements"
);
})
.map_err(|err| CloudRequirementsNetworkError::BackendClient {
message: err.to_string(),
})
.map_err(CloudRequirementsError::from)?;
let response = client
.get_config_requirements_file()
.await
.inspect_err(|err| tracing::warn!(error = %err, "Failed to fetch cloud requirements"))
.map_err(|err| CloudRequirementsNetworkError::Request {
message: err.to_string(),
})
.map_err(CloudRequirementsError::from)?;
let Some(contents) = response.contents else {
tracing::warn!("Cloud requirements response missing contents");
return Err(CloudRequirementsError::from(
CloudRequirementsNetworkError::MissingContents,
));
};
Ok(contents)
}
}
struct CloudRequirementsService {
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
timeout: Duration,
}
impl CloudRequirementsService {
fn new(
auth_manager: Arc<AuthManager>,
fetcher: Arc<dyn RequirementsFetcher>,
timeout: Duration,
) -> Self {
Self {
auth_manager,
fetcher,
timeout,
}
}
async fn fetch_with_timeout(
&self,
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsError> {
let _timer =
codex_otel::start_global_timer("codex.cloud_requirements.fetch.duration_ms", &[]);
let started_at = Instant::now();
let result = timeout(self.timeout, self.fetch()).await.map_err(|_| {
CloudRequirementsNetworkError::Timeout {
timeout_ms: self.timeout.as_millis() as u64,
}
})?;
let elapsed_ms = started_at.elapsed().as_millis();
match result.as_ref() {
Ok(Some(requirements)) => {
tracing::info!(
elapsed_ms,
status = "success",
requirements = ?requirements,
"Cloud requirements load completed"
);
println!(
"cloud_requirements status=success elapsed_ms={elapsed_ms} value={requirements:?}"
);
}
Ok(None) => {
tracing::info!(
elapsed_ms,
status = "none",
requirements = %"none",
"Cloud requirements load completed"
);
println!("cloud_requirements status=none elapsed_ms={elapsed_ms} value=none");
}
Err(err) => {
tracing::warn!(
elapsed_ms,
status = "error",
requirements = %"none",
error = %err,
"Cloud requirements load failed"
);
println!(
"cloud_requirements status=error elapsed_ms={elapsed_ms} value=none error={err}"
);
}
}
result
}
async fn fetch(&self) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsError> {
let auth = match self.auth_manager.auth().await {
Some(auth) => auth,
None => return Ok(None),
};
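        // Cloud requirements only apply to ChatGPT-authenticated Enterprise
        // accounts; everyone else skips the fetch entirely.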
if !(auth.is_chatgpt_auth() && auth.account_plan_type() == Some(PlanType::Enterprise)) {
return Ok(None);
}
let contents = self.fetcher.fetch_requirements(&auth).await?;
parse_cloud_requirements(&contents)
.inspect_err(|err| tracing::warn!(error = %err, "Failed to parse cloud requirements"))
.map_err(CloudRequirementsError::from)
}
}
pub fn cloud_requirements_loader(
auth_manager: Arc<AuthManager>,
chatgpt_base_url: String,
) -> CloudRequirementsLoader {
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(BackendRequirementsFetcher::new(chatgpt_base_url)),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let task = tokio::spawn(async move { service.fetch_with_timeout().await });
CloudRequirementsLoader::new(async move {
task.await
.map_err(|err| {
CloudRequirementsError::from(CloudRequirementsNetworkError::Task {
message: err.to_string(),
})
})
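            // JoinHandle::await yields Result<Result<_, E>, JoinError>; after
            // mapping the join error, flatten the nesting with `identity`.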
.and_then(std::convert::identity)
.map_err(io::Error::from)
.inspect_err(|err| tracing::warn!(error = %err, "Cloud requirements task failed"))
})
}
fn parse_cloud_requirements(
contents: &str,
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsUserError> {
if contents.trim().is_empty() {
return Ok(None);
}
let requirements: ConfigRequirementsToml =
toml::from_str(contents).map_err(|err| CloudRequirementsUserError::InvalidToml {
message: err.to_string(),
})?;
if requirements.is_empty() {
Ok(None)
} else {
Ok(Some(requirements))
}
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_protocol::protocol::AskForApproval;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::future::pending;
use std::path::Path;
use tempfile::tempdir;
fn write_auth_json(codex_home: &Path, value: serde_json::Value) -> Result<()> {
std::fs::write(codex_home.join("auth.json"), serde_json::to_string(&value)?)?;
Ok(())
}
fn auth_manager_with_api_key() -> Result<Arc<AuthManager>> {
let tmp = tempdir()?;
let auth_json = json!({
"OPENAI_API_KEY": "sk-test-key",
"tokens": null,
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json)?;
Ok(Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
)))
}
fn auth_manager_with_plan(plan_type: &str) -> Result<Arc<AuthManager>> {
let tmp = tempdir()?;
let header = json!({ "alg": "none", "typ": "JWT" });
let auth_payload = json!({
"chatgpt_plan_type": plan_type,
"chatgpt_user_id": "user-12345",
"user_id": "user-12345",
});
let payload = json!({
"email": "user@example.com",
"https://api.openai.com/auth": auth_payload,
});
let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header)?);
let payload_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload)?);
let signature_b64 = URL_SAFE_NO_PAD.encode(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
let auth_json = json!({
"OPENAI_API_KEY": null,
"tokens": {
"id_token": fake_jwt,
"access_token": "test-access-token",
"refresh_token": "test-refresh-token",
},
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json)?;
Ok(Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
)))
}
fn parse_for_fetch(
contents: Option<&str>,
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsUserError> {
contents.map(parse_cloud_requirements).unwrap_or(Ok(None))
}
struct StaticFetcher {
result: Result<String, CloudRequirementsError>,
}
#[async_trait::async_trait]
impl RequirementsFetcher for StaticFetcher {
async fn fetch_requirements(
&self,
_auth: &CodexAuth,
) -> Result<String, CloudRequirementsError> {
self.result.clone()
}
}
struct PendingFetcher;
#[async_trait::async_trait]
impl RequirementsFetcher for PendingFetcher {
async fn fetch_requirements(
&self,
_auth: &CodexAuth,
) -> Result<String, CloudRequirementsError> {
pending::<()>().await;
Ok(String::new())
}
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_chatgpt_auth() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager_with_api_key()?,
Arc::new(StaticFetcher {
result: Ok(String::new()),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(service.fetch().await, Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_enterprise_plan() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager_with_plan("pro")?,
Arc::new(StaticFetcher {
result: Ok(String::new()),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(service.fetch().await, Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_returns_missing_contents_error() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise")?,
Arc::new(StaticFetcher {
result: Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::MissingContents,
)),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
service.fetch().await,
Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::MissingContents
))
);
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_empty_contents() -> Result<()> {
assert_eq!(parse_for_fetch(Some(" ")), Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_invalid_toml() -> Result<()> {
assert!(matches!(
parse_for_fetch(Some("not = [")),
Err(CloudRequirementsUserError::InvalidToml { .. })
));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_empty_requirements() -> Result<()> {
assert_eq!(parse_for_fetch(Some("# comment")), Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_parses_valid_toml() -> Result<()> {
assert_eq!(
parse_for_fetch(Some("allowed_approval_policies = [\"never\"]")),
Ok(Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
}))
);
Ok(())
}
#[tokio::test(start_paused = true)]
async fn fetch_cloud_requirements_times_out() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise")?,
Arc::new(PendingFetcher),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let handle = tokio::spawn(async move { service.fetch_with_timeout().await });
tokio::time::advance(CLOUD_REQUIREMENTS_TIMEOUT + Duration::from_millis(1)).await;
assert_eq!(
handle.await?,
Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::Timeout {
timeout_ms: CLOUD_REQUIREMENTS_TIMEOUT.as_millis() as u64,
}
))
);
Ok(())
}
}

View File

@@ -42,10 +42,6 @@ pub enum ResponseEvent {
Created,
OutputItemDone(ResponseItem),
OutputItemAdded(ResponseItem),
/// Emitted when `X-Reasoning-Included: true` is present on the response,
/// meaning the server already accounted for past reasoning tokens and the
/// client should not re-estimate them.
ServerReasoningIncluded(bool),
Completed {
response_id: String,
token_usage: Option<TokenUsage>,

View File

@@ -157,9 +157,6 @@ impl Stream for AggregatedStream {
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item))));
}
Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included))));
}
Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => {
return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot))));
}
@@ -193,7 +190,6 @@ impl Stream for AggregatedStream {
content: vec![ContentItem::OutputText {
text: std::mem::take(&mut this.cumulative),
}],
end_turn: None,
};
this.pending
.push_back(ResponseEvent::OutputItemDone(aggregated_message));

View File

@@ -24,28 +24,23 @@ use tokio_tungstenite::tungstenite::Error as WsError;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::trace;
use url::Url;
type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state";
const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included";
pub struct ResponsesWebsocketConnection {
stream: Arc<Mutex<Option<WsStream>>>,
// TODO (pakrym): is this the right place for timeout?
idle_timeout: Duration,
server_reasoning_included: bool,
}
impl ResponsesWebsocketConnection {
fn new(stream: WsStream, idle_timeout: Duration, server_reasoning_included: bool) -> Self {
fn new(stream: WsStream, idle_timeout: Duration) -> Self {
Self {
stream: Arc::new(Mutex::new(Some(stream))),
idle_timeout,
server_reasoning_included,
}
}
@@ -61,17 +56,11 @@ impl ResponsesWebsocketConnection {
mpsc::channel::<std::result::Result<ResponseEvent, ApiError>>(1600);
let stream = Arc::clone(&self.stream);
let idle_timeout = self.idle_timeout;
let server_reasoning_included = self.server_reasoning_included;
let request_body = serde_json::to_value(&request).map_err(|err| {
ApiError::Stream(format!("failed to encode websocket request: {err}"))
})?;
tokio::spawn(async move {
if server_reasoning_included {
let _ = tx_event
.send(Ok(ResponseEvent::ServerReasoningIncluded(true)))
.await;
}
let mut guard = stream.lock().await;
let Some(ws_stream) = guard.as_mut() else {
let _ = tx_event
@@ -115,21 +104,17 @@ impl<A: AuthProvider> ResponsesWebsocketClient<A> {
extra_headers: HeaderMap,
turn_state: Option<Arc<OnceLock<String>>>,
) -> Result<ResponsesWebsocketConnection, ApiError> {
let ws_url = self
.provider
.websocket_url_for_path("responses")
let ws_url = Url::parse(&self.provider.url_for_path("responses"))
.map_err(|err| ApiError::Stream(format!("failed to build websocket URL: {err}")))?;
let mut headers = self.provider.headers.clone();
headers.extend(extra_headers);
apply_auth_headers(&mut headers, &self.auth);
let (stream, server_reasoning_included) =
connect_websocket(ws_url, headers, turn_state).await?;
let stream = connect_websocket(ws_url, headers, turn_state).await?;
Ok(ResponsesWebsocketConnection::new(
stream,
self.provider.stream_idle_timeout,
server_reasoning_included,
))
}
}
@@ -152,32 +137,16 @@ async fn connect_websocket(
url: Url,
headers: HeaderMap,
turn_state: Option<Arc<OnceLock<String>>>,
) -> Result<(WsStream, bool), ApiError> {
info!("connecting to websocket: {url}");
) -> Result<WsStream, ApiError> {
let mut request = url
.as_str()
.clone()
.into_client_request()
.map_err(|err| ApiError::Stream(format!("failed to build websocket request: {err}")))?;
request.headers_mut().extend(headers);
let response = tokio_tungstenite::connect_async(request).await;
let (stream, response) = match response {
Ok((stream, response)) => {
info!(
"successfully connected to websocket: {url}, headers: {:?}",
response.headers()
);
(stream, response)
}
Err(err) => {
error!("failed to connect to websocket: {err}, url: {url}");
return Err(map_ws_error(err, &url));
}
};
let reasoning_included = response.headers().contains_key(X_REASONING_INCLUDED_HEADER);
let (stream, response) = tokio_tungstenite::connect_async(request)
.await
.map_err(|err| map_ws_error(err, &url))?;
if let Some(turn_state) = turn_state
&& let Some(header_value) = response
.headers()
@@ -186,7 +155,7 @@ async fn connect_websocket(
{
let _ = turn_state.set(header_value.to_string());
}
Ok((stream, reasoning_included))
Ok(stream)
}
fn map_ws_error(err: WsError, url: &Url) -> ApiError {
@@ -228,7 +197,7 @@ async fn run_websocket_response_stream(
}
};
if let Err(err) = ws_stream.send(Message::Text(request_text.into())).await {
if let Err(err) = ws_stream.send(Message::Text(request_text)).await {
return Err(ApiError::Stream(format!(
"failed to send websocket request: {err}"
)));
@@ -288,7 +257,7 @@ async fn run_websocket_response_stream(
Message::Pong(_) => {}
Message::Close(_) => {
return Err(ApiError::Stream(
"websocket closed by server before response.completed".into(),
"websocket closed before response.completed".into(),
));
}
_ => {}

View File

@@ -25,8 +25,6 @@ pub enum ApiError {
},
#[error("rate limit: {0}")]
RateLimit(String),
#[error("invalid request: {message}")]
InvalidRequest { message: String },
}
impl From<RateLimitError> for ApiError {

View File

@@ -33,7 +33,6 @@ pub use crate::endpoint::responses_websocket::ResponsesWebsocketConnection;
pub use crate::error::ApiError;
pub use crate::provider::Provider;
pub use crate::provider::WireApi;
pub use crate::provider::is_azure_responses_wire_base_url;
pub use crate::requests::ChatRequest;
pub use crate::requests::ChatRequestBuilder;
pub use crate::requests::ResponsesRequest;

View File

@@ -6,7 +6,6 @@ use http::Method;
use http::header::HeaderMap;
use std::collections::HashMap;
use std::time::Duration;
use url::Url;
/// Wire-level APIs supported by a `Provider`.
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -95,38 +94,17 @@ impl Provider {
}
pub fn is_azure_responses_endpoint(&self) -> bool {
is_azure_responses_wire_base_url(self.wire.clone(), &self.name, Some(&self.base_url))
if self.wire != WireApi::Responses {
return false;
}
if self.name.eq_ignore_ascii_case("azure") {
return true;
}
self.base_url.to_ascii_lowercase().contains("openai.azure.")
|| matches_azure_responses_base_url(&self.base_url)
}
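
    // Derives the websocket endpoint from the provider's HTTP URL by scheme
    // substitution, e.g. (hypothetical) "https://host/v1/responses" becomes
    // "wss://host/v1/responses"; ws/wss URLs pass through unchanged.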
pub fn websocket_url_for_path(&self, path: &str) -> Result<Url, url::ParseError> {
let mut url = Url::parse(&self.url_for_path(path))?;
let scheme = match url.scheme() {
"http" => "ws",
"https" => "wss",
"ws" | "wss" => return Ok(url),
_ => return Ok(url),
};
let _ = url.set_scheme(scheme);
Ok(url)
}
}
pub fn is_azure_responses_wire_base_url(wire: WireApi, name: &str, base_url: Option<&str>) -> bool {
if wire != WireApi::Responses {
return false;
}
if name.eq_ignore_ascii_case("azure") {
return true;
}
let Some(base_url) = base_url else {
return false;
};
let base = base_url.to_ascii_lowercase();
base.contains("openai.azure.") || matches_azure_responses_base_url(&base)
}
fn matches_azure_responses_base_url(base_url: &str) -> bool {
@@ -137,54 +115,6 @@ fn matches_azure_responses_base_url(base_url: &str) -> bool {
"azurefd.",
"windows.net/openai",
];
AZURE_MARKERS.iter().any(|marker| base_url.contains(marker))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn detects_azure_responses_base_urls() {
let positive_cases = [
"https://foo.openai.azure.com/openai",
"https://foo.openai.azure.us/openai/deployments/bar",
"https://foo.cognitiveservices.azure.cn/openai",
"https://foo.aoai.azure.com/openai",
"https://foo.openai.azure-api.net/openai",
"https://foo.z01.azurefd.net/",
];
for base_url in positive_cases {
assert!(
is_azure_responses_wire_base_url(WireApi::Responses, "test", Some(base_url)),
"expected {base_url} to be detected as Azure"
);
}
assert!(is_azure_responses_wire_base_url(
WireApi::Responses,
"Azure",
Some("https://example.com")
));
let negative_cases = [
"https://api.openai.com/v1",
"https://example.com/openai",
"https://myproxy.azurewebsites.net/openai",
];
for base_url in negative_cases {
assert!(
!is_azure_responses_wire_base_url(WireApi::Responses, "test", Some(base_url)),
"expected {base_url} not to be detected as Azure"
);
}
assert!(!is_azure_responses_wire_base_url(
WireApi::Chat,
"Azure",
Some("https://foo.openai.azure.com/openai")
));
}
let base = base_url.to_ascii_lowercase();
AZURE_MARKERS.iter().any(|marker| base.contains(marker))
}

View File

@@ -386,7 +386,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "hi".to_string(),
}],
end_turn: None,
}];
let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[])
.conversation_id(Some("conv-1".into()))
@@ -413,7 +412,6 @@ mod tests {
content: vec![ContentItem::InputText {
text: "read these".to_string(),
}],
end_turn: None,
},
ResponseItem::FunctionCall {
id: None,

View File

@@ -15,12 +15,13 @@ pub(crate) fn subagent_header(source: &Option<SessionSource>) -> Option<String>
return None;
};
match sub {
codex_protocol::protocol::SubAgentSource::Review => Some("review".to_string()),
codex_protocol::protocol::SubAgentSource::Compact => Some("compact".to_string()),
codex_protocol::protocol::SubAgentSource::ThreadSpawn { .. } => {
Some("collab_spawn".to_string())
}
codex_protocol::protocol::SubAgentSource::Other(label) => Some(label.clone()),
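        // Fall back to the variant's serde string tag (e.g. "review"), or
        // "other" when the variant does not serialize to a plain string.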
other => Some(
serde_json::to_value(other)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string()),
),
}
}

View File

@@ -223,13 +223,11 @@ mod tests {
id: Some("m1".into()),
role: "assistant".into(),
content: Vec::new(),
end_turn: None,
},
ResponseItem::Message {
id: None,
role: "assistant".into(),
content: Vec::new(),
end_turn: None,
},
];

View File

@@ -330,7 +330,6 @@ async fn append_assistant_text(
id: None,
role: "assistant".to_string(),
content: vec![],
end_turn: None,
};
*assistant_item = Some(item.clone());
let _ = tx_event

View File

@@ -25,8 +25,6 @@ use tokio_util::io::ReaderStream;
use tracing::debug;
use tracing::trace;
const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included";
/// Streams SSE events from an on-disk fixture for tests.
pub fn stream_from_fixture(
path: impl AsRef<Path>,
@@ -60,10 +58,6 @@ pub fn spawn_response_stream(
.get("X-Models-Etag")
.and_then(|v| v.to_str().ok())
.map(ToString::to_string);
let reasoning_included = stream_response
.headers
.get(X_REASONING_INCLUDED_HEADER)
.is_some();
if let Some(turn_state) = turn_state.as_ref()
&& let Some(header_value) = stream_response
.headers
@@ -80,11 +74,6 @@ pub fn spawn_response_stream(
if let Some(etag) = models_etag {
let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await;
}
if reasoning_included {
let _ = tx_event
.send(Ok(ResponseEvent::ServerReasoningIncluded(true)))
.await;
}
process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await;
});
@@ -228,11 +217,6 @@ pub fn process_responses_event(
response_error = ApiError::QuotaExceeded;
} else if is_usage_not_included(&error) {
response_error = ApiError::UsageNotIncluded;
} else if is_invalid_prompt_error(&error) {
let message = error
.message
.unwrap_or_else(|| "Invalid request.".to_string());
response_error = ApiError::InvalidRequest { message };
} else {
let delay = try_parse_retry_after(&error);
let message = error.message.unwrap_or_default();
@@ -291,7 +275,7 @@ pub fn process_responses_event(
if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
return Ok(Some(ResponseEvent::OutputItemAdded(item)));
}
debug!("failed to parse ResponseItem from output_item.added");
debug!("failed to parse ResponseItem from output_item.done");
}
}
"response.reasoning_summary_part.added" => {
@@ -412,10 +396,6 @@ fn is_usage_not_included(error: &Error) -> bool {
error.code.as_deref() == Some("usage_not_included")
}
fn is_invalid_prompt_error(error: &Error) -> bool {
error.code.as_deref() == Some("invalid_prompt")
}
fn rate_limit_regex() -> &'static regex_lite::Regex {
static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new();
#[expect(clippy::unwrap_used)]
@@ -731,27 +711,6 @@ mod tests {
assert_matches!(events[0], Err(ApiError::QuotaExceeded));
}
#[tokio::test]
async fn invalid_prompt_without_type_is_invalid_request() {
let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_invalid_prompt_no_type","object":"response","created_at":1759771628,"status":"failed","background":false,"error":{"code":"invalid_prompt","message":"Invalid prompt: we've limited access to this content for safety reasons."},"incomplete_details":null}}"#;
let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
let events = collect_events(&[sse1.as_bytes()]).await;
assert_eq!(events.len(), 1);
match &events[0] {
Err(ApiError::InvalidRequest { message }) => {
assert_eq!(
message,
"Invalid prompt: we've limited access to this content for safety reasons."
);
}
other => panic!("unexpected event: {other:?}"),
}
}
#[tokio::test]
async fn table_driven_event_kinds() {
struct TestCase {

View File

@@ -308,7 +308,6 @@ async fn streaming_client_retries_on_transport_error() -> Result<()> {
content: vec![ContentItem::InputText {
text: "hi".to_string(),
}],
end_turn: None,
}],
tools: Vec::<Value>::new(),
parallel_tool_calls: false,

View File

@@ -77,7 +77,6 @@ async fn models_client_hits_models_endpoint() {
priority: 1,
upgrade: None,
base_instructions: "base instructions".to_string(),
model_messages: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,

View File

@@ -1,40 +0,0 @@
/*
* codex-backend
*
* codex-backend
*
* The version of the OpenAPI document: 0.0.1
*
* Generated by: https://openapi-generator.tech
*/
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConfigFileResponse {
#[serde(rename = "contents", skip_serializing_if = "Option::is_none")]
pub contents: Option<String>,
#[serde(rename = "sha256", skip_serializing_if = "Option::is_none")]
pub sha256: Option<String>,
#[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")]
pub updated_at: Option<String>,
#[serde(rename = "updated_by_user_id", skip_serializing_if = "Option::is_none")]
pub updated_by_user_id: Option<String>,
}
impl ConfigFileResponse {
pub fn new(
contents: Option<String>,
sha256: Option<String>,
updated_at: Option<String>,
updated_by_user_id: Option<String>,
) -> ConfigFileResponse {
ConfigFileResponse {
contents,
sha256,
updated_at,
updated_by_user_id,
}
}
}

View File

@@ -3,10 +3,6 @@
// Currently export only the types referenced by the workspace
// The process for this will change
// Config
pub mod config_file_response;
pub use self::config_file_response::ConfigFileResponse;
// Cloud Tasks
pub mod code_task_details_response;
pub use self::code_task_details_response::CodeTaskDetailsResponse;

View File

@@ -42,12 +42,9 @@ impl RateLimitStatusPayload {
}
}
#[derive(
Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Default,
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum PlanType {
#[serde(rename = "guest")]
#[default]
Guest,
#[serde(rename = "free")]
Free,
@@ -74,3 +71,9 @@ pub enum PlanType {
#[serde(rename = "edu")]
Edu,
}
impl Default for PlanType {
fn default() -> PlanType {
Self::Guest
}
}

View File

@@ -24,21 +24,21 @@ pub fn builtin_approval_presets() -> Vec<ApprovalPreset> {
ApprovalPreset {
id: "read-only",
label: "Read Only",
description: "Codex can read files in the current workspace. Approval is required to edit files or access the internet.",
description: "Requires approval to edit files and run commands.",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::ReadOnly,
},
ApprovalPreset {
id: "auto",
label: "Default",
description: "Codex can read and edit files in the current workspace, and run commands. Approval is required to access the internet or edit other files. (Identical to Agent mode)",
label: "Agent",
description: "Read and edit files, and run commands.",
approval: AskForApproval::OnRequest,
sandbox: SandboxPolicy::new_workspace_write_policy(),
},
ApprovalPreset {
id: "full-access",
label: "Full Access",
description: "Codex can edit files outside this workspace and access the internet without asking for approval. Exercise caution when using.",
label: "Agent (full access)",
description: "Codex can edit files outside this workspace and run commands with network access. Exercise caution when using.",
approval: AskForApproval::Never,
sandbox: SandboxPolicy::DangerFullAccess,
},

View File

@@ -18,7 +18,6 @@ codex_rust_crate(
),
integration_compile_data_extra = [
"//codex-rs/apply-patch:apply_patch_tool_instructions.md",
"models.json",
"prompt.md",
],
test_data_extra = [

Some files were not shown because too many files have changed in this diff.