Compare commits


8 Commits

Author          SHA1        Message  Date
Ahmed Ibrahim   2aba7b2913  prompt   2026-01-08 13:18:15 -08:00
Ahmed Ibrahim   a57fefa8fc  prompt   2026-01-08 12:50:29 -08:00
Ahmed Ibrahim   ed7723607d  model    2026-01-08 12:34:40 -08:00
Ahmed Ibrahim   7b3263e9a6  model    2026-01-08 12:21:51 -08:00
Ahmed Ibrahim   112bc1c39f  model    2026-01-08 12:16:25 -08:00
Ahmed Ibrahim   b1d46c2320  shimmer  2026-01-08 00:04:34 -08:00
Ahmed Ibrahim   b9e443f0c4  shimmer  2026-01-08 00:02:08 -08:00
Ahmed Ibrahim   f2e64e9fa5  shimmer  2026-01-07 23:49:27 -08:00
1419 changed files with 83826 additions and 63665 deletions


@@ -1,3 +0,0 @@
# Without this, Bazel will consider BUILD.bazel files in
# .git/sl/origbackups (which can be populated by Sapling SCM).
.git


@@ -1,46 +0,0 @@
common --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
common --repo_env=BAZEL_NO_APPLE_CPP_TOOLCHAIN=1
common --disk_cache=~/.cache/bazel-disk-cache
common --repo_contents_cache=~/.cache/bazel-repo-contents-cache
common --repository_cache=~/.cache/bazel-repo-cache
startup --experimental_remote_repo_contents_cache
common --experimental_platform_in_output_dir
common --enable_platform_specific_config
# TODO(zbarsky): We need to untangle these libc constraints to get linux remote builds working.
common:linux --host_platform=//:local
common --@rules_cc//cc/toolchains/args/archiver_flags:use_libtool_on_macos=False
common --@toolchains_llvm_bootstrapped//config:experimental_stub_libgcc_s
# We need to use the sh toolchain on windows so we don't send host bash paths to the linux executor.
common:windows --@rules_rust//rust/settings:experimental_use_sh_toolchain_for_bootstrap_process_wrapper
# TODO(zbarsky): rules_rust doesn't implement this flag properly with remote exec...
# common --@rules_rust//rust/settings:pipelined_compilation
common --incompatible_strict_action_env
# Not ideal, but we need to allow dotslash to be found
common --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/
common --bes_backend=grpcs://remote.buildbuddy.io
common --remote_cache=grpcs://remote.buildbuddy.io
common --remote_download_toplevel
common --nobuild_runfile_links
common --remote_timeout=3600
common --noexperimental_throttle_remote_action_building
common --experimental_remote_execution_keepalive
common --grpc_keepalive_time=30s
# This limits both in-flight executions and concurrent downloads. Even with a high
# number of jobs, execution will still be limited by CPU cores, so this just pays a
# bit of memory in exchange for higher download concurrency.
common --jobs=30
common:remote --extra_execution_platforms=//:rbe
common:remote --remote_executor=grpcs://remote.buildbuddy.io
common:remote --jobs=800


@@ -1 +0,0 @@
9.0.0


@@ -40,18 +40,11 @@ body:
description: |
For macOS and Linux: copy the output of `uname -mprs`
For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
- type: input
id: terminal
attributes:
label: What terminal emulator and version are you using (if applicable)?
description: Also note any multiplexer in use (screen / tmux / zellij)
description: |
E.g., VSCode, Terminal.app, iTerm2, Ghostty, Windows Terminal (WSL / PowerShell)
- type: textarea
id: actual
attributes:
label: What issue are you seeing?
description: Please include the full error messages and prompts with PII redacted. If possible, please provide text instead of a screenshot.
validations:
required: true
- type: textarea


@@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
: "${TARGET:?TARGET environment variable is required}"
: "${GITHUB_ENV:?GITHUB_ENV environment variable is required}"
apt_update_args=()
if [[ -n "${APT_UPDATE_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_update_args=(${APT_UPDATE_ARGS})
fi
apt_install_args=()
if [[ -n "${APT_INSTALL_ARGS:-}" ]]; then
# shellcheck disable=SC2206
apt_install_args=(${APT_INSTALL_ARGS})
fi
sudo apt-get update "${apt_update_args[@]}"
sudo apt-get install -y "${apt_install_args[@]}" musl-tools pkg-config g++ clang libc++-dev libc++abi-dev lld
case "${TARGET}" in
x86_64-unknown-linux-musl)
arch="x86_64"
;;
aarch64-unknown-linux-musl)
arch="aarch64"
;;
*)
echo "Unexpected musl target: ${TARGET}" >&2
exit 1
;;
esac
# Use the musl toolchain as the Rust linker to avoid Zig injecting its own CRT.
if command -v "${arch}-linux-musl-gcc" >/dev/null; then
musl_linker="$(command -v "${arch}-linux-musl-gcc")"
elif command -v musl-gcc >/dev/null; then
musl_linker="$(command -v musl-gcc)"
else
echo "musl gcc not found after install; arch=${arch}" >&2
exit 1
fi
zig_target="${TARGET/-unknown-linux-musl/-linux-musl}"
runner_temp="${RUNNER_TEMP:-/tmp}"
tool_root="${runner_temp}/codex-musl-tools-${TARGET}"
mkdir -p "${tool_root}"
sysroot=""
if command -v zig >/dev/null; then
zig_bin="$(command -v zig)"
cc="${tool_root}/zigcc"
cxx="${tool_root}/zigcxx"
cat >"${cc}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
# Drop any explicit --target/-target flags. Zig expects -target and
# rejects Rust triples like *-unknown-linux-musl.
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" cc -target "${zig_target}" "\${args[@]}"
EOF
cat >"${cxx}" <<EOF
#!/usr/bin/env bash
set -euo pipefail
args=()
skip_next=0
for arg in "\$@"; do
if [[ "\${skip_next}" -eq 1 ]]; then
skip_next=0
continue
fi
case "\${arg}" in
--target)
skip_next=1
continue
;;
--target=*|-target=*|-target)
if [[ "\${arg}" == "-target" ]]; then
skip_next=1
fi
continue
;;
esac
args+=("\${arg}")
done
exec "${zig_bin}" c++ -target "${zig_target}" "\${args[@]}"
EOF
chmod +x "${cc}" "${cxx}"
sysroot="$("${zig_bin}" cc -target "${zig_target}" -print-sysroot 2>/dev/null || true)"
else
cc="${musl_linker}"
if command -v "${arch}-linux-musl-g++" >/dev/null; then
cxx="$(command -v "${arch}-linux-musl-g++")"
elif command -v musl-g++ >/dev/null; then
cxx="$(command -v musl-g++)"
else
cxx="${cc}"
fi
fi
if [[ -n "${sysroot}" && "${sysroot}" != "/" ]]; then
echo "BORING_BSSL_SYSROOT=${sysroot}" >> "$GITHUB_ENV"
boring_sysroot_var="BORING_BSSL_SYSROOT_${TARGET}"
boring_sysroot_var="${boring_sysroot_var//-/_}"
echo "${boring_sysroot_var}=${sysroot}" >> "$GITHUB_ENV"
fi
cflags="-pthread"
cxxflags="-pthread"
if [[ "${TARGET}" == "aarch64-unknown-linux-musl" ]]; then
# BoringSSL enables -Wframe-larger-than=25344 under clang and treats warnings as errors.
cflags="${cflags} -Wno-error=frame-larger-than"
cxxflags="${cxxflags} -Wno-error=frame-larger-than"
fi
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
echo "CC=${cc}" >> "$GITHUB_ENV"
echo "TARGET_CC=${cc}" >> "$GITHUB_ENV"
target_cc_var="CC_${TARGET}"
target_cc_var="${target_cc_var//-/_}"
echo "${target_cc_var}=${cc}" >> "$GITHUB_ENV"
echo "CXX=${cxx}" >> "$GITHUB_ENV"
echo "TARGET_CXX=${cxx}" >> "$GITHUB_ENV"
target_cxx_var="CXX_${TARGET}"
target_cxx_var="${target_cxx_var//-/_}"
echo "${target_cxx_var}=${cxx}" >> "$GITHUB_ENV"
cargo_linker_var="CARGO_TARGET_${TARGET^^}_LINKER"
cargo_linker_var="${cargo_linker_var//-/_}"
echo "${cargo_linker_var}=${musl_linker}" >> "$GITHUB_ENV"
echo "CMAKE_C_COMPILER=${cc}" >> "$GITHUB_ENV"
echo "CMAKE_CXX_COMPILER=${cxx}" >> "$GITHUB_ENV"
echo "CMAKE_ARGS=-DCMAKE_HAVE_THREADS_LIBRARY=1 -DCMAKE_USE_PTHREADS_INIT=1 -DCMAKE_THREAD_LIBS_INIT=-pthread -DTHREADS_PREFER_PTHREAD_FLAG=ON" >> "$GITHUB_ENV"


@@ -1,20 +0,0 @@
FROM ubuntu:24.04
# TODO(mbolin): Published to docker.io/mbolin491/codex-bazel:latest for
# initial debugging, but we should publish to a more appropriate location.
#
# docker buildx create --use
# docker buildx build --platform linux/amd64,linux/arm64 -f .github/workflows/Dockerfile.bazel -t mbolin491/codex-bazel:latest --push .
RUN apt-get update && \
apt-get install -y --no-install-recommends \
curl git python3 ca-certificates && \
rm -rf /var/lib/apt/lists/*
# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu
WORKDIR /workspace


@@ -1,110 +0,0 @@
name: Bazel (experimental)
# Note this workflow was originally derived from:
# https://github.com/cerisier/toolchains_llvm_bootstrapped/blob/main/.github/workflows/ci.yaml
on:
pull_request: {}
push:
branches:
- main
workflow_dispatch:
concurrency:
# Cancel previous actions from the same PR or branch except 'main' branch.
# See https://docs.github.com/en/actions/using-jobs/using-concurrency and https://docs.github.com/en/actions/learn-github-actions/contexts for more info.
group: concurrency-group::${{ github.workflow }}::${{ github.event.pull_request.number > 0 && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}${{ github.ref_name == 'main' && format('::{0}', github.run_id) || ''}}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
test:
strategy:
fail-fast: false
matrix:
include:
# macOS
- os: macos-15-xlarge
target: aarch64-apple-darwin
- os: macos-15-xlarge
target: x86_64-apple-darwin
# Linux
- os: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- os: ubuntu-24.04
target: x86_64-unknown-linux-gnu
- os: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
- os: ubuntu-24.04
target: x86_64-unknown-linux-musl
# TODO: Enable Windows once we fix the toolchain issues there.
#- os: windows-latest
# target: x86_64-pc-windows-gnullvm
runs-on: ${{ matrix.os }}
# Configure a human-readable name for each job
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
steps:
- uses: actions/checkout@v6
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- name: Make DotSlash available in PATH (Unix)
if: runner.os != 'Windows'
run: cp "$(which dotslash)" /usr/local/bin
- name: Make DotSlash available in PATH (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
# Install Bazel via Bazelisk
- name: Set up Bazel
uses: bazelbuild/setup-bazelisk@v3
# TODO(mbolin): Bring this back once we have caching working. Currently,
# we never seem to get a cache hit but we still end up paying the cost of
# uploading at the end of the build, which takes over a minute!
#
# Cache build and external artifacts so that the next CI build is incremental.
# Because GitHub Actions caches cannot be updated after a build, we need to
# store the contents of each build in a unique cache key, then fall back to loading
# it on the next CI run. We use hashFiles(...) in the key and restore-keys with
# the prefix to load the most recent cache for the branch on a cache miss. You
# should customize the contents of hashFiles to capture any bazel input sources,
# although this doesn't need to be perfect. If none of the input sources change,
# then a cache hit will load an existing cache and bazel won't have to do any work.
# In the case of a cache miss, you want the fallback cache to contain most of the
# previously built artifacts to minimize build time. The more precise you are with
# hashFiles sources, the less work bazel will have to do.
# - name: Mount bazel caches
# uses: actions/cache@v5
# with:
# path: |
# ~/.cache/bazel-repo-cache
# ~/.cache/bazel-repo-contents-cache
# key: bazel-cache-${{ matrix.os }}-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'MODULE.bazel') }}
# restore-keys: |
# bazel-cache-${{ matrix.os }}
- name: Configure Bazel startup args (Windows)
if: runner.os == 'Windows'
shell: pwsh
run: |
# Use a very short path to reduce argv/path length issues.
"BAZEL_STARTUP_ARGS=--output_user_root=C:\" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
shell: bash
run: |
bazel $BAZEL_STARTUP_ARGS --bazelrc=.github/workflows/ci.bazelrc test //... \
--build_metadata=REPO_URL=https://github.com/openai/codex.git \
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD) \
--build_metadata=ROLE=CI \
--build_metadata=VISIBILITY=PUBLIC \
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY"


@@ -1,20 +0,0 @@
common --remote_download_minimal
common --nobuild_runfile_links
common --keep_going
# We prefer to run the build actions entirely remotely so we can dial up the concurrency.
# We have platform-specific tests, so we want to execute the tests on all platforms using the strongest sandboxing available on each platform.
# On linux, we can do a full remote build/test, by targeting the right (x86/arm) runners, so we have coverage of both.
# Linux crossbuilds don't work until we untangle the libc constraint mess.
common:linux --config=remote
common:linux --strategy=remote
common:linux --platforms=//:rbe
# On mac, we can run all the build actions remotely but test actions locally.
common:macos --config=remote
common:macos --strategy=remote
common:macos --strategy=TestRunner=darwin-sandbox,local
common:windows --strategy=TestRunner=local


@@ -59,7 +59,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
components: rustfmt
- name: cargo fmt
@@ -77,7 +77,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
@@ -88,7 +88,7 @@ jobs:
# --- CI to validate on different os/targets --------------------------------
lint_build:
name: Lint/Build — ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
# Keep job-level if to avoid spinning up runners when not needed
@@ -106,78 +106,51 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: macos-15-xlarge
- runner: macos-14
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
# Also run representative release builds on Mac and Linux because
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
profile: release
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: release
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
components: clippy
@@ -261,21 +234,15 @@ jobs:
/var/cache/apt
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
DEBIAN_FRONTEND: noninteractive
TARGET: ${{ matrix.target }}
APT_UPDATE_ARGS: -o Acquire::Retries=3
APT_INSTALL_ARGS: --no-install-recommends
shell: bash
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
set -euo pipefail
sudo apt-get -y update -o Acquire::Retries=3
sudo apt-get -y install --no-install-recommends musl-tools pkg-config
- name: Install cargo-chef
if: ${{ matrix.profile == 'release' }}
@@ -369,7 +336,7 @@ jobs:
tests:
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 30
needs: changed
if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
@@ -386,43 +353,46 @@ jobs:
fail-fast: false
matrix:
include:
- runner: macos-15-xlarge
- runner: macos-14
target: aarch64-apple-darwin
profile: dev
- runner: ubuntu-24.04
target: x86_64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-x64
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
profile: dev
runs_on:
group: codex-runners
labels: codex-linux-arm64
- runner: windows-x64
- runner: windows-latest
target: x86_64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
- runner: windows-11-arm
target: aarch64-pc-windows-msvc
profile: dev
runs_on:
group: codex-runners
labels: codex-windows-arm64
steps:
- uses: actions/checkout@v6
# We have been running out of space when running this job on Linux for
# x86_64-unknown-linux-gnu, so remove some unnecessary dependencies.
- name: Remove unnecessary dependencies to save space
if: ${{ startsWith(matrix.runner, 'ubuntu') }}
shell: bash
run: |
set -euo pipefail
sudo rm -rf \
/usr/local/lib/android \
/usr/share/dotnet \
/usr/local/share/boost \
/usr/local/lib/node_modules \
/opt/ghc
sudo apt-get remove -y docker.io docker-compose podman buildah
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash
uses: facebook/install-dotslash@v2
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}


@@ -20,7 +20,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- name: Validate tag matches Cargo.toml version
shell: bash
@@ -46,20 +45,11 @@ jobs:
echo "✅ Tag and Cargo.toml agree (${tag_ver})"
echo "::endgroup::"
- name: Verify config schema fixture
shell: bash
working-directory: codex-rs
run: |
set -euo pipefail
echo "If this fails, run: just write-config-schema to overwrite fixture with intentional changes."
cargo run -p codex-core --bin codex-write-config-schema
git diff --exit-code core/config.schema.json
build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runner }}
timeout-minutes: 60
timeout-minutes: 30
permissions:
contents: read
id-token: write
@@ -90,7 +80,7 @@ jobs:
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
@@ -104,17 +94,11 @@ jobs:
${{ github.workspace }}/codex-rs/target/
key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
name: Install musl build tools
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Cargo build
shell: bash
@@ -291,28 +275,7 @@ jobs:
# Must run from inside the dest dir so 7z won't
# embed the directory path inside the zip.
if [[ "${{ matrix.runner }}" == windows* ]]; then
if [[ "$base" == "codex-${{ matrix.target }}.exe" ]]; then
# Bundle the sandbox helper binaries into the main codex zip so
# WinGet installs include the required helpers next to codex.exe.
# Fall back to the single-binary zip if the helpers are missing
# to avoid breaking releases.
bundle_dir="$(mktemp -d)"
runner_src="$dest/codex-command-runner-${{ matrix.target }}.exe"
setup_src="$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
if [[ -f "$runner_src" && -f "$setup_src" ]]; then
cp "$dest/$base" "$bundle_dir/$base"
cp "$runner_src" "$bundle_dir/codex-command-runner.exe"
cp "$setup_src" "$bundle_dir/codex-windows-sandbox-setup.exe"
(cd "$bundle_dir" && 7z a "$dest/${base}.zip" .)
else
echo "warning: missing sandbox binaries; falling back to single-binary zip"
echo "warning: expected $runner_src and $setup_src"
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
rm -rf "$bundle_dir"
else
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
(cd "$dest" && 7z a "${base}.zip" "$base")
fi
# Also create .zst (existing behaviour) *and* remove the original
@@ -395,10 +358,6 @@ jobs:
ls -R dist/
- name: Add config schema release asset
run: |
cp codex-rs/core/config.schema.json dist/config-schema.json
- name: Define release name
id: release_name
run: |
@@ -469,19 +428,6 @@ jobs:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
- name: Trigger developers.openai.com deploy
# Only trigger the deploy if the release is not a pre-release.
# The deploy is used to update the developers.openai.com website with the new config schema JSON file.
if: ${{ !contains(steps.release_name.outputs.name, '-') }}
continue-on-error: true
env:
DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL: ${{ secrets.DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL }}
run: |
if ! curl -sS -f -o /dev/null -X POST "$DEV_WEBSITE_VERCEL_DEPLOY_HOOK_URL"; then
echo "::warning title=developers.openai.com deploy hook failed::Vercel deploy hook POST failed for ${GITHUB_REF_NAME}"
exit 1
fi
# Publish to npm using OIDC authentication.
# July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
# npm docs: https://docs.npmjs.com/trusted-publishers


@@ -24,7 +24,7 @@ jobs:
node-version: 22
cache: pnpm
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
- name: build codex
run: cargo build --bin codex


@@ -93,21 +93,15 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@1.92
- uses: dtolnay/rust-toolchain@1.90
with:
targets: ${{ matrix.target }}
- if: ${{ matrix.install_musl }}
name: Install Zig
uses: mlugg/setup-zig@v2
with:
version: 0.14.0
- if: ${{ matrix.install_musl }}
name: Install musl build dependencies
env:
TARGET: ${{ matrix.target }}
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
run: |
sudo apt-get update
sudo apt-get install -y musl-tools pkg-config
- name: Build exec server binaries
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
@@ -204,7 +198,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
@@ -246,7 +240,7 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone --depth 1 https://github.com/bminor/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b

.gitignore (vendored, 1 line changed)

@@ -9,7 +9,6 @@ node_modules
# build
dist/
bazel-*
build/
out/
storybook-static/


@@ -1,6 +0,0 @@
config:
MD013:
line_length: 100
globs:
- "docs/tui-chat-composer.md"


@@ -13,14 +13,12 @@ In the codex-rs folder where the Rust code lives:
- Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls (see the sketch after this list)
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.
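
A minimal sketch of the method-reference preference (names are illustrative, not from the codebase):

```rust
fn main() {
    let names = vec!["alpha".to_string(), "beta".to_string()];
    // Clippy's redundant_closure_for_method_calls flags this closure,
    // because it only forwards its argument to a method:
    let _lens: Vec<usize> = names.iter().map(|s| s.len()).collect();
    // Preferred: pass the method reference directly.
    let lens: Vec<usize> = names.iter().map(String::len).collect();
    assert_eq!(lens, vec![5, 4]);
}
```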
Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
Before finalizing a large change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
## TUI style conventions
@@ -79,11 +77,11 @@ If you don't have the tool:
- Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields.
- Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above.
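
A minimal sketch of that injection pattern; the function and flag here are hypothetical:

```rust
// Read the environment once at the edge, then pass the value down.
fn retry_limit(env_value: Option<&str>) -> usize {
    env_value.and_then(|v| v.parse().ok()).unwrap_or(3)
}

#[test]
fn overrides_apply_without_mutating_the_process_env() {
    // No std::env::set_var, so tests remain parallel-safe.
    assert_eq!(retry_limit(Some("5")), 5);
    assert_eq!(retry_limit(None), 3);
}
```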
### Spawning workspace binaries in tests (Cargo vs Bazel)
### Spawning workspace binaries in tests (Cargo vs Buck2)
- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries (see the sketch after this list).
- Under Bazel, binaries and resources may live under runfiles; use `codex_utils_cargo_bin::cargo_bin` to resolve absolute paths that remain stable after `chdir`.
- When locating fixture files or test resources under Bazel, avoid `env!("CARGO_MANIFEST_DIR")`. Prefer `codex_utils_cargo_bin::find_resource!` so paths resolve correctly under both Cargo and Bazel runfiles.
- Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first.
- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed.
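
A minimal sketch of the preferred pattern, assuming `cargo_bin` returns an absolute `PathBuf` for a first-party binary (the binary name here is illustrative):

```rust
use std::process::Command;

#[test]
fn spawns_first_party_binary() {
    // Absolute path, so a later chdir in the test cannot break it.
    let bin = codex_utils_cargo_bin::cargo_bin("codex");
    let output = Command::new(bin)
        .arg("--version")
        .output()
        .expect("failed to spawn codex");
    assert!(output.status.success());
}
```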
### Integration tests (core)


@@ -1,19 +0,0 @@
# We mark the local platform as glibc-compatible so that Rust can grab a toolchain for us.
# TODO(zbarsky): Upstream a better libc constraint into rules_rust.
# We only enable this on Linux, for sanity and because it breaks remote execution.
platform(
name = "local",
constraint_values = [
"@toolchains_llvm_bootstrapped//constraints/libc:gnu.2.28",
],
parents = [
"@platforms//host",
],
)
alias(
name = "rbe",
actual = "@rbe_platform",
)
exports_files(["AGENTS.md"])


@@ -1,124 +0,0 @@
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.3.1")
archive_override(
module_name = "toolchains_llvm_bootstrapped",
integrity = "sha256-4/2h4tYSUSptxFVI9G50yJxWGOwHSeTeOGBlaLQBV8g=",
strip_prefix = "toolchains_llvm_bootstrapped-d20baf67e04d8e2887e3779022890d1dc5e6b948",
urls = ["https://github.com/cerisier/toolchains_llvm_bootstrapped/archive/d20baf67e04d8e2887e3779022890d1dc5e6b948.tar.gz"],
)
osx = use_extension("@toolchains_llvm_bootstrapped//toolchain/extension:osx.bzl", "osx")
osx.framework(name = "ApplicationServices")
osx.framework(name = "AppKit")
osx.framework(name = "ColorSync")
osx.framework(name = "CoreFoundation")
osx.framework(name = "CoreGraphics")
osx.framework(name = "CoreServices")
osx.framework(name = "CoreText")
osx.framework(name = "CFNetwork")
osx.framework(name = "Foundation")
osx.framework(name = "ImageIO")
osx.framework(name = "Kernel")
osx.framework(name = "OSLog")
osx.framework(name = "Security")
osx.framework(name = "SystemConfiguration")
register_toolchains(
"@toolchains_llvm_bootstrapped//toolchain:all",
)
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rust", version = "0.68.1")
single_version_override(
module_name = "rules_rust",
patch_strip = 1,
patches = [
"//patches:rules_rust.patch",
"//patches:rules_rust_windows_gnu.patch",
"//patches:rules_rust_musl.patch",
],
)
RUST_TRIPLES = [
"aarch64-unknown-linux-musl",
"aarch64-apple-darwin",
"aarch64-pc-windows-gnullvm",
"x86_64-unknown-linux-musl",
"x86_64-apple-darwin",
"x86_64-pc-windows-gnullvm",
]
rust = use_extension("@rules_rust//rust:extensions.bzl", "rust")
rust.toolchain(
edition = "2024",
extra_target_triples = RUST_TRIPLES,
versions = ["1.90.0"],
)
use_repo(rust, "rust_toolchains")
register_toolchains("@rust_toolchains//:all")
bazel_dep(name = "rules_rs", version = "0.0.23")
crate = use_extension("@rules_rs//rs:extensions.bzl", "crate")
crate.from_cargo(
cargo_lock = "//codex-rs:Cargo.lock",
cargo_toml = "//codex-rs:Cargo.toml",
platform_triples = RUST_TRIPLES,
)
bazel_dep(name = "openssl", version = "3.5.4.bcr.0")
crate.annotation(
build_script_data = [
"@openssl//:gen_dir",
],
build_script_env = {
"OPENSSL_DIR": "$(execpath @openssl//:gen_dir)",
"OPENSSL_NO_VENDOR": "1",
"OPENSSL_STATIC": "1",
},
crate = "openssl-sys",
data = ["@openssl//:gen_dir"],
)
inject_repo(crate, "openssl")
# Fix readme inclusions
crate.annotation(
crate = "windows-link",
patch_args = ["-p1"],
patches = [
"//patches:windows-link.patch",
],
)
WINDOWS_IMPORT_LIB = """
load("@rules_cc//cc:defs.bzl", "cc_import")
cc_import(
name = "windows_import_lib",
static_library = glob(["lib/*.a"])[0],
)
"""
crate.annotation(
additive_build_file_content = WINDOWS_IMPORT_LIB,
crate = "windows_x86_64_gnullvm",
gen_build_script = "off",
deps = [":windows_import_lib"],
)
crate.annotation(
additive_build_file_content = WINDOWS_IMPORT_LIB,
crate = "windows_aarch64_gnullvm",
gen_build_script = "off",
deps = [":windows_import_lib"],
)
use_repo(crate, "crates")
rbe_platform_repository = use_repo_rule("//:rbe.bzl", "rbe_platform_repository")
rbe_platform_repository(
name = "rbe_platform",
)

MODULE.bazel.lock (generated, 1315 lines changed)

File diff suppressed because one or more lines are too long


@@ -10,7 +10,6 @@ from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"
# Test announcement shown only for the local build version, valid until 2026-01-10 (exclusive; now past)
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"

codex-cli/bin/codex.js (Executable file → Normal file, 1 line changed)

@@ -95,6 +95,7 @@ function detectPackageManager() {
return "bun";
}
if (
__dirname.includes(".bun/install/global") ||
__dirname.includes(".bun\\install\\global")


@@ -1 +0,0 @@

codex-rs/Cargo.lock (generated, 1207 lines changed)

File diff suppressed because it is too large


@@ -6,7 +6,6 @@ members = [
"app-server",
"app-server-protocol",
"app-server-test-client",
"debug-client",
"apply-patch",
"arg0",
"feedback",
@@ -27,7 +26,6 @@ members = [
"login",
"mcp-server",
"mcp-types",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
@@ -36,6 +34,7 @@ members = [
"stdio-to-uds",
"otel",
"tui",
"tui2",
"utils/absolute-path",
"utils/cargo-bin",
"utils/git",
@@ -71,7 +70,6 @@ codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli"}
codex-client = { path = "codex-client" }
codex-common = { path = "common" }
codex-core = { path = "core" }
@@ -93,6 +91,7 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rmcp-client = { path = "rmcp-client" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-tui = { path = "tui" }
codex-tui2 = { path = "tui2" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
@@ -121,12 +120,12 @@ axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bytes = "1.10.1"
chardetng = "0.1.17"
chrono = "0.4.43"
chrono = "0.4.42"
clap = "4"
clap_complete = "4"
color-eyre = "0.6.3"
crossterm = "0.28.1"
ctor = "0.6.3"
ctor = "0.5.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
@@ -135,15 +134,14 @@ dunce = "1.0.4"
encoding_rs = "0.8.35"
env-flags = "0.1.1"
env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
globset = "0.4"
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"
icu_provider = { version = "2.1", features = ["sync"] }
ignore = "0.4.23"
indoc = "2.0"
image = { version = "^0.25.9", default-features = false }
include_dir = "0.7.4"
indexmap = "2.12.0"
@@ -178,6 +176,7 @@ pretty_assertions = "1.4.1"
pulldown-cmark = "0.10"
rand = "0.9"
ratatui = "0.29.0"
ratatui-core = "0.1.0"
ratatui-macros = "0.6.0"
regex = "1.12.2"
regex-lite = "0.1.8"
@@ -188,13 +187,11 @@ seccompiler = "0.5.0"
sentry = "0.46.0"
serde = "1"
serde_json = "1"
serde_path_to_error = "0.1.20"
serde_with = "3.16"
serde_yaml = "0.9"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"
@@ -212,8 +209,7 @@ tiny_http = "0.12"
tokio = "1"
tokio-stream = "0.1.18"
tokio-test = "0.4"
tokio-tungstenite = { version = "0.28.0", features = ["proxy", "rustls-tls-native-roots"] }
tokio-util = "0.7.18"
tokio-util = "0.7.16"
toml = "0.9.5"
toml_edit = "0.24.0"
tracing = "0.1.43"
@@ -222,9 +218,9 @@ tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
zstd = "0.13"
tree-sitter-highlight = "0.25.10"
ts-rs = "11"
tui-scrollbar = "0.2.1"
uds_windows = "1.1.0"
unicode-segmentation = "1.12.0"
unicode-width = "0.2"
@@ -234,7 +230,7 @@ uuid = "1"
vt100 = "0.16.2"
walkdir = "2.5.0"
webbrowser = "1.0"
which = "8"
which = "6"
wildmatch = "2.6.1"
wiremock = "0.6"
@@ -302,10 +298,6 @@ opt-level = 0
# ratatui = { path = "../../ratatui" }
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
tokio-tungstenite = { git = "https://github.com/JakkuSakura/tokio-tungstenite", rev = "2ae536b0de793f3ddf31fc2f22d445bf1ef2023d" }
# Uncomment to debug local changes.
# rmcp = { path = "../../rust-sdk/crates/rmcp" }
[patch."ssh://git@github.com/JakkuSakura/tungstenite-rs.git"]
tungstenite = { git = "https://github.com/JakkuSakura/tungstenite-rs", rev = "f514de8644821113e5d18a027d6d28a5c8cc0a6e" }


@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "ansi-escape",
crate_name = "codex_ansi_escape",
)


@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "app-server-protocol",
crate_name = "codex_app_server_protocol",
)


@@ -109,10 +109,6 @@ client_request_definitions! {
params: v2::ThreadResumeParams,
response: v2::ThreadResumeResponse,
},
ThreadFork => "thread/fork" {
params: v2::ThreadForkParams,
response: v2::ThreadForkResponse,
},
ThreadArchive => "thread/archive" {
params: v2::ThreadArchiveParams,
response: v2::ThreadArchiveResponse,
@@ -125,26 +121,10 @@ client_request_definitions! {
params: v2::ThreadListParams,
response: v2::ThreadListResponse,
},
ThreadLoadedList => "thread/loaded/list" {
params: v2::ThreadLoadedListParams,
response: v2::ThreadLoadedListResponse,
},
ThreadRead => "thread/read" {
params: v2::ThreadReadParams,
response: v2::ThreadReadResponse,
},
SkillsList => "skills/list" {
params: v2::SkillsListParams,
response: v2::SkillsListResponse,
},
AppsList => "app/list" {
params: v2::AppsListParams,
response: v2::AppsListResponse,
},
SkillsConfigWrite => "skills/config/write" {
params: v2::SkillsConfigWriteParams,
response: v2::SkillsConfigWriteResponse,
},
TurnStart => "turn/start" {
params: v2::TurnStartParams,
response: v2::TurnStartResponse,
@@ -162,22 +142,12 @@ client_request_definitions! {
params: v2::ModelListParams,
response: v2::ModelListResponse,
},
/// EXPERIMENTAL - list collaboration mode presets.
CollaborationModeList => "collaborationMode/list" {
params: v2::CollaborationModeListParams,
response: v2::CollaborationModeListResponse,
},
McpServerOauthLogin => "mcpServer/oauth/login" {
params: v2::McpServerOauthLoginParams,
response: v2::McpServerOauthLoginResponse,
},
McpServerRefresh => "config/mcpServer/reload" {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::McpServerRefreshResponse,
},
McpServerStatusList => "mcpServerStatus/list" {
params: v2::ListMcpServerStatusParams,
response: v2::ListMcpServerStatusResponse,
@@ -214,12 +184,6 @@ client_request_definitions! {
response: v2::CommandExecResponse,
},
/// Run a single turn to completion without managing a thread lifecycle.
ExecRun => "exec/run" {
params: v2::ExecRunParams,
response: v2::ExecRunResponse,
},
ConfigRead => "config/read" {
params: v2::ConfigReadParams,
response: v2::ConfigReadResponse,
@@ -233,11 +197,6 @@ client_request_definitions! {
response: v2::ConfigWriteResponse,
},
ConfigRequirementsRead => "configRequirements/read" {
params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
response: v2::ConfigRequirementsReadResponse,
},
GetAccount => "account/read" {
params: v2::GetAccountParams,
response: v2::GetAccountResponse,
@@ -262,11 +221,6 @@ client_request_definitions! {
params: v1::ResumeConversationParams,
response: v1::ResumeConversationResponse,
},
/// Fork a recorded Codex conversation into a new session.
ForkConversation {
params: v1::ForkConversationParams,
response: v1::ForkConversationResponse,
},
ArchiveConversation {
params: v1::ArchiveConversationParams,
response: v1::ArchiveConversationResponse,
@@ -524,18 +478,6 @@ server_request_definitions! {
response: v2::FileChangeRequestApprovalResponse,
},
/// EXPERIMENTAL - Request input from the user for a tool call.
ToolRequestUserInput => "item/tool/requestUserInput" {
params: v2::ToolRequestUserInputParams,
response: v2::ToolRequestUserInputResponse,
},
/// Execute a dynamic tool call on the client.
DynamicToolCall => "item/tool/call" {
params: v2::DynamicToolCallParams,
response: v2::DynamicToolCallResponse,
},
/// DEPRECATED APIs below
/// Request to approve a patch.
/// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage).
@@ -602,7 +544,6 @@ server_notification_definitions! {
ReasoningTextDelta => "item/reasoning/textDelta" (v2::ReasoningTextDeltaNotification),
ContextCompacted => "thread/compacted" (v2::ContextCompactedNotification),
DeprecationNotice => "deprecationNotice" (v2::DeprecationNoticeNotification),
ConfigWarning => "configWarning" (v2::ConfigWarningNotification),
/// Notifies the user of world-writable directories on Windows, which cannot be protected by the sandbox.
WindowsWorldWritableWarning => "windows/worldWritableWarning" (v2::WindowsWorldWritableWarningNotification),
@@ -770,22 +711,6 @@ mod tests {
Ok(())
}
#[test]
fn serialize_config_requirements_read() -> Result<()> {
let request = ClientRequest::ConfigRequirementsRead {
request_id: RequestId::Integer(1),
params: None,
};
assert_eq!(
json!({
"method": "configRequirements/read",
"id": 1,
}),
serde_json::to_value(&request)?,
);
Ok(())
}
#[test]
fn serialize_account_login_api_key() -> Result<()> {
let request = ClientRequest::LoginAccount {
@@ -909,21 +834,4 @@ mod tests {
);
Ok(())
}
#[test]
fn serialize_list_collaboration_modes() -> Result<()> {
let request = ClientRequest::CollaborationModeList {
request_id: RequestId::Integer(7),
params: v2::CollaborationModeListParams::default(),
};
assert_eq!(
json!({
"method": "collaborationMode/list",
"id": 7,
"params": {}
}),
serde_json::to_value(&request)?,
);
Ok(())
}
}


@@ -197,12 +197,6 @@ impl ThreadHistoryBuilder {
if !payload.message.trim().is_empty() {
content.push(UserInput::Text {
text: payload.message.clone(),
text_elements: payload
.text_elements
.iter()
.cloned()
.map(Into::into)
.collect(),
});
}
if let Some(images) = &payload.images {
@@ -210,9 +204,6 @@ impl ThreadHistoryBuilder {
content.push(UserInput::Image { url: image.clone() });
}
}
for path in &payload.local_images {
content.push(UserInput::LocalImage { path: path.clone() });
}
content
}
}
@@ -253,8 +244,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "First turn".into(),
images: Some(vec!["https://example.com/one.png".into()]),
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Hi there".into(),
@@ -268,8 +257,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Second turn".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Reply two".into(),
@@ -290,7 +277,6 @@ mod tests {
content: vec![
UserInput::Text {
text: "First turn".into(),
text_elements: Vec::new(),
},
UserInput::Image {
url: "https://example.com/one.png".into(),
@@ -322,8 +308,7 @@ mod tests {
ThreadItem::UserMessage {
id: "item-4".into(),
content: vec![UserInput::Text {
text: "Second turn".into(),
text_elements: Vec::new(),
text: "Second turn".into()
}],
}
);
@@ -342,8 +327,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Turn start".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentReasoning(AgentReasoningEvent {
text: "first summary".into(),
@@ -388,8 +371,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Please do the thing".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Working...".into(),
@@ -400,8 +381,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Let's try again".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "Second attempt complete.".into(),
@@ -419,8 +398,7 @@ mod tests {
ThreadItem::UserMessage {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "Please do the thing".into(),
text_elements: Vec::new(),
text: "Please do the thing".into()
}],
}
);
@@ -440,8 +418,7 @@ mod tests {
ThreadItem::UserMessage {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Let's try again".into(),
text_elements: Vec::new(),
text: "Let's try again".into()
}],
}
);
@@ -460,8 +437,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "First".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A1".into(),
@@ -469,8 +444,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Second".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A2".into(),
@@ -479,8 +452,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Third".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A3".into(),
@@ -498,7 +469,6 @@ mod tests {
id: "item-1".into(),
content: vec![UserInput::Text {
text: "First".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
@@ -516,7 +486,6 @@ mod tests {
id: "item-3".into(),
content: vec![UserInput::Text {
text: "Third".into(),
text_elements: Vec::new(),
}],
},
ThreadItem::AgentMessage {
@@ -535,8 +504,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "One".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A1".into(),
@@ -544,8 +511,6 @@ mod tests {
EventMsg::UserMessage(UserMessageEvent {
message: "Two".into(),
images: None,
text_elements: Vec::new(),
local_images: Vec::new(),
}),
EventMsg::AgentMessage(AgentMessageEvent {
message: "A2".into(),


@@ -16,8 +16,6 @@ use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::user_input::ByteRange as CoreByteRange;
use codex_protocol::user_input::TextElement as CoreTextElement;
use codex_utils_absolute_path::AbsolutePathBuf;
use schemars::JsonSchema;
use serde::Deserialize;
@@ -85,15 +83,6 @@ pub struct ResumeConversationResponse {
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ForkConversationResponse {
pub conversation_id: ThreadId,
pub model: String,
pub initial_messages: Option<Vec<EventMsg>>,
pub rollout_path: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(untagged)]
pub enum GetConversationSummaryParams {
@@ -128,7 +117,6 @@ pub struct ConversationSummary {
pub path: PathBuf,
pub preview: String,
pub timestamp: Option<String>,
pub updated_at: Option<String>,
pub model_provider: String,
pub cwd: PathBuf,
pub cli_version: String,
@@ -160,14 +148,6 @@ pub struct ResumeConversationParams {
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct ForkConversationParams {
pub path: Option<PathBuf>,
pub conversation_id: Option<ThreadId>,
pub overrides: Option<NewConversationParams>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
pub struct AddConversationSubscriptionResponse {
@@ -447,71 +427,9 @@ pub struct RemoveConversationListenerParams {
#[serde(rename_all = "camelCase")]
#[serde(tag = "type", content = "data")]
pub enum InputItem {
Text {
text: String,
/// UI-defined spans within `text` used to render or persist special elements.
#[serde(default)]
text_elements: Vec<V1TextElement>,
},
Image {
image_url: String,
},
LocalImage {
path: PathBuf,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename = "ByteRange")]
pub struct V1ByteRange {
/// Start byte offset (inclusive) within the UTF-8 text buffer.
pub start: usize,
/// End byte offset (exclusive) within the UTF-8 text buffer.
pub end: usize,
}
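// Hedged sketch: the (inclusive start, exclusive end) convention documented
// above is the same half-open range std string slicing uses; values are hypothetical.
#[test]
fn v1_byte_range_is_half_open() {
    let text = "hello @file";
    let range = V1ByteRange { start: 6, end: 11 }; // the bytes spelling "@file"
    assert_eq!(&text[range.start..range.end], "@file");
}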
impl From<CoreByteRange> for V1ByteRange {
fn from(value: CoreByteRange) -> Self {
Self {
start: value.start,
end: value.end,
}
}
}
impl From<V1ByteRange> for CoreByteRange {
fn from(value: V1ByteRange) -> Self {
Self {
start: value.start,
end: value.end,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(rename = "TextElement")]
pub struct V1TextElement {
/// Byte range in the parent `text` buffer that this element occupies.
pub byte_range: V1ByteRange,
/// Optional human-readable placeholder for the element, displayed in the UI.
pub placeholder: Option<String>,
}
impl From<CoreTextElement> for V1TextElement {
fn from(value: CoreTextElement) -> Self {
Self {
byte_range: value.byte_range.into(),
placeholder: value._placeholder_for_conversion_only().map(str::to_string),
}
}
}
impl From<V1TextElement> for CoreTextElement {
fn from(value: V1TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
}
Text { text: String },
Image { image_url: String },
LocalImage { path: PathBuf },
}
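// Hedged sketch of the wire shape implied by the serde attributes above
// (adjacent "type"/"data" tagging, camelCase variant names); the payload is hypothetical.
#[test]
fn input_item_uses_adjacent_tagging() {
    let item = InputItem::Text { text: "hi".into() };
    assert_eq!(
        serde_json::to_value(&item).unwrap(),
        serde_json::json!({ "type": "text", "data": { "text": "hi" } }),
    );
}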
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]


@@ -4,14 +4,10 @@ use std::path::PathBuf;
use crate::protocol::common::AuthMode;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::models::ResponseItem;
@@ -19,7 +15,6 @@ use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
use codex_protocol::plan_tool::PlanItemArg as CorePlanItemArg;
use codex_protocol::plan_tool::StepStatus as CorePlanStepStatus;
use codex_protocol::protocol::AgentStatus as CoreAgentStatus;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo;
use codex_protocol::protocol::CreditsSnapshot as CoreCreditsSnapshot;
@@ -28,14 +23,10 @@ use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow as CoreRateLimitWindow;
use codex_protocol::protocol::SessionSource as CoreSessionSource;
use codex_protocol::protocol::SkillErrorInfo as CoreSkillErrorInfo;
use codex_protocol::protocol::SkillInterface as CoreSkillInterface;
use codex_protocol::protocol::SkillMetadata as CoreSkillMetadata;
use codex_protocol::protocol::SkillScope as CoreSkillScope;
use codex_protocol::protocol::SubAgentSource as CoreSubAgentSource;
use codex_protocol::protocol::TokenUsage as CoreTokenUsage;
use codex_protocol::protocol::TokenUsageInfo as CoreTokenUsageInfo;
use codex_protocol::user_input::ByteRange as CoreByteRange;
use codex_protocol::user_input::TextElement as CoreTextElement;
use codex_protocol::user_input::UserInput as CoreUserInput;
use codex_utils_absolute_path::AbsolutePathBuf;
use mcp_types::ContentBlock as McpContentBlock;
@@ -220,6 +211,7 @@ v2_enum_from_core!(
}
);
// TODO(mbolin): Support in-repo layer.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
@@ -325,15 +317,6 @@ pub struct ToolsV2 {
pub view_image: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolSpec {
pub name: String,
pub description: String,
pub input_schema: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
@@ -344,7 +327,6 @@ pub struct ProfileV2 {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
pub web_search: Option<WebSearchMode>,
pub chatgpt_base_url: Option<String>,
#[serde(default, flatten)]
pub additional: HashMap<String, JsonValue>,
@@ -373,7 +355,6 @@ pub struct Config {
pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
pub forced_chatgpt_workspace_id: Option<String>,
pub forced_login_method: Option<ForcedLoginMethod>,
pub web_search: Option<WebSearchMode>,
pub tools: Option<ToolsV2>,
pub profile: Option<String>,
#[serde(default)]
@@ -404,8 +385,6 @@ pub struct ConfigLayer {
pub name: ConfigLayerSource,
pub version: String,
pub config: JsonValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub disabled_reason: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
@@ -462,10 +441,6 @@ pub enum ConfigWriteErrorCode {
pub struct ConfigReadParams {
#[serde(default)]
pub include_layers: bool,
/// Optional working directory to resolve project config layers. If specified,
/// return the effective config as seen from that directory (i.e., including any
/// project layers between `cwd` and the project/repo root).
pub cwd: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -478,22 +453,6 @@ pub struct ConfigReadResponse {
pub layers: Option<Vec<ConfigLayer>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigRequirements {
pub allowed_approval_policies: Option<Vec<AskForApproval>>,
pub allowed_sandbox_modes: Option<Vec<SandboxMode>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigRequirementsReadResponse {
/// Null if no requirements are configured (e.g. no requirements.toml/MDM entries).
pub requirements: Option<ConfigRequirements>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -711,7 +670,6 @@ pub enum SessionSource {
VsCode,
Exec,
AppServer,
SubAgent(CoreSubAgentSource),
#[serde(other)]
Unknown,
}
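// Hedged sketch of the #[serde(other)] fallback above: an unrecognized string
// deserializes to Unknown instead of erroring; the input value is hypothetical.
#[test]
fn unknown_session_source_falls_back_to_unknown() {
    let parsed: SessionSource = serde_json::from_str("\"someFutureSource\"").unwrap();
    assert_eq!(parsed, SessionSource::Unknown);
}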
@@ -723,7 +681,7 @@ impl From<CoreSessionSource> for SessionSource {
CoreSessionSource::VSCode => SessionSource::VsCode,
CoreSessionSource::Exec => SessionSource::Exec,
CoreSessionSource::Mcp => SessionSource::AppServer,
CoreSessionSource::SubAgent(sub) => SessionSource::SubAgent(sub),
CoreSessionSource::SubAgent(_) => SessionSource::Unknown,
CoreSessionSource::Unknown => SessionSource::Unknown,
}
}
@@ -736,7 +694,6 @@ impl From<SessionSource> for CoreSessionSource {
SessionSource::VsCode => CoreSessionSource::VSCode,
SessionSource::Exec => CoreSessionSource::Exec,
SessionSource::AppServer => CoreSessionSource::Mcp,
SessionSource::SubAgent(sub) => CoreSessionSource::SubAgent(sub),
SessionSource::Unknown => CoreSessionSource::Unknown,
}
}
@@ -914,8 +871,6 @@ pub struct Model {
pub description: String,
pub supported_reasoning_efforts: Vec<ReasoningEffortOption>,
pub default_reasoning_effort: ReasoningEffort,
#[serde(default)]
pub supports_personality: bool,
// Only one model should be marked as default.
pub is_default: bool,
}
@@ -938,20 +893,6 @@ pub struct ModelListResponse {
pub next_cursor: Option<String>,
}
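// Hedged sketch of driving the cursor above from a client: pass next_cursor
// back until it comes back None. `fetch_page` and the `data` field are assumptions here.
fn collect_all_models(mut fetch_page: impl FnMut(Option<String>) -> ModelListResponse) -> Vec<Model> {
    let mut models = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let page = fetch_page(cursor.take());
        models.extend(page.data);
        match page.next_cursor {
            Some(next) => cursor = Some(next),
            None => break,
        }
    }
    models
}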
/// EXPERIMENTAL - list collaboration mode presets.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListParams {}
/// EXPERIMENTAL - collaboration mode presets response.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollaborationModeListResponse {
pub data: Vec<CollaborationModeMask>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -983,49 +924,6 @@ pub struct ListMcpServerStatusResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppInfo {
pub id: String,
pub name: String,
pub description: Option<String>,
pub logo_url: Option<String>,
pub install_url: Option<String>,
#[serde(default)]
pub is_accessible: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct AppsListResponse {
pub data: Vec<AppInfo>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
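
These list endpoints share one cursor contract: resend `nextCursor` until the server returns `None`. A self-contained sketch of the client-side loop (the `fetch_page` body fakes a server with an in-memory vector; only the loop shape is the documented part):

```rust
struct Page {
    data: Vec<String>,
    next_cursor: Option<String>,
}

// Stand-in for a real `app/list` (or `thread/list`) round trip: slices an
// in-memory vector and hands back an opaque cursor while items remain.
fn fetch_page(items: &[String], cursor: Option<usize>, limit: usize) -> Page {
    let start = cursor.unwrap_or(0);
    let end = (start + limit).min(items.len());
    Page {
        data: items[start..end].to_vec(),
        next_cursor: (end < items.len()).then(|| end.to_string()),
    }
}

fn main() {
    let items: Vec<String> = (0..5).map(|i| format!("app-{i}")).collect();
    let mut cursor = None;
    loop {
        let page = fetch_page(&items, cursor, 2);
        println!("page: {:?}", page.data);
        match page.next_cursor {
            Some(c) => cursor = Some(c.parse().expect("cursor is an index here")),
            None => break, // nextCursor is null: final page reached
        }
    }
}
```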
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerRefreshParams {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct McpServerRefreshResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1083,35 +981,6 @@ pub struct CommandExecResponse {
pub stderr: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ExecRunParams {
pub input: Vec<UserInput>,
pub model: Option<String>,
pub model_provider: Option<String>,
pub cwd: Option<String>,
pub effort: Option<ReasoningEffort>,
pub summary: Option<ReasoningSummary>,
pub collaboration_mode: Option<CollaborationMode>,
pub personality: Option<Personality>,
pub config: Option<HashMap<String, JsonValue>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub output_schema: Option<JsonValue>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ExecRunResponse {
pub thread_id: String,
pub turn_id: String,
pub status: TurnStatus,
pub last_agent_message: Option<String>,
pub error: Option<TurnError>,
}
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
@@ -1126,9 +995,6 @@ pub struct ThreadStartParams {
pub config: Option<HashMap<String, JsonValue>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
pub ephemeral: Option<bool>,
pub dynamic_tools: Option<Vec<DynamicToolSpec>>,
/// If true, opt into emitting raw response items on the event stream.
///
/// This is for internal use only (e.g. Codex Cloud).
@@ -1183,7 +1049,6 @@ pub struct ThreadResumeParams {
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1199,47 +1064,6 @@ pub struct ThreadResumeResponse {
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// There are two ways to fork a thread:
/// 1. By thread_id: load the thread from disk by thread_id and fork it into a new thread.
/// 2. By path: load the thread from disk by path and fork it into a new thread.
///
/// If using path, the thread_id param will be ignored.
///
/// Prefer using thread_id whenever possible.
pub struct ThreadForkParams {
pub thread_id: String,
/// [UNSTABLE] Specify the rollout path to fork from.
/// If specified, the thread_id param will be ignored.
pub path: Option<PathBuf>,
/// Configuration overrides for the forked thread, if any.
pub model: Option<String>,
pub model_provider: Option<String>,
pub cwd: Option<String>,
pub approval_policy: Option<AskForApproval>,
pub sandbox: Option<SandboxMode>,
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadForkResponse {
pub thread: Thread,
pub model: String,
pub model_provider: String,
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox: SandboxPolicy,
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1284,22 +1108,9 @@ pub struct ThreadListParams {
pub cursor: Option<String>,
/// Optional page size; defaults to a reasonable server-side value.
pub limit: Option<u32>,
/// Optional sort key; defaults to created_at.
pub sort_key: Option<ThreadSortKey>,
/// Optional provider filter; when set, only sessions recorded under these
/// providers are returned. When present but empty, includes all providers.
pub model_providers: Option<Vec<String>>,
/// Optional archived filter; when set to true, only archived threads are returned.
/// If false or null, only non-archived threads are returned.
pub archived: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(export_to = "v2/")]
pub enum ThreadSortKey {
CreatedAt,
UpdatedAt,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1312,44 +1123,6 @@ pub struct ThreadListResponse {
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadLoadedListParams {
/// Opaque pagination cursor returned by a previous call.
pub cursor: Option<String>,
/// Optional page size; defaults to no limit.
pub limit: Option<u32>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadLoadedListResponse {
/// Thread ids for sessions currently loaded in memory.
pub data: Vec<String>,
/// Opaque cursor to pass to the next call to continue after the last item.
/// If None, there are no more items to return.
pub next_cursor: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadParams {
pub thread_id: String,
/// When true, include turns and their items from rollout history.
#[serde(default)]
pub include_turns: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ThreadReadResponse {
pub thread: Thread,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1387,34 +1160,11 @@ pub enum SkillScope {
pub struct SkillMetadata {
pub name: String,
pub description: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
/// Legacy short_description from SKILL.md. Prefer SKILL.toml interface.short_description.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub short_description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub interface: Option<SkillInterface>,
pub path: PathBuf,
pub scope: SkillScope,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillInterface {
#[ts(optional)]
pub display_name: Option<String>,
#[ts(optional)]
pub short_description: Option<String>,
#[ts(optional)]
pub icon_small: Option<PathBuf>,
#[ts(optional)]
pub icon_large: Option<PathBuf>,
#[ts(optional)]
pub brand_color: Option<String>,
#[ts(optional)]
pub default_prompt: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1434,44 +1184,14 @@ pub struct SkillsListEntry {
pub errors: Vec<SkillErrorInfo>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteParams {
pub path: PathBuf,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct SkillsConfigWriteResponse {
pub effective_enabled: bool,
}
impl From<CoreSkillMetadata> for SkillMetadata {
fn from(value: CoreSkillMetadata) -> Self {
Self {
name: value.name,
description: value.description,
short_description: value.short_description,
interface: value.interface.map(SkillInterface::from),
path: value.path,
scope: value.scope.into(),
enabled: true,
}
}
}
impl From<CoreSkillInterface> for SkillInterface {
fn from(value: CoreSkillInterface) -> Self {
Self {
display_name: value.display_name,
short_description: value.short_description,
brand_color: value.brand_color,
default_prompt: value.default_prompt,
icon_small: value.icon_small,
icon_large: value.icon_large,
}
}
}
@@ -1508,11 +1228,8 @@ pub struct Thread {
/// Unix timestamp (in seconds) when the thread was created.
#[ts(type = "number")]
pub created_at: i64,
/// Unix timestamp (in seconds) when the thread was last updated.
#[ts(type = "number")]
pub updated_at: i64,
/// [UNSTABLE] Path to the thread on disk.
pub path: Option<PathBuf>,
pub path: PathBuf,
/// Working directory captured for the thread.
pub cwd: PathBuf,
/// Version of the CLI that created the thread.
@@ -1521,8 +1238,7 @@ pub struct Thread {
pub source: SessionSource,
/// Optional Git metadata captured when the thread was created.
pub git_info: Option<GitInfo>,
/// Only populated on `thread/resume`, `thread/rollback`, `thread/fork`, and `thread/read`
/// (when `includeTurns` is true) responses.
/// Only populated on `thread/resume` and `thread/rollback` responses.
/// For all other responses and notifications returning a Thread,
/// the turns field will be an empty list.
pub turns: Vec<Turn>,
@@ -1598,7 +1314,7 @@ impl From<CoreTokenUsage> for TokenUsageBreakdown {
#[ts(export_to = "v2/")]
pub struct Turn {
pub id: String,
/// Only populated on a `thread/resume` or `thread/fork` response.
/// Only populated on a `thread/resume` response.
/// For all other responses and notifications returning a Turn,
/// the items field will be an empty list.
pub items: Vec<ThreadItem>,
@@ -1659,14 +1375,8 @@ pub struct TurnStartParams {
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
/// Override the personality for this turn and subsequent turns.
pub personality: Option<Personality>,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<JsonValue>,
/// EXPERIMENTAL - set a pre-set collaboration mode.
/// Takes precedence over model, reasoning_effort, and developer instructions if set.
pub collaboration_mode: Option<CollaborationMode>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1742,110 +1452,22 @@ pub struct TurnInterruptParams {
pub struct TurnInterruptResponse {}
// User input types
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ByteRange {
pub start: usize,
pub end: usize,
}
impl From<CoreByteRange> for ByteRange {
fn from(value: CoreByteRange) -> Self {
Self {
start: value.start,
end: value.end,
}
}
}
impl From<ByteRange> for CoreByteRange {
fn from(value: ByteRange) -> Self {
Self {
start: value.start,
end: value.end,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextElement {
/// Byte range in the parent `text` buffer that this element occupies.
pub byte_range: ByteRange,
/// Optional human-readable placeholder for the element, displayed in the UI.
placeholder: Option<String>,
}
impl TextElement {
pub fn new(byte_range: ByteRange, placeholder: Option<String>) -> Self {
Self {
byte_range,
placeholder,
}
}
pub fn set_placeholder(&mut self, placeholder: Option<String>) {
self.placeholder = placeholder;
}
pub fn placeholder(&self) -> Option<&str> {
self.placeholder.as_deref()
}
}
impl From<CoreTextElement> for TextElement {
fn from(value: CoreTextElement) -> Self {
Self::new(
value.byte_range.into(),
value._placeholder_for_conversion_only().map(str::to_string),
)
}
}
impl From<TextElement> for CoreTextElement {
fn from(value: TextElement) -> Self {
Self::new(value.byte_range.into(), value.placeholder)
}
}
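
For reference, the ranges in the `TextElement` type removed here were byte offsets into the parent `text` buffer, not character counts. A tiny sketch of computing such a span (the placeholder string is invented):

```rust
fn main() {
    let text = "fix $skill-creator please";
    // Byte offsets delimiting the `$skill-creator` span inside `text`.
    let start = text.find('$').unwrap();
    let end = start + "$skill-creator".len();
    assert_eq!(&text[start..end], "$skill-creator");
    // A UI would render this span via its placeholder instead of the raw text.
    println!("element at bytes {start}..{end}, placeholder: \"Skill: skill-creator\"");
}
```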
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(tag = "type", rename_all = "camelCase")]
#[ts(tag = "type")]
#[ts(export_to = "v2/")]
pub enum UserInput {
Text {
text: String,
/// UI-defined spans within `text` used to render or persist special elements.
#[serde(default)]
text_elements: Vec<TextElement>,
},
Image {
url: String,
},
LocalImage {
path: PathBuf,
},
Skill {
name: String,
path: PathBuf,
},
Text { text: String },
Image { url: String },
LocalImage { path: PathBuf },
}
impl UserInput {
pub fn into_core(self) -> CoreUserInput {
match self {
UserInput::Text {
text,
text_elements,
} => CoreUserInput::Text {
text,
text_elements: text_elements.into_iter().map(Into::into).collect(),
},
UserInput::Text { text } => CoreUserInput::Text { text },
UserInput::Image { url } => CoreUserInput::Image { image_url: url },
UserInput::LocalImage { path } => CoreUserInput::LocalImage { path },
UserInput::Skill { name, path } => CoreUserInput::Skill { name, path },
}
}
}
@@ -1853,16 +1475,9 @@ impl UserInput {
impl From<CoreUserInput> for UserInput {
fn from(value: CoreUserInput) -> Self {
match value {
CoreUserInput::Text {
text,
text_elements,
} => UserInput::Text {
text,
text_elements: text_elements.into_iter().map(Into::into).collect(),
},
CoreUserInput::Text { text } => UserInput::Text { text },
CoreUserInput::Image { image_url } => UserInput::Image { url: image_url },
CoreUserInput::LocalImage { path } => UserInput::LocalImage { path },
CoreUserInput::Skill { name, path } => UserInput::Skill { name, path },
_ => unreachable!("unsupported user input variant"),
}
}
@@ -1934,25 +1549,6 @@ pub enum ThreadItem {
},
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
CollabAgentToolCall {
/// Unique identifier for this collab tool call.
id: String,
/// Name of the collab tool that was invoked.
tool: CollabAgentTool,
/// Current status of the collab tool call.
status: CollabAgentToolCallStatus,
/// Thread ID of the agent issuing the collab request.
sender_thread_id: String,
/// Thread IDs of the receiving agents, when applicable. For a spawn operation,
/// this corresponds to the newly spawned agent.
receiver_thread_ids: Vec<String>,
/// Prompt text sent as part of the collab tool call, when available.
prompt: Option<String>,
/// Last known status of the target agents, when available.
agents_states: HashMap<String, CollabAgentState>,
},
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
WebSearch { id: String, query: String },
#[serde(rename_all = "camelCase")]
#[ts(rename_all = "camelCase")]
@@ -2005,16 +1601,6 @@ pub enum CommandExecutionStatus {
Declined,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CollabAgentTool {
SpawnAgent,
SendInput,
Wait,
CloseAgent,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2053,66 +1639,6 @@ pub enum McpToolCallStatus {
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CollabAgentToolCallStatus {
InProgress,
Completed,
Failed,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum CollabAgentStatus {
PendingInit,
Running,
Completed,
Errored,
Shutdown,
NotFound,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct CollabAgentState {
pub status: CollabAgentStatus,
pub message: Option<String>,
}
impl From<CoreAgentStatus> for CollabAgentState {
fn from(value: CoreAgentStatus) -> Self {
match value {
CoreAgentStatus::PendingInit => Self {
status: CollabAgentStatus::PendingInit,
message: None,
},
CoreAgentStatus::Running => Self {
status: CollabAgentStatus::Running,
message: None,
},
CoreAgentStatus::Completed(message) => Self {
status: CollabAgentStatus::Completed,
message,
},
CoreAgentStatus::Errored(message) => Self {
status: CollabAgentStatus::Errored,
message: Some(message),
},
CoreAgentStatus::Shutdown => Self {
status: CollabAgentStatus::Shutdown,
message: None,
},
CoreAgentStatus::NotFound => Self {
status: CollabAgentStatus::NotFound,
message: None,
},
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2370,18 +1896,6 @@ pub struct CommandExecutionRequestApprovalParams {
pub item_id: String,
/// Optional explanatory reason (e.g. request for network access).
pub reason: Option<String>,
/// The command to be executed.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command: Option<String>,
/// The command's working directory.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub cwd: Option<PathBuf>,
/// Best-effort parsed command actions for friendly display.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub command_actions: Option<Vec<CommandAction>>,
/// Optional proposed execpolicy amendment to allow similar commands without prompting.
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
}
@@ -2413,72 +1927,6 @@ pub struct FileChangeRequestApprovalResponse {
pub decision: FileChangeApprovalDecision,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallParams {
pub thread_id: String,
pub turn_id: String,
pub call_id: String,
pub tool: String,
pub arguments: JsonValue,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct DynamicToolCallResponse {
pub output: String,
pub success: bool,
}
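
The dynamic tool surface removed in this hunk was a server-to-client request/response pair: the server forwarded a tool name plus JSON arguments, and the client answered with `output` and `success`. A hedged sketch of what a client-side handler could look like (`lookup_ticket` is a made-up tool, echoing the `dynamicTools` doc example):

```rust
use serde_json::{Value, json};

// Dispatch a dynamic tool call and produce (output, success), mirroring the
// DynamicToolCallResponse fields above. `lookup_ticket` is hypothetical.
fn handle_dynamic_tool_call(tool: &str, arguments: &Value) -> (String, bool) {
    match tool {
        "lookup_ticket" => {
            let id = arguments["id"].as_str().unwrap_or("unknown");
            (format!("ticket {id}: open, unassigned"), true)
        }
        other => (format!("unknown tool: {other}"), false),
    }
}

fn main() {
    let (output, success) = handle_dynamic_tool_call("lookup_ticket", &json!({"id": "T-42"}));
    println!("success={success} output={output}");
}
```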
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Defines a single selectable option for request_user_input.
pub struct ToolRequestUserInputOption {
pub label: String,
pub description: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Represents one request_user_input question and its optional options.
pub struct ToolRequestUserInputQuestion {
pub id: String,
pub header: String,
pub question: String,
pub options: Option<Vec<ToolRequestUserInputOption>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Params sent with a request_user_input event.
pub struct ToolRequestUserInputParams {
pub thread_id: String,
pub turn_id: String,
pub item_id: String,
pub questions: Vec<ToolRequestUserInputQuestion>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Captures a user's answer to a request_user_input question.
pub struct ToolRequestUserInputAnswer {
pub answers: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
/// EXPERIMENTAL. Response payload mapping question ids to answers.
pub struct ToolRequestUserInputResponse {
pub answers: HashMap<String, ToolRequestUserInputAnswer>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2568,42 +2016,6 @@ pub struct DeprecationNoticeNotification {
pub details: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextPosition {
/// 1-based line number.
pub line: usize,
/// 1-based column number (in Unicode scalar values).
pub column: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct TextRange {
pub start: TextPosition,
pub end: TextPosition,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ConfigWarningNotification {
/// Concise summary of the warning.
pub summary: String,
/// Optional extra guidance or error details.
pub details: Option<String>,
/// Optional path to the config file that triggered the warning.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub path: Option<String>,
/// Optional range for the error location inside the config file.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub range: Option<TextRange>,
}
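
The removed `TextPosition` doc comments pin down the units: lines and columns are 1-based, with columns counted in Unicode scalar values (Rust `char`s). A sketch of deriving such a position from a byte offset into a config file (the TOML snippet is invented):

```rust
// position_of maps a byte offset in `text` to the 1-based (line, column)
// convention described above; columns count chars (Unicode scalar values).
fn position_of(text: &str, byte_offset: usize) -> (usize, usize) {
    let before = &text[..byte_offset];
    let line = before.matches('\n').count() + 1;
    let line_start = before.rfind('\n').map_or(0, |i| i + 1);
    let column = before[line_start..].chars().count() + 1;
    (line, column)
}

fn main() {
    // Invented config snippet with a bad value on line 2.
    let toml = "model = \"gpt-5\"\nsandbox = «bad»\n";
    let offset = toml.find('«').unwrap();
    assert_eq!(position_of(toml, offset), (2, 11));
    println!("warning at line/column {:?}", position_of(toml, offset));
}
```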
#[cfg(test)]
mod tests {
use super::*;
@@ -2644,7 +2056,6 @@ mod tests {
content: vec![
CoreUserInput::Text {
text: "hello".to_string(),
text_elements: Vec::new(),
},
CoreUserInput::Image {
image_url: "https://example.com/image.png".to_string(),
@@ -2652,10 +2063,6 @@ mod tests {
CoreUserInput::LocalImage {
path: PathBuf::from("local/image.png"),
},
CoreUserInput::Skill {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
],
});
@@ -2666,7 +2073,6 @@ mod tests {
content: vec![
UserInput::Text {
text: "hello".to_string(),
text_elements: Vec::new(),
},
UserInput::Image {
url: "https://example.com/image.png".to_string(),
@@ -2674,10 +2080,6 @@ mod tests {
UserInput::LocalImage {
path: PathBuf::from("local/image.png"),
},
UserInput::Skill {
name: "skill-creator".to_string(),
path: PathBuf::from("/repo/.codex/skills/skill-creator/SKILL.md"),
},
],
}
);

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "codex-app-server-test-client",
crate_name = "codex_app_server_test_client",
)

View File

@@ -256,11 +256,7 @@ fn send_message_v2_with_policies(
println!("< thread/start response: {thread_response:?}");
let mut turn_params = TurnStartParams {
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: user_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
input: vec![V2UserInput::Text { text: user_message }],
..Default::default()
};
turn_params.approval_policy = approval_policy;
@@ -292,8 +288,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: first_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
};
@@ -305,8 +299,6 @@ fn send_follow_up_v2(
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text {
text: follow_up_message,
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
..Default::default()
};
@@ -479,8 +471,6 @@ impl CodexClient {
conversation_id: *conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
// Test client sends plain text without UI element ranges.
text_elements: Vec::new(),
}],
},
};
@@ -555,7 +545,7 @@ impl CodexClient {
print!("{}", event.delta);
std::io::stdout().flush().ok();
}
EventMsg::TurnComplete(event) => {
EventMsg::TaskComplete(event) => {
println!("\n[task complete: {event:?}]");
break;
}
@@ -842,9 +832,6 @@ impl CodexClient {
turn_id,
item_id,
reason,
command,
cwd,
command_actions,
proposed_execpolicy_amendment,
} = params;
@@ -854,17 +841,6 @@ impl CodexClient {
if let Some(reason) = reason.as_deref() {
println!("< reason: {reason}");
}
if let Some(command) = command.as_deref() {
println!("< command: {command}");
}
if let Some(cwd) = cwd.as_ref() {
println!("< cwd: {}", cwd.display());
}
if let Some(command_actions) = command_actions.as_ref()
&& !command_actions.is_empty()
{
println!("< command actions: {command_actions:?}");
}
if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
}

View File

@@ -1,8 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "app-server",
crate_name = "codex_app_server",
integration_deps_extra = ["//codex-rs/app-server/tests/common:common"],
test_tags = ["no-sandbox"],
)

View File

@@ -22,7 +22,6 @@ codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }
codex-file-search = { workspace = true }
codex-chatgpt = { workspace = true }
codex-login = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
@@ -35,7 +34,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
mcp-types = { workspace = true }
tempfile = { workspace = true }
time = { workspace = true }
toml = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
@@ -50,20 +48,11 @@ uuid = { workspace = true, features = ["serde", "v7"] }
[dev-dependencies]
app_test_support = { workspace = true }
axum = { workspace = true, default-features = false, features = [
"http1",
"json",
"tokio",
] }
base64 = { workspace = true }
core_test_support = { workspace = true }
mcp-types = { workspace = true }
os_info = { workspace = true }
pretty_assertions = { workspace = true }
rmcp = { workspace = true, default-features = false, features = [
"server",
"transport-streamable-http-server",
] }
serial_test = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

View File

@@ -41,9 +41,8 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat
## Lifecycle Overview
- Initialize once: Immediately after launching the codex app-server process, send an `initialize` request with your client metadata, then emit an `initialized` notification (see the sketch after this list). Any other request before this handshake gets rejected.
- Start (or resume) a thread: Call `thread/start` to open a fresh conversation. The response returns the thread object and you'll also get a `thread/started` notification. If you're continuing an existing conversation, call `thread/resume` with its ID instead. If you want to branch from an existing conversation, call `thread/fork` to create a new thread id with copied history.
- Start (or resume) a thread: Call `thread/start` to open a fresh conversation. The response returns the thread object and you'll also get a `thread/started` notification. If you're continuing an existing conversation, call `thread/resume` with its ID instead.
- Begin a turn: To send user input, call `turn/start` with the target `threadId` and the user's input. Optional fields let you override model, cwd, sandbox policy, etc. This immediately returns the new turn object and triggers a `turn/started` notification.
- Or run once: Call `exec/run` to execute a single turn to completion and receive the final status/message in the response without subscribing to the event stream.
- Stream events: After `turn/start`, keep reading JSON-RPC notifications on stdout. You'll see `item/started`, `item/completed`, deltas like `item/agentMessage/delta`, tool progress, etc. These represent streaming model output plus any side effects (commands, tool calls, reasoning notes).
- Finish the turn: When the model is done (or the turn is interrupted via the `turn/interrupt` call), the server sends `turn/completed` with the final turn state and token usage.
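
Putting the lifecycle together, a minimal stdio client might look like the following sketch (binary name, arguments, and message bodies are assumptions based on the examples in this document, not a pinned API; `serde_json` is assumed as a dependency):

```rust
use serde_json::json;
use std::io::{BufRead, BufReader, Write};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Assumed invocation; adjust to however the app-server binary is launched.
    let mut child = Command::new("codex")
        .arg("app-server")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;
    let mut stdin = child.stdin.take().expect("piped stdin");
    let stdout = BufReader::new(child.stdout.take().expect("piped stdout"));

    // 1. initialize -> 2. initialized -> 3. thread/start, one JSON object per line.
    for msg in [
        json!({"jsonrpc": "2.0", "id": 0, "method": "initialize",
               "params": {"clientInfo": {"name": "example_client", "title": "Example", "version": "0.0.1"}}}),
        json!({"jsonrpc": "2.0", "method": "initialized"}),
        json!({"jsonrpc": "2.0", "id": 1, "method": "thread/start", "params": {}}),
    ] {
        writeln!(stdin, "{msg}")?;
    }

    // Read back a few lines: the initialize response, the thread/start
    // response, and the thread/started notification arrive on stdout.
    for line in stdout.lines().take(3) {
        println!("< {}", line?);
    }
    let _ = child.kill();
    Ok(())
}
```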
@@ -53,10 +52,6 @@ Clients must send a single `initialize` request before invoking any other method
Applications building on top of `codex app-server` should identify themselves via the `clientInfo` parameter.
**Important**: `clientInfo.name` is used to identify the client for the OpenAI Compliance Logs Platform. If
you are developing a new Codex integration that is intended for enterprise use, please contact us to get it
added to a known clients list. For more context: https://chatgpt.com/admin/api-reference#tag/Logs:-Codex
Example (from OpenAI's official VSCode extension):
```json
@@ -65,7 +60,7 @@ Example (from OpenAI's official VSCode extension):
"id": 0,
"params": {
"clientInfo": {
"name": "codex_vscode",
"name": "codex-vscode",
"title": "Codex VS Code Extension",
"version": "0.1.0"
}
@@ -77,32 +72,22 @@ Example (from OpenAI's official VSCode extension):
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; emits `thread/started` and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders` filtering.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`.
- `thread/archive` — move a thread's rollout file into the archived directory; returns `{}` on success.
- `thread/rollback` — drop the last N turns from the agent's in-memory context and persist a rollback marker in the rollout so future resumes see the pruned history; returns the updated `thread` (with `turns` populated) on success.
- `turn/start` — add user input to a thread and begin Codex generation; responds with the initial `turn` object and streams `turn/started`, `item/*`, and `turn/completed` notifications.
- `turn/interrupt` — request cancellation of an in-flight turn by `(thread_id, turn_id)`; success is an empty `{}` response and the turn finishes with `status: "interrupted"`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `exec/run` — run a single turn to completion and return the final status/message inline without streaming notifications.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (with reasoning effort options).
- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination).
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
- `app/list` — list available apps.
- `skills/config/write` — write user-level skill config by path.
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
- `tool/requestUserInput` — prompt the user with 13 short questions for a tool call and return their answers (experimental).
- `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server.
- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
- `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
- `configRequirements/read` — fetch the loaded requirements allow-lists from `requirements.toml` and/or MDM (or `null` if none are configured).
### Example: Start or resume a thread
@@ -116,20 +101,6 @@ Start a fresh thread when you need a new Codex conversation.
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
"personality": "friendly",
"dynamicTools": [
{
"name": "lookup_ticket",
"description": "Fetch a ticket by id",
"inputSchema": {
"type": "object",
"properties": {
"id": { "type": "string" }
},
"required": ["id"]
}
}
],
} }
{ "id": 10, "result": {
"thread": {
@@ -142,33 +113,20 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { } } }
```
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
```json
{ "method": "thread/resume", "id": 11, "params": {
"threadId": "thr_123",
"personality": "friendly"
} }
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
To branch from a stored session, call `thread/fork` with the `thread.id`. This creates a new thread id and emits a `thread/started` notification for it:
```json
{ "method": "thread/fork", "id": 12, "params": { "threadId": "thr_123" } }
{ "id": 12, "result": { "thread": { "id": "thr_456", } } }
{ "method": "thread/started", "params": { "thread": { } } }
```
### Example: List threads (with pagination & filters)
`thread/list` lets you render a history UI. Results are sorted by `createdAt` descending (newest first) by default. Pass any combination of:
`thread/list` lets you render a history UI. Pass any combination of:
- `cursor` — opaque string from a prior response; omit for the first page.
- `limit` — server defaults to a reasonable page size if unset.
- `sortKey``created_at` (default) or `updated_at`.
- `modelProviders` — restrict results to specific providers; unset, null, or an empty array will include all providers.
- `archived` — when `true`, list archived threads only. When `false` or `null`, list non-archived threads (default).
Example:
@@ -176,12 +134,11 @@ Example:
{ "method": "thread/list", "id": 20, "params": {
"cursor": null,
"limit": 25,
"sortKey": "created_at"
} }
{ "id": 20, "result": {
"data": [
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111, "updatedAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000, "updatedAt": 1730750000 }
{ "id": "thr_a", "preview": "Create a TUI", "modelProvider": "openai", "createdAt": 1730831111 },
{ "id": "thr_b", "preview": "Fix tests", "modelProvider": "openai", "createdAt": 1730750000 }
],
"nextCursor": "opaque-token-or-null"
} }
@@ -189,31 +146,6 @@ Example:
When `nextCursor` is `null`, you've reached the final page.
### Example: List loaded threads
`thread/loaded/list` returns thread ids currently loaded in memory. This is useful when you want to check which sessions are active without scanning rollouts on disk.
```json
{ "method": "thread/loaded/list", "id": 21 }
{ "id": 21, "result": {
"data": ["thr_123", "thr_456"]
} }
```
### Example: Read a thread
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the rollout history loaded into `thread.turns`.
```json
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
{ "id": 22, "result": { "thread": { "id": "thr_123", "turns": [] } } }
```
```json
{ "method": "thread/read", "id": 23, "params": { "threadId": "thr_123", "includeTurns": true } }
{ "id": 23, "result": { "thread": { "id": "thr_123", "turns": [ ... ] } } }
```
### Example: Archive a thread
Use `thread/archive` to move the persisted rollout (stored as a JSONL file on disk) into the archived sessions directory.
@@ -223,7 +155,7 @@ Use `thread/archive` to move the persisted rollout (stored as a JSONL file on di
{ "id": 21, "result": {} }
```
An archived thread will not appear in `thread/list` unless `archived` is set to `true`.
An archived thread will not appear in future calls to `thread/list`.
### Example: Start a turn (send user input)
@@ -250,7 +182,6 @@ You can optionally specify config overrides on the new turn. If specified, these
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise",
"personality": "friendly",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",
@@ -269,14 +200,13 @@ You can optionally specify config overrides on the new turn. If specified, these
### Example: Start a turn (invoke a skill)
Invoke a skill explicitly by including `$<skill-name>` in the text input and adding a `skill` input item alongside it.
Invoke a skill by sending a text input that begins with `$<skill-name>`.
```json
{ "method": "turn/start", "id": 33, "params": {
"threadId": "thr_123",
"input": [
{ "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage." },
{ "type": "skill", "name": "skill-creator", "path": "/Users/me/.codex/skills/skill-creator/SKILL.md" }
{ "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage." }
]
} }
{ "id": 33, "result": { "turn": {
@@ -370,34 +300,6 @@ containing an `exitedReviewMode` item with the final review text:
The `review` string is plain text that already bundles the overall explanation plus a bullet list for each structured finding (matching `ThreadItem::ExitedReviewMode` in the generated schema). Use this notification to render the reviewer output in your client.
### Example: Exec run to completion
Run a single turn to completion without subscribing to streamed turn/item events:
```json
{ "method": "exec/run", "id": 31, "params": {
"input": [{
"type": "text",
"text": "Generate a concise thread title",
"textElements": []
}]
} }
{ "id": 31, "result": {
"threadId": "thread-123",
"turnId": "turn-456",
"status": "completed",
"lastAgentMessage": "Fix title generation for app-server",
"error": null
} }
```
Notes:
- `exec/run` is best for one-off utilities (for example, generating a title) where you just need the final result.
- `exec/run` always runs ephemerally, so it does not create a rollout and will not appear in `thread/list`.
- `exec/run` forces `approvalPolicy: "never"` and a read-only sandbox.
- Use `turn/start` when you want streaming events, intermediate items, or a long-lived thread.
### Example: One-off command execution
Run a standalone command (argv vector) in the server's sandbox without creating a thread or turn:
@@ -445,7 +347,6 @@ Today both notifications carry an empty `items` array even when item events were
- `commandExecution``{id, command, cwd, status, commandActions, aggregatedOutput?, exitCode?, durationMs?}` for sandboxed commands; `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `fileChange``{id, changes, status}` describing proposed edits; `changes` list `{path, kind, diff}` and `status` is `inProgress`, `completed`, `failed`, or `declined`.
- `mcpToolCall``{id, server, tool, status, arguments, result?, error?}` describing MCP calls; `status` is `inProgress`, `completed`, or `failed`.
- `collabToolCall``{id, tool, status, senderThreadId, receiverThreadId?, newThreadId?, prompt?, agentStatus?}` describing collab tool calls (`spawn_agent`, `send_input`, `wait`, `close_agent`); `status` is `inProgress`, `completed`, or `failed`.
- `webSearch``{id, query}` for a web search request issued by the agent.
- `imageView``{id, path}` emitted when the agent invokes the image viewer tool.
- `enteredReviewMode``{id, review}` sent when the reviewer starts; `review` is a short user-facing label such as `"current changes"` or the requested target description.
@@ -510,7 +411,7 @@ Certain actions (shell commands or modifying files) may require explicit user ap
Order of messages:
1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason`, plus `command`, `cwd`, and `commandActions` for friendly display.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `reason` or `risk`, plus `parsedCmd` for friendly display.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
@@ -527,30 +428,7 @@ UI guidance for IDEs: surface an approval dialog as soon as the request arrives.
## Skills
Invoke a skill by including `$<skill-name>` in the text input. Add a `skill` input item (recommended) so the backend injects full skill instructions instead of relying on the model to resolve the name.
```json
{
"method": "turn/start",
"id": 101,
"params": {
"threadId": "thread-1",
"input": [
{
"type": "text",
"text": "$skill-creator Add a new skill for triaging flaky CI."
},
{
"type": "skill",
"name": "skill-creator",
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md"
}
]
}
}
```
If you omit the `skill` item, the model will still parse the `$<skill-name>` marker and try to locate the skill, which can add latency.
Skills are invoked by sending a text input that starts with `$<skill-name>`. The rest of the text is passed to the skill as its input.
Example:
@@ -558,49 +436,20 @@ Example:
$skill-creator Add a new skill for triaging flaky CI and include step-by-step usage.
```
Use `skills/list` to fetch the available skills (optionally scoped by `cwds`, with `forceReload`).
Use `skills/list` to fetch the available skills (optionally scoped by `cwd` and/or with `forceReload`).
```json
{ "method": "skills/list", "id": 25, "params": {
"cwds": ["/Users/me/project"],
"cwd": "/Users/me/project",
"forceReload": false
} }
{ "id": 25, "result": {
"data": [{
"cwd": "/Users/me/project",
"skills": [
{
"name": "skill-creator",
"description": "Create or update a Codex skill",
"enabled": true,
"interface": {
"displayName": "Skill Creator",
"shortDescription": "Create or update a Codex skill",
"iconSmall": "icon.svg",
"iconLarge": "icon-large.svg",
"brandColor": "#111111",
"defaultPrompt": "Add a new skill for triaging flaky CI."
}
}
],
"errors": []
}]
"skills": [
{ "name": "skill-creator", "description": "Create or update a Codex skill" }
]
} }
```
To enable or disable a skill by path:
```json
{
"method": "skills/config/write",
"id": 26,
"params": {
"path": "/Users/me/.codex/skills/skill-creator/SKILL.md",
"enabled": false
}
}
```
## Auth endpoints
The JSON-RPC auth/account surface exposes request/response methods plus server-initiated notifications (no `id`). Use these to determine auth state, start or cancel logins, logout, and inspect ChatGPT rate limits.

View File

@@ -14,9 +14,6 @@ use codex_app_server_protocol::AgentMessageDeltaNotification;
use codex_app_server_protocol::ApplyPatchApprovalParams;
use codex_app_server_protocol::ApplyPatchApprovalResponse;
use codex_app_server_protocol::CodexErrorInfo as V2CodexErrorInfo;
use codex_app_server_protocol::CollabAgentState as V2CollabAgentStatus;
use codex_app_server_protocol::CollabAgentTool;
use codex_app_server_protocol::CollabAgentToolCallStatus as V2CollabToolCallStatus;
use codex_app_server_protocol::CommandAction as V2ParsedCommand;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
use codex_app_server_protocol::CommandExecutionOutputDeltaNotification;
@@ -25,7 +22,6 @@ use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
use codex_app_server_protocol::ContextCompactedNotification;
use codex_app_server_protocol::DeprecationNoticeNotification;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::ErrorNotification;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::ExecCommandApprovalResponse;
@@ -55,10 +51,6 @@ use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadRollbackResponse;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
use codex_app_server_protocol::ToolRequestUserInputOption;
use codex_app_server_protocol::ToolRequestUserInputParams;
use codex_app_server_protocol::ToolRequestUserInputQuestion;
use codex_app_server_protocol::ToolRequestUserInputResponse;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnDiffUpdatedNotification;
@@ -86,11 +78,8 @@ use codex_core::protocol::TurnDiffEvent;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ThreadId;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::plan_tool::UpdatePlanArgs;
use codex_protocol::protocol::ReviewOutputEvent;
use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer;
use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::PathBuf;
@@ -117,7 +106,7 @@ pub(crate) async fn apply_bespoke_event_handling(
msg,
} = event;
match msg {
EventMsg::TurnComplete(_ev) => {
EventMsg::TaskComplete(_ev) => {
handle_turn_complete(
conversation_id,
event_turn_id,
@@ -243,9 +232,6 @@ pub(crate) async fn apply_bespoke_event_handling(
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
item_id: item_id.clone(),
reason,
command: Some(command_string.clone()),
cwd: Some(cwd.clone()),
command_actions: Some(command_actions.clone()),
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
};
let rx = outgoing
@@ -269,91 +255,6 @@ pub(crate) async fn apply_bespoke_event_handling(
});
}
},
EventMsg::RequestUserInput(request) => {
if matches!(api_version, ApiVersion::V2) {
let questions = request
.questions
.into_iter()
.map(|question| ToolRequestUserInputQuestion {
id: question.id,
header: question.header,
question: question.question,
options: question.options.map(|options| {
options
.into_iter()
.map(|option| ToolRequestUserInputOption {
label: option.label,
description: option.description,
})
.collect()
}),
})
.collect();
let params = ToolRequestUserInputParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
item_id: request.call_id,
questions,
};
let rx = outgoing
.send_request(ServerRequestPayload::ToolRequestUserInput(params))
.await;
tokio::spawn(async move {
on_request_user_input_response(event_turn_id, rx, conversation).await;
});
} else {
error!(
"request_user_input is only supported on api v2 (call_id: {})",
request.call_id
);
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
}
EventMsg::DynamicToolCallRequest(request) => {
if matches!(api_version, ApiVersion::V2) {
let call_id = request.call_id;
let params = DynamicToolCallParams {
thread_id: conversation_id.to_string(),
turn_id: request.turn_id,
call_id: call_id.clone(),
tool: request.tool,
arguments: request.arguments,
};
let rx = outgoing
.send_request(ServerRequestPayload::DynamicToolCall(params))
.await;
tokio::spawn(async move {
crate::dynamic_tools::on_call_response(call_id, rx, conversation).await;
});
} else {
error!(
"dynamic tool calls are only supported on api v2 (call_id: {})",
request.call_id
);
let call_id = request.call_id;
let _ = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: CoreDynamicToolResponse {
call_id,
output: "dynamic tool calls require api v2".to_string(),
success: false,
},
})
.await;
}
}
// TODO(celia): properly construct McpToolCall TurnItem in core.
EventMsg::McpToolCallBegin(begin_event) => {
let notification = construct_mcp_tool_call_notification(
@@ -377,218 +278,6 @@ pub(crate) async fn apply_bespoke_event_handling(
.send_server_notification(ServerNotification::ItemCompleted(notification))
.await;
}
EventMsg::CollabAgentSpawnBegin(begin_event) => {
let item = ThreadItem::CollabAgentToolCall {
id: begin_event.call_id,
tool: CollabAgentTool::SpawnAgent,
status: V2CollabToolCallStatus::InProgress,
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: Vec::new(),
prompt: Some(begin_event.prompt),
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(notification))
.await;
}
EventMsg::CollabAgentSpawnEnd(end_event) => {
let has_receiver = end_event.new_thread_id.is_some();
let status = match &end_event.status {
codex_protocol::protocol::AgentStatus::Errored(_)
| codex_protocol::protocol::AgentStatus::NotFound => V2CollabToolCallStatus::Failed,
_ if has_receiver => V2CollabToolCallStatus::Completed,
_ => V2CollabToolCallStatus::Failed,
};
let (receiver_thread_ids, agents_states) = match end_event.new_thread_id {
Some(id) => {
let receiver_id = id.to_string();
let received_status = V2CollabAgentStatus::from(end_event.status.clone());
(
vec![receiver_id.clone()],
[(receiver_id, received_status)].into_iter().collect(),
)
}
None => (Vec::new(), HashMap::new()),
};
let item = ThreadItem::CollabAgentToolCall {
id: end_event.call_id,
tool: CollabAgentTool::SpawnAgent,
status,
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: Some(end_event.prompt),
agents_states,
};
let notification = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(notification))
.await;
}
EventMsg::CollabAgentInteractionBegin(begin_event) => {
let receiver_thread_ids = vec![begin_event.receiver_thread_id.to_string()];
let item = ThreadItem::CollabAgentToolCall {
id: begin_event.call_id,
tool: CollabAgentTool::SendInput,
status: V2CollabToolCallStatus::InProgress,
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: Some(begin_event.prompt),
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(notification))
.await;
}
EventMsg::CollabAgentInteractionEnd(end_event) => {
let status = match &end_event.status {
codex_protocol::protocol::AgentStatus::Errored(_)
| codex_protocol::protocol::AgentStatus::NotFound => V2CollabToolCallStatus::Failed,
_ => V2CollabToolCallStatus::Completed,
};
let receiver_id = end_event.receiver_thread_id.to_string();
let received_status = V2CollabAgentStatus::from(end_event.status);
let item = ThreadItem::CollabAgentToolCall {
id: end_event.call_id,
tool: CollabAgentTool::SendInput,
status,
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id.clone()],
prompt: Some(end_event.prompt),
agents_states: [(receiver_id, received_status)].into_iter().collect(),
};
let notification = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(notification))
.await;
}
EventMsg::CollabWaitingBegin(begin_event) => {
let receiver_thread_ids = begin_event
.receiver_thread_ids
.iter()
.map(ToString::to_string)
.collect();
let item = ThreadItem::CollabAgentToolCall {
id: begin_event.call_id,
tool: CollabAgentTool::Wait,
status: V2CollabToolCallStatus::InProgress,
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(notification))
.await;
}
EventMsg::CollabWaitingEnd(end_event) => {
let status = if end_event.statuses.values().any(|status| {
matches!(
status,
codex_protocol::protocol::AgentStatus::Errored(_)
| codex_protocol::protocol::AgentStatus::NotFound
)
}) {
V2CollabToolCallStatus::Failed
} else {
V2CollabToolCallStatus::Completed
};
let receiver_thread_ids = end_event.statuses.keys().map(ToString::to_string).collect();
let agents_states = end_event
.statuses
.iter()
.map(|(id, status)| (id.to_string(), V2CollabAgentStatus::from(status.clone())))
.collect();
let item = ThreadItem::CollabAgentToolCall {
id: end_event.call_id,
tool: CollabAgentTool::Wait,
status,
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids,
prompt: None,
agents_states,
};
let notification = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(notification))
.await;
}
EventMsg::CollabCloseBegin(begin_event) => {
let item = ThreadItem::CollabAgentToolCall {
id: begin_event.call_id,
tool: CollabAgentTool::CloseAgent,
status: V2CollabToolCallStatus::InProgress,
sender_thread_id: begin_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
prompt: None,
agents_states: HashMap::new(),
};
let notification = ItemStartedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemStarted(notification))
.await;
}
EventMsg::CollabCloseEnd(end_event) => {
let status = match &end_event.status {
codex_protocol::protocol::AgentStatus::Errored(_)
| codex_protocol::protocol::AgentStatus::NotFound => V2CollabToolCallStatus::Failed,
_ => V2CollabToolCallStatus::Completed,
};
let receiver_id = end_event.receiver_thread_id.to_string();
let agents_states = [(
receiver_id.clone(),
V2CollabAgentStatus::from(end_event.status),
)]
.into_iter()
.collect();
let item = ThreadItem::CollabAgentToolCall {
id: end_event.call_id,
tool: CollabAgentTool::CloseAgent,
status,
sender_thread_id: end_event.sender_thread_id.to_string(),
receiver_thread_ids: vec![receiver_id],
prompt: None,
agents_states,
};
let notification = ItemCompletedNotification {
thread_id: conversation_id.to_string(),
turn_id: event_turn_id.clone(),
item,
};
outgoing
.send_server_notification(ServerNotification::ItemCompleted(notification))
.await;
}
EventMsg::AgentMessageContentDelta(event) => {
let notification = AgentMessageDeltaNotification {
thread_id: conversation_id.to_string(),
@@ -1042,15 +731,7 @@ pub(crate) async fn apply_bespoke_event_handling(
};
if let Some(request_id) = pending {
let Some(rollout_path) = conversation.rollout_path() else {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: "thread has no persisted rollout".to_string(),
data: None,
};
outgoing.send_error(request_id, error).await;
return;
};
let rollout_path = conversation.rollout_path();
let response = match read_summary_from_rollout(
rollout_path.as_path(),
fallback_model_provider.as_str(),
@@ -1451,65 +1132,6 @@ async fn on_exec_approval_response(
}
}
async fn on_request_user_input_response(
event_turn_id: String,
receiver: oneshot::Receiver<JsonValue>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let empty = CoreRequestUserInputResponse {
answers: HashMap::new(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response: empty,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
return;
}
};
let response =
serde_json::from_value::<ToolRequestUserInputResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize ToolRequestUserInputResponse: {err}");
ToolRequestUserInputResponse {
answers: HashMap::new(),
}
});
let response = CoreRequestUserInputResponse {
answers: response
.answers
.into_iter()
.map(|(id, answer)| {
(
id,
CoreRequestUserInputAnswer {
answers: answer.answers,
},
)
})
.collect(),
};
if let Err(err) = conversation
.submit(Op::UserInputAnswer {
id: event_turn_id,
response,
})
.await
{
error!("failed to submit UserInputAnswer: {err}");
}
}
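// Hedged example (ids and values illustrative): the JSON this handler expects
// on the oneshot channel mirrors ToolRequestUserInputResponse above: a map
// from question id to an object carrying that question's `answers` (inner
// shape assumed):
//
//   {
//     "answers": {
//       "confirm_path": { "answers": ["Yes (Recommended)"] }
//     }
//   }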
const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response.";
fn render_review_output_text(output: &ReviewOutputEvent) -> String {

File diff suppressed because it is too large.


@@ -3,18 +3,13 @@ use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigRequirements;
use codex_app_server_protocol::ConfigRequirementsReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::SandboxMode;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::SandboxModeRequirement as CoreSandboxModeRequirement;
use serde_json::json;
use std::path::PathBuf;
use toml::Value as TomlValue;
@@ -42,19 +37,6 @@ impl ConfigApi {
self.service.read(params).await.map_err(map_error)
}
pub(crate) async fn config_requirements_read(
&self,
) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
let requirements = self
.service
.read_requirements()
.await
.map_err(map_error)?
.map(map_requirements_toml_to_api);
Ok(ConfigRequirementsReadResponse { requirements })
}
pub(crate) async fn write_value(
&self,
params: ConfigValueWriteParams,
@@ -70,32 +52,6 @@ impl ConfigApi {
}
}
fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
ConfigRequirements {
allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
policies
.into_iter()
.map(codex_app_server_protocol::AskForApproval::from)
.collect()
}),
allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
modes
.into_iter()
.filter_map(map_sandbox_mode_requirement_to_api)
.collect()
}),
}
}
fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
match mode {
CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
CoreSandboxModeRequirement::ExternalSandbox => None,
}
}
fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
@@ -117,39 +73,3 @@ fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) ->
})),
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use pretty_assertions::assert_eq;
#[test]
fn map_requirements_toml_to_api_converts_core_enums() {
let requirements = ConfigRequirementsToml {
allowed_approval_policies: Some(vec![
CoreAskForApproval::Never,
CoreAskForApproval::OnRequest,
]),
allowed_sandbox_modes: Some(vec![
CoreSandboxModeRequirement::ReadOnly,
CoreSandboxModeRequirement::ExternalSandbox,
]),
mcp_servers: None,
};
let mapped = map_requirements_toml_to_api(requirements);
assert_eq!(
mapped.allowed_approval_policies,
Some(vec![
codex_app_server_protocol::AskForApproval::Never,
codex_app_server_protocol::AskForApproval::OnRequest,
])
);
assert_eq!(
mapped.allowed_sandbox_modes,
Some(vec![SandboxMode::ReadOnly]),
);
}
}


@@ -1,58 +0,0 @@
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_core::CodexThread;
use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse;
use codex_protocol::protocol::Op;
use std::sync::Arc;
use tokio::sync::oneshot;
use tracing::error;
pub(crate) async fn on_call_response(
call_id: String,
receiver: oneshot::Receiver<serde_json::Value>,
conversation: Arc<CodexThread>,
) {
let response = receiver.await;
let value = match response {
Ok(value) => value,
Err(err) => {
error!("request failed: {err:?}");
let fallback = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: "dynamic tool request failed".to_string(),
success: false,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id.clone(),
response: fallback,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
return;
}
};
let response = serde_json::from_value::<DynamicToolCallResponse>(value).unwrap_or_else(|err| {
error!("failed to deserialize DynamicToolCallResponse: {err}");
DynamicToolCallResponse {
output: "dynamic tool response was invalid".to_string(),
success: false,
}
});
let response = CoreDynamicToolResponse {
call_id: call_id.clone(),
output: response.output,
success: response.success,
};
if let Err(err) = conversation
.submit(Op::DynamicToolResponse {
id: call_id,
response,
})
.await
{
error!("failed to submit DynamicToolResponse: {err}");
}
}
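// Hedged usage sketch (illustrative): the caller pairs this handler with a
// oneshot sender and resolves it with the client's JSON reply. `thread` is an
// assumed Arc<CodexThread> already in scope at the call site.
async fn demo_on_call_response(thread: Arc<CodexThread>) {
    let (tx, rx) = oneshot::channel();
    tokio::spawn(on_call_response("call-1".to_string(), rx, thread));
    // A well-formed reply; dropping `tx` instead would exercise the
    // "request failed" fallback above.
    let _ = tx.send(serde_json::json!({ "output": "ok", "success": true }));
}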


@@ -1,9 +1,7 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
use std::io::Result as IoResult;
@@ -12,15 +10,7 @@ use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ConfigLayerSource;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::TextPosition as AppTextPosition;
use codex_app_server_protocol::TextRange as AppTextRange;
use codex_core::ExecPolicyError;
use codex_core::check_execpolicy_for_warnings;
use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::TextRange as CoreTextRange;
use codex_feedback::CodexFeedback;
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
@@ -31,7 +21,6 @@ use toml::Value as TomlValue;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::Layer;
use tracing_subscriber::layer::SubscriberExt;
@@ -40,7 +29,6 @@ use tracing_subscriber::util::SubscriberInitExt;
mod bespoke_event_handling;
mod codex_message_processor;
mod config_api;
mod dynamic_tools;
mod error_code;
mod fuzzy_file_search;
mod message_processor;
@@ -52,117 +40,10 @@ mod outgoing_message;
/// plenty for an interactive CLI.
const CHANNEL_CAPACITY: usize = 128;
fn config_warning_from_error(
summary: impl Into<String>,
err: &std::io::Error,
) -> ConfigWarningNotification {
let (path, range) = match config_error_location(err) {
Some((path, range)) => (Some(path), Some(range)),
None => (None, None),
};
ConfigWarningNotification {
summary: summary.into(),
details: Some(err.to_string()),
path,
range,
}
}
fn config_error_location(err: &std::io::Error) -> Option<(String, AppTextRange)> {
err.get_ref()
.and_then(|err| err.downcast_ref::<ConfigLoadError>())
.map(|err| {
let config_error = err.config_error();
(
config_error.path.to_string_lossy().to_string(),
app_text_range(&config_error.range),
)
})
}
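// Hedged note: the downcast above can only succeed if the loader stored the
// typed error inside the io::Error, along the lines of (constructor shown
// for illustration):
//
//   std::io::Error::new(std::io::ErrorKind::InvalidData, config_load_error)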
fn exec_policy_warning_location(err: &ExecPolicyError) -> (Option<String>, Option<AppTextRange>) {
match err {
ExecPolicyError::ParsePolicy { path, source } => {
if let Some(location) = source.location() {
let range = AppTextRange {
start: AppTextPosition {
line: location.range.start.line,
column: location.range.start.column,
},
end: AppTextPosition {
line: location.range.end.line,
column: location.range.end.column,
},
};
return (Some(location.path), Some(range));
}
(Some(path.clone()), None)
}
_ => (None, None),
}
}
fn app_text_range(range: &CoreTextRange) -> AppTextRange {
AppTextRange {
start: AppTextPosition {
line: range.start.line,
column: range.start.column,
},
end: AppTextPosition {
line: range.end.line,
column: range.end.column,
},
}
}
fn project_config_warning(config: &Config) -> Option<ConfigWarningNotification> {
let mut disabled_folders = Vec::new();
for layer in config
.config_layer_stack
.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst, true)
{
if !matches!(layer.name, ConfigLayerSource::Project { .. })
|| layer.disabled_reason.is_none()
{
continue;
}
if let ConfigLayerSource::Project { dot_codex_folder } = &layer.name {
disabled_folders.push((
dot_codex_folder.as_path().display().to_string(),
layer
.disabled_reason
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| "Config folder disabled.".to_string()),
));
}
}
if disabled_folders.is_empty() {
return None;
}
let mut message = "The following config folders are disabled:\n".to_string();
for (index, (folder, reason)) in disabled_folders.iter().enumerate() {
let display_index = index + 1;
message.push_str(&format!(" {display_index}. {folder}\n"));
message.push_str(&format!(" {reason}\n"));
}
Some(ConfigWarningNotification {
summary: message,
details: None,
path: None,
range: None,
})
}
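// Hedged example of the summary built above (paths illustrative; the reason
// falls back to "Config folder disabled." when none is recorded):
//
//   The following config folders are disabled:
//     1. /repo/.codex
//        Config folder disabled.
//     2. /repo/vendor/.codex
//        Config folder disabled.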
pub async fn run_main(
codex_linux_sandbox_exe: Option<PathBuf>,
cli_config_overrides: CliConfigOverrides,
loader_overrides: LoaderOverrides,
default_analytics_enabled: bool,
) -> IoResult<()> {
// Set up channels.
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
@@ -200,57 +81,24 @@ pub async fn run_main(
)
})?;
let loader_overrides_for_config_api = loader_overrides.clone();
let mut config_warnings = Vec::new();
let config = match ConfigBuilder::default()
let config = ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides)
.build()
.await
{
Ok(config) => config,
Err(err) => {
let message = config_warning_from_error("Invalid configuration; using defaults.", &err);
config_warnings.push(message);
Config::load_default_with_cli_overrides(cli_kv_overrides.clone()).map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidData,
format!("error loading default config after config error: {e}"),
)
})?
}
};
if let Ok(Some(err)) =
check_execpolicy_for_warnings(&config.features, &config.config_layer_stack).await
{
let (path, range) = exec_policy_warning_location(&err);
let message = ConfigWarningNotification {
summary: "Error parsing rules; custom rules not applied.".to_string(),
details: Some(err.to_string()),
path,
range,
};
config_warnings.push(message);
}
if let Some(warning) = project_config_warning(&config) {
config_warnings.push(warning);
}
.map_err(|e| {
std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}"))
})?;
let feedback = CodexFeedback::new();
let otel = codex_core::otel_init::build_provider(
&config,
env!("CARGO_PKG_VERSION"),
Some("codex_app_server"),
default_analytics_enabled,
)
.map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidData,
format!("error loading otel config: {e}"),
)
})?;
let otel =
codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION")).map_err(|e| {
std::io::Error::new(
ErrorKind::InvalidData,
format!("error loading otel config: {e}"),
)
})?;
// Install a simple subscriber so `tracing` output is visible. Users can
// control the log level with `RUST_LOG`.
@@ -273,12 +121,6 @@ pub async fn run_main(
.with(otel_logger_layer)
.with(otel_tracing_layer)
.try_init();
for warning in &config_warnings {
match &warning.details {
Some(details) => error!("{} {}", warning.summary, details),
None => error!("{}", warning.summary),
}
}
// Task: process incoming messages.
let processor_handle = tokio::spawn({
@@ -292,41 +134,14 @@ pub async fn run_main(
cli_overrides,
loader_overrides,
feedback.clone(),
config_warnings,
);
let mut thread_created_rx = processor.thread_created_receiver();
async move {
let mut listen_for_threads = true;
loop {
tokio::select! {
msg = incoming_rx.recv() => {
let Some(msg) = msg else {
break;
};
match msg {
JSONRPCMessage::Request(r) => processor.process_request(r).await,
JSONRPCMessage::Response(r) => processor.process_response(r).await,
JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
JSONRPCMessage::Error(e) => processor.process_error(e),
}
}
created = thread_created_rx.recv(), if listen_for_threads => {
match created {
Ok(thread_id) => {
processor.try_attach_thread_listener(thread_id).await;
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => {
// TODO(jif) handle lag.
// Assumes thread creation volume is low enough that lag never happens.
// If it does, we log and continue without resyncing to avoid attaching
// listeners for threads that should remain unsubscribed.
warn!("thread_created receiver lagged; skipping resync");
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
listen_for_threads = false;
}
}
}
while let Some(msg) = incoming_rx.recv().await {
match msg {
JSONRPCMessage::Request(r) => processor.process_request(r).await,
JSONRPCMessage::Response(r) => processor.process_response(r).await,
JSONRPCMessage::Notification(n) => processor.process_notification(n).await,
JSONRPCMessage::Error(e) => processor.process_error(e),
}
}


@@ -20,7 +20,6 @@ fn main() -> anyhow::Result<()> {
codex_linux_sandbox_exe,
CliConfigOverrides::default(),
loader_overrides,
false,
)
.await?;
Ok(())


@@ -10,7 +10,6 @@ use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCErrorError;
@@ -18,19 +17,14 @@ use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_core::AuthManager;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::SetOriginatorError;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_core::default_client::set_default_originator;
use codex_feedback::CodexFeedback;
use codex_protocol::ThreadId;
use codex_protocol::protocol::SessionSource;
use tokio::sync::broadcast;
use toml::Value as TomlValue;
pub(crate) struct MessageProcessor {
@@ -38,7 +32,6 @@ pub(crate) struct MessageProcessor {
codex_message_processor: CodexMessageProcessor,
config_api: ConfigApi,
initialized: bool,
config_warnings: Vec<ConfigWarningNotification>,
}
impl MessageProcessor {
@@ -51,7 +44,6 @@ impl MessageProcessor {
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
feedback: CodexFeedback,
config_warnings: Vec<ConfigWarningNotification>,
) -> Self {
let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(
@@ -80,7 +72,6 @@ impl MessageProcessor {
codex_message_processor,
config_api,
initialized: false,
config_warnings,
}
}
@@ -130,27 +121,6 @@ impl MessageProcessor {
title: _title,
version,
} = params.client_info;
if let Err(error) = set_default_originator(name.clone()) {
match error {
SetOriginatorError::InvalidHeaderValue => {
let error = JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!(
"Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value."
),
data: None,
};
self.outgoing.send_error(request_id, error).await;
return;
}
SetOriginatorError::AlreadyInitialized => {
// No-op. This is expected to happen if the originator is already set via env var.
// TODO(owen): Once we remove support for CODEX_INTERNAL_ORIGINATOR_OVERRIDE,
// this will be an unexpected state and we can return a JSON-RPC error indicating
// internal server error.
}
}
}
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);
@@ -161,15 +131,6 @@ impl MessageProcessor {
self.outgoing.send_response(request_id, response).await;
self.initialized = true;
if !self.config_warnings.is_empty() {
for notification in self.config_warnings.drain(..) {
self.outgoing
.send_server_notification(ServerNotification::ConfigWarning(
notification,
))
.await;
}
}
return;
}
@@ -197,12 +158,6 @@ impl MessageProcessor {
ClientRequest::ConfigBatchWrite { request_id, params } => {
self.handle_config_batch_write(request_id, params).await;
}
ClientRequest::ConfigRequirementsRead {
request_id,
params: _,
} => {
self.handle_config_requirements_read(request_id).await;
}
other => {
self.codex_message_processor.process_request(other).await;
}
@@ -215,19 +170,6 @@ impl MessageProcessor {
tracing::info!("<- notification: {:?}", notification);
}
pub(crate) fn thread_created_receiver(&self) -> broadcast::Receiver<ThreadId> {
self.codex_message_processor.thread_created_receiver()
}
pub(crate) async fn try_attach_thread_listener(&mut self, thread_id: ThreadId) {
if !self.initialized {
return;
}
self.codex_message_processor
.try_attach_thread_listener(thread_id)
.await;
}
/// Handle a standalone JSON-RPC response originating from the peer.
pub(crate) async fn process_response(&mut self, response: JSONRPCResponse) {
tracing::info!("<- response: {:?}", response);
@@ -268,11 +210,4 @@ impl MessageProcessor {
Err(error) => self.outgoing.send_error(request_id, error).await,
}
}
async fn handle_config_requirements_read(&self, request_id: RequestId) {
match self.config_api.config_requirements_read().await {
Ok(response) => self.outgoing.send_response(request_id, response).await,
Err(error) => self.outgoing.send_error(request_id, error).await,
}
}
}


@@ -4,13 +4,12 @@ use codex_app_server_protocol::Model;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffortPreset;
pub async fn supported_models(thread_manager: Arc<ThreadManager>, config: &Config) -> Vec<Model> {
thread_manager
.list_models(config, RefreshStrategy::OnlineIfUncached)
.list_models(config)
.await
.into_iter()
.filter(|preset| preset.show_in_picker)
@@ -28,7 +27,6 @@ fn model_from_preset(preset: ModelPreset) -> Model {
preset.supported_reasoning_efforts,
),
default_reasoning_effort: preset.default_reasoning_effort,
supports_personality: preset.supports_personality,
is_default: preset.is_default,
}
}


@@ -162,7 +162,6 @@ mod tests {
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::RateLimitSnapshot;
use codex_app_server_protocol::RateLimitWindow;
@@ -280,28 +279,4 @@ mod tests {
"ensure the notification serializes correctly"
);
}
#[test]
fn verify_config_warning_notification_serialization() {
let notification = ServerNotification::ConfigWarning(ConfigWarningNotification {
summary: "Config error: using defaults".to_string(),
details: Some("error loading config: bad config".to_string()),
path: None,
range: None,
});
let jsonrpc_notification = OutgoingMessage::AppServerNotification(notification);
assert_eq!(
json!( {
"method": "configWarning",
"params": {
"summary": "Config error: using defaults",
"details": "error loading config: bad config",
},
}),
serde_json::to_value(jsonrpc_notification)
.expect("ensure the notification serializes correctly"),
"ensure the notification serializes correctly"
);
}
}


@@ -1,7 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "common",
crate_name = "app_test_support",
crate_srcs = glob(["*.rs"]),
)


@@ -49,16 +49,6 @@ impl ChatGptAuthFixture {
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.claims.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.claims.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
pub fn email(mut self, email: impl Into<String>) -> Self {
self.claims.email = Some(email.into());
self
@@ -79,8 +69,6 @@ impl ChatGptAuthFixture {
pub struct ChatGptIdTokenClaims {
pub email: Option<String>,
pub plan_type: Option<String>,
pub chatgpt_user_id: Option<String>,
pub chatgpt_account_id: Option<String>,
}
impl ChatGptIdTokenClaims {
@@ -97,16 +85,6 @@ impl ChatGptIdTokenClaims {
self.plan_type = Some(plan_type.into());
self
}
pub fn chatgpt_user_id(mut self, chatgpt_user_id: impl Into<String>) -> Self {
self.chatgpt_user_id = Some(chatgpt_user_id.into());
self
}
pub fn chatgpt_account_id(mut self, chatgpt_account_id: impl Into<String>) -> Self {
self.chatgpt_account_id = Some(chatgpt_account_id.into());
self
}
}
pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
@@ -115,20 +93,10 @@ pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> {
if let Some(email) = &claims.email {
payload.insert("email".to_string(), json!(email));
}
let mut auth_payload = serde_json::Map::new();
if let Some(plan_type) = &claims.plan_type {
auth_payload.insert("chatgpt_plan_type".to_string(), json!(plan_type));
}
if let Some(chatgpt_user_id) = &claims.chatgpt_user_id {
auth_payload.insert("chatgpt_user_id".to_string(), json!(chatgpt_user_id));
}
if let Some(chatgpt_account_id) = &claims.chatgpt_account_id {
auth_payload.insert("chatgpt_account_id".to_string(), json!(chatgpt_account_id));
}
if !auth_payload.is_empty() {
payload.insert(
"https://api.openai.com/auth".to_string(),
serde_json::Value::Object(auth_payload),
json!({ "chatgpt_plan_type": plan_type }),
);
}
let payload = serde_json::Value::Object(payload);


@@ -17,21 +17,16 @@ pub use core_test_support::format_with_current_shell_non_login;
pub use core_test_support::test_path_buf_with_windows;
pub use core_test_support::test_tmp_path;
pub use core_test_support::test_tmp_path_buf;
pub use mcp_process::DEFAULT_CLIENT_NAME;
pub use mcp_process::McpProcess;
pub use mock_model_server::create_mock_responses_server_repeating_assistant;
pub use mock_model_server::create_mock_responses_server_sequence;
pub use mock_model_server::create_mock_responses_server_sequence_unchecked;
pub use mock_model_server::create_mock_chat_completions_server;
pub use mock_model_server::create_mock_chat_completions_server_unchecked;
pub use models_cache::write_models_cache;
pub use models_cache::write_models_cache_with_models;
pub use responses::create_apply_patch_sse_response;
pub use responses::create_exec_command_sse_response;
pub use responses::create_final_assistant_message_sse_response;
pub use responses::create_request_user_input_sse_response;
pub use responses::create_shell_command_sse_response;
pub use rollout::create_fake_rollout;
pub use rollout::create_fake_rollout_with_text_elements;
pub use rollout::rollout_path;
use serde::de::DeserializeOwned;
pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> {


@@ -12,19 +12,15 @@ use tokio::process::ChildStdout;
use anyhow::Context;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::ArchiveConversationParams;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginChatGptParams;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientNotification;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ExecRunParams;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::ForkConversationParams;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::InitializeParams;
@@ -47,16 +43,12 @@ use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::SetDefaultModelParams;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadForkParams;
use codex_app_server_protocol::ThreadListParams;
use codex_app_server_protocol::ThreadLoadedListParams;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadRollbackParams;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::TurnInterruptParams;
use codex_app_server_protocol::TurnStartParams;
use codex_core::default_client::CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR;
use tokio::process::Command;
pub struct McpProcess {
@@ -68,11 +60,9 @@ pub struct McpProcess {
process: Child,
stdin: ChildStdin,
stdout: BufReader<ChildStdout>,
pending_messages: VecDeque<JSONRPCMessage>,
pending_user_messages: VecDeque<JSONRPCNotification>,
}
pub const DEFAULT_CLIENT_NAME: &str = "codex-app-server-tests";
impl McpProcess {
pub async fn new(codex_home: &Path) -> anyhow::Result<Self> {
Self::new_with_env(codex_home, &[]).await
@@ -96,7 +86,6 @@ impl McpProcess {
cmd.stderr(Stdio::piped());
cmd.env("CODEX_HOME", codex_home);
cmd.env("RUST_LOG", "debug");
cmd.env_remove(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR);
for (k, v) in env_overrides {
match v {
@@ -138,66 +127,37 @@ impl McpProcess {
process,
stdin,
stdout,
pending_messages: VecDeque::new(),
pending_user_messages: VecDeque::new(),
})
}
/// Performs the initialization handshake with the MCP server.
pub async fn initialize(&mut self) -> anyhow::Result<()> {
let initialized = self
.initialize_with_client_info(ClientInfo {
name: DEFAULT_CLIENT_NAME.to_string(),
let params = Some(serde_json::to_value(InitializeParams {
client_info: ClientInfo {
name: "codex-app-server-tests".to_string(),
title: None,
version: "0.1.0".to_string(),
})
.await?;
let JSONRPCMessage::Response(_) = initialized else {
},
})?);
let req_id = self.send_request("initialize", params).await?;
let initialized = self.read_jsonrpc_message().await?;
let JSONRPCMessage::Response(response) = initialized else {
unreachable!("expected JSONRPCMessage::Response for initialize, got {initialized:?}");
};
Ok(())
}
/// Sends initialize with the provided client info and returns the response/error message.
pub async fn initialize_with_client_info(
&mut self,
client_info: ClientInfo,
) -> anyhow::Result<JSONRPCMessage> {
let params = Some(serde_json::to_value(InitializeParams { client_info })?);
let request_id = self.send_request("initialize", params).await?;
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Response(response) => {
if response.id != RequestId::Integer(request_id) {
anyhow::bail!(
"initialize response id mismatch: expected {}, got {:?}",
request_id,
response.id
);
}
// Send notifications/initialized to ack the response.
self.send_notification(ClientNotification::Initialized)
.await?;
Ok(JSONRPCMessage::Response(response))
}
JSONRPCMessage::Error(error) => {
if error.id != RequestId::Integer(request_id) {
anyhow::bail!(
"initialize error id mismatch: expected {}, got {:?}",
request_id,
error.id
);
}
Ok(JSONRPCMessage::Error(error))
}
JSONRPCMessage::Notification(notification) => {
anyhow::bail!("unexpected JSONRPCMessage::Notification: {notification:?}");
}
JSONRPCMessage::Request(request) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {request:?}");
}
if response.id != RequestId::Integer(req_id) {
anyhow::bail!(
"initialize response id mismatch: expected {}, got {:?}",
req_id,
response.id
);
}
// Send notifications/initialized to ack the response.
self.send_notification(ClientNotification::Initialized)
.await?;
Ok(())
}
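// Hedged sketch of the wire exchange driven above (ids illustrative; result
// body elided; the ack method name follows the in-code comment above):
//
//   -> {"jsonrpc":"2.0","id":1,"method":"initialize",
//       "params":{"clientInfo":{"name":"codex-app-server-tests","version":"0.1.0"}}}
//   <- {"jsonrpc":"2.0","id":1,"result":{ ... }}
//   -> {"jsonrpc":"2.0","method":"notifications/initialized"}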
/// Send a `newConversation` JSON-RPC request.
@@ -348,15 +308,6 @@ impl McpProcess {
self.send_request("thread/resume", params).await
}
/// Send a `thread/fork` JSON-RPC request.
pub async fn send_thread_fork_request(
&mut self,
params: ThreadForkParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/fork", params).await
}
/// Send a `thread/archive` JSON-RPC request.
pub async fn send_thread_archive_request(
&mut self,
@@ -384,24 +335,6 @@ impl McpProcess {
self.send_request("thread/list", params).await
}
/// Send a `thread/loaded/list` JSON-RPC request.
pub async fn send_thread_loaded_list_request(
&mut self,
params: ThreadLoadedListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/loaded/list", params).await
}
/// Send a `thread/read` JSON-RPC request.
pub async fn send_thread_read_request(
&mut self,
params: ThreadReadParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("thread/read", params).await
}
/// Send a `model/list` JSON-RPC request.
pub async fn send_list_models_request(
&mut self,
@@ -411,21 +344,6 @@ impl McpProcess {
self.send_request("model/list", params).await
}
/// Send an `app/list` JSON-RPC request.
pub async fn send_apps_list_request(&mut self, params: AppsListParams) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("app/list", params).await
}
/// Send a `collaborationMode/list` JSON-RPC request.
pub async fn send_list_collaboration_modes_request(
&mut self,
params: CollaborationModeListParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("collaborationMode/list", params).await
}
/// Send a `resumeConversation` JSON-RPC request.
pub async fn send_resume_conversation_request(
&mut self,
@@ -435,15 +353,6 @@ impl McpProcess {
self.send_request("resumeConversation", params).await
}
/// Send a `forkConversation` JSON-RPC request.
pub async fn send_fork_conversation_request(
&mut self,
params: ForkConversationParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("forkConversation", params).await
}
/// Send a `loginApiKey` JSON-RPC request.
pub async fn send_login_api_key_request(
&mut self,
@@ -467,12 +376,6 @@ impl McpProcess {
self.send_request("turn/start", params).await
}
/// Send an `exec/run` JSON-RPC request (v2).
pub async fn send_exec_run_request(&mut self, params: ExecRunParams) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("exec/run", params).await
}
/// Send a `turn/interrupt` JSON-RPC request (v2).
pub async fn send_turn_interrupt_request(
&mut self,
@@ -641,16 +544,27 @@ impl McpProcess {
pub async fn read_stream_until_request_message(&mut self) -> anyhow::Result<ServerRequest> {
eprintln!("in read_stream_until_request_message()");
let message = self
.read_stream_until_message(|message| matches!(message, JSONRPCMessage::Request(_)))
.await?;
loop {
let message = self.read_jsonrpc_message().await?;
let JSONRPCMessage::Request(jsonrpc_request) = message else {
unreachable!("expected JSONRPCMessage::Request, got {message:?}");
};
jsonrpc_request
.try_into()
.with_context(|| "failed to deserialize ServerRequest from JSONRPCRequest")
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(jsonrpc_request) => {
return jsonrpc_request.try_into().with_context(
|| "failed to deserialize ServerRequest from JSONRPCRequest",
);
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}");
}
}
}
}
pub async fn read_stream_until_response_message(
@@ -659,32 +573,52 @@ impl McpProcess {
) -> anyhow::Result<JSONRPCResponse> {
eprintln!("in read_stream_until_response_message({request_id:?})");
let message = self
.read_stream_until_message(|message| {
Self::message_request_id(message) == Some(&request_id)
})
.await?;
let JSONRPCMessage::Response(response) = message else {
unreachable!("expected JSONRPCMessage::Response, got {message:?}");
};
Ok(response)
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(jsonrpc_response) => {
if jsonrpc_response.id == request_id {
return Ok(jsonrpc_response);
}
}
}
}
}
pub async fn read_stream_until_error_message(
&mut self,
request_id: RequestId,
) -> anyhow::Result<JSONRPCError> {
let message = self
.read_stream_until_message(|message| {
Self::message_request_id(message) == Some(&request_id)
})
.await?;
let JSONRPCMessage::Error(err) = message else {
unreachable!("expected JSONRPCMessage::Error, got {message:?}");
};
Ok(err)
loop {
let message = self.read_jsonrpc_message().await?;
match message {
JSONRPCMessage::Notification(notification) => {
eprintln!("notification: {notification:?}");
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Response(_) => {
// Keep scanning; we're waiting for an error with matching id.
}
JSONRPCMessage::Error(err) => {
if err.id == request_id {
return Ok(err);
}
}
}
}
}
pub async fn read_stream_until_notification_message(
@@ -693,64 +627,46 @@ impl McpProcess {
) -> anyhow::Result<JSONRPCNotification> {
eprintln!("in read_stream_until_notification_message({method})");
let message = self
.read_stream_until_message(|message| {
matches!(
message,
JSONRPCMessage::Notification(notification) if notification.method == method
)
})
.await?;
let JSONRPCMessage::Notification(notification) = message else {
unreachable!("expected JSONRPCMessage::Notification, got {message:?}");
};
Ok(notification)
}
/// Clears any buffered messages so future reads only consider new stream items.
///
/// We call this when, for example, we want to validate against the next turn and no
/// longer care about messages buffered from the prior turn.
pub fn clear_message_buffer(&mut self) {
self.pending_messages.clear();
}
/// Reads the stream until a message matches `predicate`, buffering any non-matching messages
/// for later reads.
async fn read_stream_until_message<F>(&mut self, predicate: F) -> anyhow::Result<JSONRPCMessage>
where
F: Fn(&JSONRPCMessage) -> bool,
{
if let Some(message) = self.take_pending_message(&predicate) {
return Ok(message);
if let Some(notification) = self.take_pending_notification_by_method(method) {
return Ok(notification);
}
loop {
let message = self.read_jsonrpc_message().await?;
if predicate(&message) {
return Ok(message);
match message {
JSONRPCMessage::Notification(notification) => {
if notification.method == method {
return Ok(notification);
}
self.enqueue_user_message(notification);
}
JSONRPCMessage::Request(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}");
}
JSONRPCMessage::Error(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}");
}
JSONRPCMessage::Response(_) => {
anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}");
}
}
self.pending_messages.push_back(message);
}
}
fn take_pending_message<F>(&mut self, predicate: &F) -> Option<JSONRPCMessage>
where
F: Fn(&JSONRPCMessage) -> bool,
{
if let Some(pos) = self.pending_messages.iter().position(predicate) {
return self.pending_messages.remove(pos);
fn take_pending_notification_by_method(&mut self, method: &str) -> Option<JSONRPCNotification> {
if let Some(pos) = self
.pending_user_messages
.iter()
.position(|notification| notification.method == method)
{
return self.pending_user_messages.remove(pos);
}
None
}
fn message_request_id(message: &JSONRPCMessage) -> Option<&RequestId> {
match message {
JSONRPCMessage::Request(request) => Some(&request.id),
JSONRPCMessage::Response(response) => Some(&response.id),
JSONRPCMessage::Error(err) => Some(&err.id),
JSONRPCMessage::Notification(_) => None,
fn enqueue_user_message(&mut self, notification: JSONRPCNotification) {
if notification.method == "codex/event/user_message" {
self.pending_user_messages.push_back(notification);
}
}
}
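// Hedged usage sketch: while waiting for a specific response, the helpers
// above skip intervening notifications, buffering only
// "codex/event/user_message" so a later read can still retrieve it:
//
//   let resp = mcp.read_stream_until_response_message(RequestId::Integer(id)).await?;
//   let user = mcp
//       .read_stream_until_notification_message("codex/event/user_message")
//       .await?;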


@@ -1,18 +1,17 @@
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use core_test_support::responses;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path_regex;
use wiremock::matchers::path;
/// Create a mock server that will provide the responses, in order, for
/// requests to the `/v1/responses` endpoint.
pub async fn create_mock_responses_server_sequence(responses: Vec<String>) -> MockServer {
let server = responses::start_mock_server().await;
/// requests to the `/v1/chat/completions` endpoint.
pub async fn create_mock_chat_completions_server(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let num_calls = responses.len();
let seq_responder = SeqResponder {
@@ -21,7 +20,7 @@ pub async fn create_mock_responses_server_sequence(responses: Vec<String>) -> Mo
};
Mock::given(method("POST"))
.and(path_regex(".*/responses$"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.expect(num_calls as u64)
.mount(&server)
@@ -30,10 +29,10 @@ pub async fn create_mock_responses_server_sequence(responses: Vec<String>) -> Mo
server
}
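// Hedged usage sketch (mirrors the tests later in this diff): queue one
// scripted SSE body per expected POST to /v1/chat/completions; the test
// config then points `base_url` at `server.uri()` with `wire_api = "chat"`.
async fn demo_mock_server() -> MockServer {
    create_mock_chat_completions_server(vec![
        "data: {\"choices\":[{\"delta\":{\"content\":\"Done\"},\"finish_reason\":\"stop\"}]}\n\ndata: DONE\n\n"
            .to_string(),
    ])
    .await
}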
/// Same as `create_mock_responses_server_sequence` but does not enforce an
/// Same as `create_mock_chat_completions_server` but does not enforce an
/// expectation on the number of calls.
pub async fn create_mock_responses_server_sequence_unchecked(responses: Vec<String>) -> MockServer {
let server = responses::start_mock_server().await;
pub async fn create_mock_chat_completions_server_unchecked(responses: Vec<String>) -> MockServer {
let server = MockServer::start().await;
let seq_responder = SeqResponder {
num_calls: AtomicUsize::new(0),
@@ -41,7 +40,7 @@ pub async fn create_mock_responses_server_sequence_unchecked(responses: Vec<Stri
};
Mock::given(method("POST"))
.and(path_regex(".*/responses$"))
.and(path("/v1/chat/completions"))
.respond_with(seq_responder)
.mount(&server)
.await;
@@ -58,24 +57,10 @@ impl Respond for SeqResponder {
fn respond(&self, _: &wiremock::Request) -> ResponseTemplate {
let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst);
match self.responses.get(call_num) {
Some(response) => responses::sse_response(response.clone()),
Some(response) => ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(response.clone(), "text/event-stream"),
None => panic!("no response for {call_num}"),
}
}
}
/// Create a mock responses API server that returns the same assistant message for every request.
pub async fn create_mock_responses_server_repeating_assistant(message: &str) -> MockServer {
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", message),
responses::ev_completed("resp-1"),
]);
Mock::given(method("POST"))
.and(path_regex(".*/responses$"))
.respond_with(responses::sse_response(body))
.mount(&server)
.await;
server
}


@@ -25,9 +25,8 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
},
supported_in_api: true,
priority,
upgrade: preset.upgrade.as_ref().map(|u| u.into()),
upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()),
base_instructions: "base instructions".to_string(),
model_instructions_template: None,
supports_reasoning_summaries: false,
support_verbosity: false,
default_verbosity: None,


@@ -1,4 +1,3 @@
use core_test_support::responses;
use serde_json::json;
use std::path::Path;
@@ -15,30 +14,85 @@ pub fn create_shell_command_sse_response(
"workdir": workdir.map(|w| w.to_string_lossy()),
"timeout_ms": timeout_ms
}))?;
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "shell_command", &tool_call_arguments),
responses::ev_completed("resp-1"),
]))
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "shell_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}
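// Hedged example of a frame produced above (arguments trimmed to the
// `command` field; values illustrative):
//
//   data: {"choices":[{"delta":{"tool_calls":[{"id":"call-1","function":
//     {"name":"shell_command","arguments":"{\"command\":[\"git\",\"init\"]}"}}]},
//     "finish_reason":"tool_calls"}]}
//
//   data: DONE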
pub fn create_final_assistant_message_sse_response(message: &str) -> anyhow::Result<String> {
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", message),
responses::ev_completed("resp-1"),
]))
let assistant_message = json!({
"choices": [
{
"delta": {
"content": message
},
"finish_reason": "stop"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&assistant_message)?
);
Ok(sse)
}
pub fn create_apply_patch_sse_response(
patch_content: &str,
call_id: &str,
) -> anyhow::Result<String> {
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_apply_patch_shell_command_call_via_heredoc(call_id, patch_content),
responses::ev_completed("resp-1"),
]))
// Use shell_command to call apply_patch with heredoc format
let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF");
let tool_call_arguments = serde_json::to_string(&json!({
"command": command
}))?;
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "shell_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}
pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String> {
@@ -54,32 +108,28 @@ pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String>
"cmd": command.join(" "),
"yield_time_ms": 500
}))?;
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "exec_command", &tool_call_arguments),
responses::ev_completed("resp-1"),
]))
}
let tool_call = json!({
"choices": [
{
"delta": {
"tool_calls": [
{
"id": call_id,
"function": {
"name": "exec_command",
"arguments": tool_call_arguments
}
}
]
},
"finish_reason": "tool_calls"
}
]
});
pub fn create_request_user_input_sse_response(call_id: &str) -> anyhow::Result<String> {
let tool_call_arguments = serde_json::to_string(&json!({
"questions": [{
"id": "confirm_path",
"header": "Confirm",
"question": "Proceed with the plan?",
"options": [{
"label": "Yes (Recommended)",
"description": "Continue the current plan."
}, {
"label": "No",
"description": "Stop and revisit the approach."
}]
}]
}))?;
Ok(responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, "request_user_input", &tool_call_arguments),
responses::ev_completed("resp-1"),
]))
let sse = format!(
"data: {}\n\ndata: DONE\n\n",
serde_json::to_string(&tool_call)?
);
Ok(sse)
}


@@ -6,23 +6,10 @@ use codex_protocol::protocol::SessionMetaLine;
use codex_protocol::protocol::SessionSource;
use serde_json::json;
use std::fs;
use std::fs::FileTimes;
use std::path::Path;
use std::path::PathBuf;
use uuid::Uuid;
pub fn rollout_path(codex_home: &Path, filename_ts: &str, thread_id: &str) -> PathBuf {
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
codex_home
.join("sessions")
.join(year)
.join(month)
.join(day)
.join(format!("rollout-{filename_ts}-{thread_id}.jsonl"))
}
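// Hedged example: with filename_ts "2025-01-02T12-00-00" and thread id "abc",
// the helper above yields
//
//   <codex_home>/sessions/2025/01/02/rollout-2025-01-02T12-00-00-abc.jsonl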
/// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`.
///
/// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format.
@@ -43,23 +30,25 @@ pub fn create_fake_rollout(
let uuid_str = uuid.to_string();
let conversation_id = ThreadId::from_string(&uuid_str)?;
let file_path = rollout_path(codex_home, filename_ts, &uuid_str);
let dir = file_path
.parent()
.ok_or_else(|| anyhow::anyhow!("missing rollout parent directory"))?;
fs::create_dir_all(dir)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
forked_from_id: None,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
instructions: None,
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
@@ -95,85 +84,6 @@ pub fn create_fake_rollout(
.to_string(),
];
fs::write(&file_path, lines.join("\n") + "\n")?;
let parsed = chrono::DateTime::parse_from_rfc3339(meta_rfc3339)?.with_timezone(&chrono::Utc);
let times = FileTimes::new().set_modified(parsed.into());
std::fs::OpenOptions::new()
.append(true)
.open(&file_path)?
.set_times(times)?;
Ok(uuid_str)
}
pub fn create_fake_rollout_with_text_elements(
codex_home: &Path,
filename_ts: &str,
meta_rfc3339: &str,
preview: &str,
text_elements: Vec<serde_json::Value>,
model_provider: Option<&str>,
git_info: Option<GitInfo>,
) -> Result<String> {
let uuid = Uuid::new_v4();
let uuid_str = uuid.to_string();
let conversation_id = ThreadId::from_string(&uuid_str)?;
// sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss)
let year = &filename_ts[0..4];
let month = &filename_ts[5..7];
let day = &filename_ts[8..10];
let dir = codex_home.join("sessions").join(year).join(month).join(day);
fs::create_dir_all(&dir)?;
let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl"));
// Build JSONL lines
let meta = SessionMeta {
id: conversation_id,
forked_from_id: None,
timestamp: meta_rfc3339.to_string(),
cwd: PathBuf::from("/"),
originator: "codex".to_string(),
cli_version: "0.0.0".to_string(),
source: SessionSource::Cli,
model_provider: model_provider.map(str::to_string),
base_instructions: None,
};
let payload = serde_json::to_value(SessionMetaLine {
meta,
git: git_info,
})?;
let lines = [
json!( {
"timestamp": meta_rfc3339,
"type": "session_meta",
"payload": payload
})
.to_string(),
json!( {
"timestamp": meta_rfc3339,
"type":"response_item",
"payload": {
"type":"message",
"role":"user",
"content":[{"type":"input_text","text": preview}]
}
})
.to_string(),
json!( {
"timestamp": meta_rfc3339,
"type":"event_msg",
"payload": {
"type":"user_message",
"message": preview,
"text_elements": text_elements,
"local_images": []
}
})
.to_string(),
];
fs::write(file_path, lines.join("\n") + "\n")?;
Ok(uuid_str)
}


@@ -37,7 +37,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
{requires_line}


@@ -1,7 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell;
use app_test_support::to_response;
@@ -65,7 +65,7 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
)?,
create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
];
let server = create_mock_responses_server_sequence(responses).await;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
// Start MCP server and initialize.
@@ -108,17 +108,12 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
let AddConversationSubscriptionResponse { subscription_id } =
to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;
// Drop any buffered events from conversation setup to avoid
// matching an earlier task_complete.
mcp.clear_message_buffer();
// 3) sendUserMessage (should trigger notifications; we only validate an OK response)
let send_user_id = mcp
.send_send_user_message_request(SendUserMessageParams {
conversation_id,
items: vec![codex_app_server_protocol::InputItem::Text {
text: "text".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -129,38 +124,13 @@ async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
.await??;
let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;
let task_started_notification: JSONRPCNotification = timeout(
// Verify the task_finished notification is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_started"),
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let task_started_event: Event = serde_json::from_value(
task_started_notification
.params
.clone()
.expect("task_started should have params"),
)
.expect("task_started should deserialize to Event");
// Verify the task_finished notification for this turn is received.
// Note this also ensures that the final request to the server was made.
let task_finished_notification: JSONRPCNotification = loop {
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
let event: Event = serde_json::from_value(
notification
.params
.clone()
.expect("task_complete should have params"),
)
.expect("task_complete should deserialize to Event");
if event.id == task_started_event.id {
break notification;
}
};
let serde_json::Value::Object(map) = task_finished_notification
.params
.expect("notification should have params")
@@ -227,7 +197,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
)?,
create_final_assistant_message_sse_response("done 2")?,
];
let server = create_mock_responses_server_sequence(responses).await;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
// Start MCP server and initialize.
@@ -271,7 +241,6 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
conversation_id,
items: vec![codex_app_server_protocol::InputItem::Text {
text: "run python".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -314,7 +283,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
)
.await?;
// Wait for first TurnComplete
// Wait for first TaskComplete
let _ = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
@@ -327,7 +296,6 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
conversation_id,
items: vec![codex_app_server_protocol::InputItem::Text {
text: "run python again".to_string(),
text_elements: Vec::new(),
}],
cwd: working_directory.clone(),
approval_policy: AskForApproval::Never,
@@ -395,7 +363,7 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
)?,
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_responses_server_sequence(responses).await;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri())?;
let mut mcp = McpProcess::new(&codex_home).await?;
@@ -437,7 +405,6 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
conversation_id,
items: vec![InputItem::Text {
text: "first turn".to_string(),
text_elements: Vec::new(),
}],
cwd: first_cwd.clone(),
approval_policy: AskForApproval::Never,
@@ -463,14 +430,12 @@ async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<(
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
mcp.clear_message_buffer();
let second_turn_id = mcp
.send_send_user_turn_request(SendUserTurnParams {
conversation_id,
items: vec![InputItem::Text {
text: "second turn".to_string(),
text_elements: Vec::new(),
}],
cwd: second_cwd.clone(),
approval_policy: AskForApproval::Never,
@@ -534,7 +499,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#


@@ -1,6 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -11,7 +12,6 @@ use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
@@ -23,9 +23,8 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_conversation_create_and_send_message_ok() -> Result<()> {
// Mock server: we won't strictly rely on it, but provide one to satisfy any model wiring.
let response_body = create_final_assistant_message_sse_response("Done")?;
let server = responses::start_mock_server().await;
let response_mock = responses::mount_sse_sequence(&server, vec![response_body]).await;
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_chat_completions_server(responses).await;
// Temporary Codex home with config pointing at the mock server.
let codex_home = TempDir::new()?;
@@ -77,7 +76,6 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -88,30 +86,32 @@ async fn test_conversation_create_and_send_message_ok() -> Result<()> {
.await??;
let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(send_resp)?;
// Avoid race condition by waiting for the mock server to receive the responses request.
// avoid race condition by waiting for the mock server to receive the chat.completions request
let deadline = std::time::Instant::now() + DEFAULT_READ_TIMEOUT;
let requests = loop {
let requests = response_mock.requests();
let requests = server.received_requests().await.unwrap_or_default();
if !requests.is_empty() {
break requests;
}
if std::time::Instant::now() >= deadline {
panic!("mock server did not receive the responses request in time");
panic!("mock server did not receive the chat.completions request in time");
}
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
};
// Verify the outbound request body matches expectations for Responses.
// Verify the outbound request body matches expectations for Chat Completions.
let request = requests
.first()
.expect("mock server should have received at least one request");
let body = request.body_json();
let body = request.body_json::<serde_json::Value>()?;
assert_eq!(body["model"], json!("o3"));
let user_texts = request.message_input_texts("user");
assert!(
user_texts.iter().any(|text| text == "Hello"),
"expected user input to include Hello, got {user_texts:?}"
);
assert!(body["stream"].as_bool().unwrap_or(false));
let messages = body["messages"]
.as_array()
.expect("messages should be array");
let last = messages.last().expect("at least one message");
assert_eq!(last["role"], json!("user"));
assert_eq!(last["content"], json!("Hello"));
drop(server);
Ok(())
@@ -133,7 +133,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

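The deadline-poll loop above recurs throughout these tests: check the mock server, panic once DEFAULT_READ_TIMEOUT elapses, otherwise sleep briefly and retry. A generic version for a synchronous probe could look like the following sketch (the helper name is ours; an async probe would instead take a closure returning a future):

use std::time::{Duration, Instant};

// Hypothetical helper: polls `probe` every 10ms until it yields Some, or panics at the deadline.
async fn poll_until<T>(deadline: Duration, mut probe: impl FnMut() -> Option<T>) -> T {
    let end = Instant::now() + deadline;
    loop {
        if let Some(value) = probe() {
            return value;
        }
        if Instant::now() >= end {
            panic!("condition not met within {deadline:?}");
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
    }
}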
View File

@@ -1,140 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::to_response;
use codex_app_server_protocol::ForkConversationParams;
use codex_app_server_protocol::ForkConversationResponse;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams; // reused for overrides shape
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::SessionConfiguredNotification;
use codex_core::protocol::EventMsg;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn fork_conversation_creates_new_rollout() -> Result<()> {
let codex_home = TempDir::new()?;
let preview = "Hello A";
let conversation_id = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
preview,
Some("openai"),
None,
)?;
let original_path = codex_home
.path()
.join("sessions")
.join("2025")
.join("01")
.join("02")
.join(format!(
"rollout-2025-01-02T12-00-00-{conversation_id}.jsonl"
));
assert!(
original_path.exists(),
"expected original rollout to exist at {}",
original_path.display()
);
let original_contents = std::fs::read_to_string(&original_path)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let fork_req_id = mcp
.send_fork_conversation_request(ForkConversationParams {
path: Some(original_path.clone()),
conversation_id: None,
overrides: Some(NewConversationParams {
model: Some("o3".to_string()),
..Default::default()
}),
})
.await?;
// Expect a sessionConfigured notification for the forked session.
let notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("sessionConfigured"),
)
.await??;
let session_configured: ServerNotification = notification.try_into()?;
let ServerNotification::SessionConfigured(SessionConfiguredNotification {
model,
session_id,
rollout_path,
initial_messages: session_initial_messages,
..
}) = session_configured
else {
unreachable!("expected sessionConfigured notification");
};
assert_eq!(model, "o3");
assert_ne!(
session_id.to_string(),
conversation_id,
"expected a new conversation id when forking"
);
assert_ne!(
rollout_path, original_path,
"expected a new rollout path when forking"
);
assert!(
rollout_path.exists(),
"expected forked rollout to exist at {}",
rollout_path.display()
);
let session_initial_messages =
session_initial_messages.expect("expected initial messages when forking from rollout");
match session_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, preview);
}
other => panic!("unexpected initial messages from rollout fork: {other:#?}"),
}
// Then the response for forkConversation.
let fork_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fork_req_id)),
)
.await??;
let ForkConversationResponse {
conversation_id: forked_id,
model: forked_model,
initial_messages: response_initial_messages,
rollout_path: response_rollout_path,
} = to_response::<ForkConversationResponse>(fork_resp)?;
assert_eq!(forked_model, "o3");
assert_eq!(response_rollout_path, rollout_path);
assert_ne!(forked_id.to_string(), conversation_id);
let response_initial_messages =
response_initial_messages.expect("expected initial messages in fork response");
match response_initial_messages.as_slice() {
[EventMsg::UserMessage(message)] => {
assert_eq!(message.message, preview);
}
other => panic!("unexpected initial messages in fork response: {other:#?}"),
}
let after_contents = std::fs::read_to_string(&original_path)?;
assert_eq!(
after_contents, original_contents,
"fork should not mutate the original rollout file"
);
Ok(())
}

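For context on the path assertions in this deleted test: the fake rollout lands in a date-sharded directory under sessions/, and the test rebuilds that path by hand. A sketch of the layout it encodes (the helper name is ours):

use std::path::{Path, PathBuf};

// Mirrors the sessions/YYYY/MM/DD/rollout-<ts>-<id>.jsonl layout asserted above.
fn rollout_path(codex_home: &Path, ts: &str, conversation_id: &str) -> PathBuf {
    // `ts` looks like "2025-01-02T12-00-00"; its date fields shard the directory tree.
    let year = &ts[0..4];
    let month = &ts[5..7];
    let day = &ts[8..10];
    codex_home
        .join("sessions")
        .join(year)
        .join(month)
        .join(day)
        .join(format!("rollout-{ts}-{conversation_id}.jsonl"))
}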
View File

@@ -18,7 +18,7 @@ use tempfile::TempDir;
use tokio::time::timeout;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
@@ -56,7 +56,7 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
std::fs::create_dir(&working_directory)?;
    // Create a mock server with a single SSE response: the long sleep command.
let server = create_mock_responses_server_sequence(vec![create_shell_command_sse_response(
let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000), // 10 seconds timeout in ms
@@ -105,7 +105,6 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
conversation_id,
items: vec![codex_app_server_protocol::InputItem::Text {
text: "run first sleep command".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -154,7 +153,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -307,7 +307,6 @@ async fn test_list_and_resume_conversations() -> Result<()> {
content: vec![ContentItem::InputText {
text: fork_history_text.to_string(),
}],
end_turn: None,
}];
let resume_with_history_req_id = mcp
.send_resume_conversation_request(ResumeConversationParams {

View File

@@ -32,7 +32,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#,

View File

@@ -3,7 +3,6 @@ mod auth;
mod codex_message_processor_flow;
mod config;
mod create_thread;
mod fork_thread;
mod fuzzy_file_search;
mod interrupt;
mod list_resume;

View File

@@ -80,7 +80,6 @@ async fn send_user_turn_accepts_output_schema_v1() -> Result<()> {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
@@ -182,7 +181,6 @@ async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,
@@ -230,7 +228,6 @@ async fn send_user_turn_output_schema_is_per_turn_v1() -> Result<()> {
conversation_id,
items: vec![InputItem::Text {
text: "Hello again".to_string(),
text_elements: Vec::new(),
}],
cwd: codex_home.path().to_path_buf(),
approval_policy: AskForApproval::Never,

View File

@@ -1,5 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -13,15 +15,10 @@ use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_protocol::ThreadId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::RawResponseItemEvent;
use codex_protocol::protocol::SandboxPolicy;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -29,21 +26,13 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
#[tokio::test]
async fn test_send_message_success() -> Result<()> {
// Spin up a mock responses server that immediately ends the Codex turn.
// Spin up a mock completions server that immediately ends the Codex turn.
// Two Codex turns hit the mock model (session start + send-user-message). Provide two SSE responses.
let server = responses::start_mock_server().await;
let body1 = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let body2 = responses::sse(vec![
responses::ev_response_created("resp-2"),
responses::ev_assistant_message("msg-2", "Done"),
responses::ev_completed("resp-2"),
]);
let _response_mock1 = responses::mount_sse_once(&server, body1).await;
let _response_mock2 = responses::mount_sse_once(&server, body2).await;
let responses = vec![
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_chat_completions_server(responses).await;
// Create a temporary Codex home with config pointing at the mock server.
let codex_home = TempDir::new()?;
@@ -101,7 +90,6 @@ async fn send_message(
conversation_id,
items: vec![InputItem::Text {
text: message.to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -147,13 +135,8 @@ async fn send_message(
#[tokio::test]
async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let _response_mock = responses::mount_sse_once(&server, body).await;
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_chat_completions_server(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -195,14 +178,10 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
conversation_id,
items: vec![InputItem::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
let permissions = read_raw_response_item(&mut mcp, conversation_id).await;
assert_permissions_message(&permissions);
let developer = read_raw_response_item(&mut mcp, conversation_id).await;
assert_developer_message(&developer, "Use the test harness tools.");
@@ -247,7 +226,6 @@ async fn test_send_message_session_not_found() -> Result<()> {
conversation_id: unknown,
items: vec![InputItem::Text {
text: "ping".to_string(),
text_elements: Vec::new(),
}],
})
.await?;
@@ -281,7 +259,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
@@ -291,7 +269,6 @@ stream_max_retries = 0
#[expect(clippy::expect_used)]
async fn read_raw_response_item(mcp: &mut McpProcess, conversation_id: ThreadId) -> ResponseItem {
// TODO: Switch to rawResponseItem/completed once we migrate to app server v2 in codex web.
loop {
let raw_notification: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
@@ -350,27 +327,6 @@ fn assert_instructions_message(item: &ResponseItem) {
}
}
fn assert_permissions_message(item: &ResponseItem) {
match item {
ResponseItem::Message { role, content, .. } => {
assert_eq!(role, "developer");
let texts = content_texts(content);
let expected = DeveloperInstructions::from_policy(
&SandboxPolicy::DangerFullAccess,
AskForApproval::Never,
&PathBuf::from("/tmp"),
)
.into_text();
assert_eq!(
texts,
vec![expected.as_str()],
"expected permissions developer message, got {texts:?}"
);
}
other => panic!("expected permissions message, got {other:?}"),
}
}
fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
match item {
ResponseItem::Message { role, content, .. } => {
@@ -428,7 +384,7 @@ fn content_texts(content: &[ContentItem]) -> Vec<&str> {
content
.iter()
.filter_map(|item| match item {
ContentItem::InputText { text, .. } | ContentItem::OutputText { text } => {
ContentItem::InputText { text } | ContentItem::OutputText { text } => {
Some(text.as_str())
}
_ => None,

View File

@@ -1,5 +1,4 @@
use anyhow::Result;
use app_test_support::DEFAULT_CLIENT_NAME;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::GetUserAgentResponse;
@@ -26,13 +25,13 @@ async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> {
.await??;
let os_info = os_info::get();
let originator = DEFAULT_CLIENT_NAME;
let originator = codex_core::default_client::originator().value.as_str();
let os_type = os_info.os_type();
let os_version = os_info.version();
let architecture = os_info.architecture().unwrap_or("unknown");
let terminal_ua = codex_core::terminal::user_agent();
let user_agent = format!(
"{originator}/0.0.0 ({os_type} {os_version}; {architecture}) {terminal_ua} ({DEFAULT_CLIENT_NAME}; 0.1.0)"
"{originator}/0.0.0 ({os_type} {os_version}; {architecture}) {terminal_ua} (codex-app-server-tests; 0.1.0)"
);
let received: GetUserAgentResponse = to_response(response)?;

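As a worked example of the format string asserted above, with illustrative values substituted for the probed OS and terminal data:

fn main() {
    // Illustrative values only: the originator, OS info, and terminal UA vary per machine.
    let (originator, os, arch, terminal_ua) = ("codex_vscode", "Mac OS 15.1", "arm64", "iTerm.app/3.5");
    let user_agent =
        format!("{originator}/0.0.0 ({os}; {arch}) {terminal_ua} (codex-app-server-tests; 0.1.0)");
    assert_eq!(
        user_agent,
        "codex_vscode/0.0.0 (Mac OS 15.1; arm64) iTerm.app/3.5 (codex-app-server-tests; 0.1.0)"
    );
}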
View File

@@ -67,7 +67,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "http://127.0.0.1:0/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
{requires_line}

View File

@@ -1,66 +0,0 @@
use anyhow::Result;
use codex_core::config::ConfigBuilder;
use codex_core::config::types::OtelExporterKind;
use codex_core::config::types::OtelHttpProtocol;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use tempfile::TempDir;
const SERVICE_VERSION: &str = "0.0.0-test";
fn set_metrics_exporter(config: &mut codex_core::config::Config) {
config.otel.metrics_exporter = OtelExporterKind::OtlpHttp {
endpoint: "http://localhost:4318".to_string(),
headers: HashMap::new(),
protocol: OtelHttpProtocol::Json,
tls: None,
};
}
#[tokio::test]
async fn app_server_default_analytics_disabled_without_flag() -> Result<()> {
let codex_home = TempDir::new()?;
let mut config = ConfigBuilder::default()
.codex_home(codex_home.path().to_path_buf())
.build()
.await?;
set_metrics_exporter(&mut config);
config.analytics_enabled = None;
let provider = codex_core::otel_init::build_provider(
&config,
SERVICE_VERSION,
Some("codex_app_server"),
false,
)
.map_err(|err| anyhow::anyhow!(err.to_string()))?;
    // With analytics unset in the config and the default flag set to false,
    // metrics are disabled, so no provider is built.
    assert!(provider.is_none());
Ok(())
}
#[tokio::test]
async fn app_server_default_analytics_enabled_with_flag() -> Result<()> {
let codex_home = TempDir::new()?;
let mut config = ConfigBuilder::default()
.codex_home(codex_home.path().to_path_buf())
.build()
.await?;
set_metrics_exporter(&mut config);
config.analytics_enabled = None;
let provider = codex_core::otel_init::build_provider(
&config,
SERVICE_VERSION,
Some("codex_app_server"),
true,
)
.map_err(|err| anyhow::anyhow!(err.to_string()))?;
    // With analytics unset in the config and the default flag set to true, metrics are enabled.
let has_metrics = provider.as_ref().and_then(|otel| otel.metrics()).is_some();
    assert!(has_metrics);
Ok(())
}

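Taken together, the two deleted tests pin down a simple gating rule: with analytics_enabled unset in the config, the boolean passed to build_provider decides whether metrics are wired up. A sketch of that rule as we read it from the tests (not the implementation itself):

// Our reading of the gating rule the two tests encode.
fn metrics_enabled(analytics_enabled: Option<bool>, default_flag: bool) -> bool {
    // An explicit config value would win; otherwise the caller-supplied default applies.
    analytics_enabled.unwrap_or(default_flag)
}

fn main() {
    assert!(!metrics_enabled(None, false)); // first test: no provider is built
    assert!(metrics_enabled(None, true)); // second test: metrics are present
}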
View File

@@ -1,381 +0,0 @@
use std::borrow::Cow;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use axum::Json;
use axum::Router;
use axum::extract::State;
use axum::http::HeaderMap;
use axum::http::StatusCode;
use axum::http::header::AUTHORIZATION;
use axum::routing::post;
use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::connectors::ConnectorInfo;
use pretty_assertions::assert_eq;
use rmcp::handler::server::ServerHandler;
use rmcp::model::JsonObject;
use rmcp::model::ListToolsResult;
use rmcp::model::Meta;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use rmcp::model::ToolAnnotations;
use rmcp::transport::StreamableHttpServerConfig;
use rmcp::transport::StreamableHttpService;
use rmcp::transport::streamable_http_server::session::local::LocalSessionManager;
use serde_json::json;
use tempfile::TempDir;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
#[tokio::test]
async fn list_apps_returns_empty_when_connectors_disabled() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: Some(50),
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
assert!(data.is_empty());
assert!(next_cursor.is_none());
Ok(())
}
#[tokio::test]
async fn list_apps_returns_connectors_with_accessible_flags() -> Result<()> {
let connectors = vec![
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
install_url: None,
is_accessible: false,
},
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_apps_list_request(AppsListParams {
limit: None,
cursor: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let AppsListResponse { data, next_cursor } = to_response(response)?;
let expected = vec![
AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
},
AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: Some("https://example.com/alpha.png".to_string()),
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
},
];
assert_eq!(data, expected);
assert!(next_cursor.is_none());
server_handle.abort();
Ok(())
}
#[tokio::test]
async fn list_apps_paginates_results() -> Result<()> {
let connectors = vec![
ConnectorInfo {
connector_id: "alpha".to_string(),
connector_name: "Alpha".to_string(),
connector_description: Some("Alpha connector".to_string()),
logo_url: None,
install_url: None,
is_accessible: false,
},
ConnectorInfo {
connector_id: "beta".to_string(),
connector_name: "beta".to_string(),
connector_description: None,
logo_url: None,
install_url: None,
is_accessible: false,
},
];
let tools = vec![connector_tool("beta", "Beta App")?];
let (server_url, server_handle) = start_apps_server(connectors.clone(), tools).await?;
let codex_home = TempDir::new()?;
write_connectors_config(codex_home.path(), &server_url)?;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("chatgpt-token")
.account_id("account-123")
.chatgpt_user_id("user-123")
.chatgpt_account_id("account-123"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let first_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: None,
})
.await?;
let first_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(first_request)),
)
.await??;
let AppsListResponse {
data: first_page,
next_cursor: first_cursor,
} = to_response(first_response)?;
let expected_first = vec![AppInfo {
id: "beta".to_string(),
name: "Beta App".to_string(),
description: None,
logo_url: None,
install_url: Some("https://chatgpt.com/apps/beta/beta".to_string()),
is_accessible: true,
}];
assert_eq!(first_page, expected_first);
let next_cursor = first_cursor.ok_or_else(|| anyhow::anyhow!("missing cursor"))?;
let second_request = mcp
.send_apps_list_request(AppsListParams {
limit: Some(1),
cursor: Some(next_cursor),
})
.await?;
let second_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(second_request)),
)
.await??;
let AppsListResponse {
data: second_page,
next_cursor: second_cursor,
} = to_response(second_response)?;
let expected_second = vec![AppInfo {
id: "alpha".to_string(),
name: "Alpha".to_string(),
description: Some("Alpha connector".to_string()),
logo_url: None,
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
is_accessible: false,
}];
assert_eq!(second_page, expected_second);
assert!(second_cursor.is_none());
server_handle.abort();
Ok(())
}
#[derive(Clone)]
struct AppsServerState {
expected_bearer: String,
expected_account_id: String,
response: serde_json::Value,
}
#[derive(Clone)]
struct AppListMcpServer {
tools: Arc<Vec<Tool>>,
}
impl AppListMcpServer {
fn new(tools: Arc<Vec<Tool>>) -> Self {
Self { tools }
}
}
impl ServerHandler for AppListMcpServer {
fn get_info(&self) -> ServerInfo {
ServerInfo {
capabilities: ServerCapabilities::builder().enable_tools().build(),
..ServerInfo::default()
}
}
fn list_tools(
&self,
_request: Option<rmcp::model::PaginatedRequestParam>,
_context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
) -> impl std::future::Future<Output = Result<ListToolsResult, rmcp::ErrorData>> + Send + '_
{
let tools = self.tools.clone();
async move {
Ok(ListToolsResult {
tools: (*tools).clone(),
next_cursor: None,
meta: None,
})
}
}
}
async fn start_apps_server(
connectors: Vec<ConnectorInfo>,
tools: Vec<Tool>,
) -> Result<(String, JoinHandle<()>)> {
let state = AppsServerState {
expected_bearer: "Bearer chatgpt-token".to_string(),
expected_account_id: "account-123".to_string(),
response: json!({ "connectors": connectors }),
};
let state = Arc::new(state);
let tools = Arc::new(tools);
let listener = TcpListener::bind("127.0.0.1:0").await?;
let addr = listener.local_addr()?;
let mcp_service = StreamableHttpService::new(
{
let tools = tools.clone();
move || Ok(AppListMcpServer::new(tools.clone()))
},
Arc::new(LocalSessionManager::default()),
StreamableHttpServerConfig::default(),
);
let router = Router::new()
.route("/aip/connectors/list_accessible", post(list_connectors))
.with_state(state)
.nest_service("/api/codex/apps", mcp_service);
let handle = tokio::spawn(async move {
let _ = axum::serve(listener, router).await;
});
Ok((format!("http://{addr}"), handle))
}
async fn list_connectors(
State(state): State<Arc<AppsServerState>>,
headers: HeaderMap,
) -> Result<impl axum::response::IntoResponse, StatusCode> {
let bearer_ok = headers
.get(AUTHORIZATION)
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_bearer);
let account_ok = headers
.get("chatgpt-account-id")
.and_then(|value| value.to_str().ok())
.is_some_and(|value| value == state.expected_account_id);
if bearer_ok && account_ok {
Ok(Json(state.response.clone()))
} else {
Err(StatusCode::UNAUTHORIZED)
}
}
fn connector_tool(connector_id: &str, connector_name: &str) -> Result<Tool> {
let schema: JsonObject = serde_json::from_value(json!({
"type": "object",
"additionalProperties": false
}))?;
let mut tool = Tool::new(
Cow::Owned(format!("connector_{connector_id}")),
Cow::Borrowed("Connector test tool"),
Arc::new(schema),
);
tool.annotations = Some(ToolAnnotations::new().read_only(true));
let mut meta = Meta::new();
meta.0
.insert("connector_id".to_string(), json!(connector_id));
meta.0
.insert("connector_name".to_string(), json!(connector_name));
tool.meta = Some(meta);
Ok(tool)
}
fn write_connectors_config(codex_home: &std::path::Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
chatgpt_base_url = "{base_url}"
[features]
connectors = true
"#
),
)
}

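Reading the expected AppInfo values above, the apps list appears to join the connector list against the MCP tool metadata by connector_id: a matching tool marks the connector accessible and supplies its display name, install_url is derived as https://chatgpt.com/apps/{id}/{id}, and accessible apps sort first. A sketch of that join, under those assumptions:

use std::collections::HashMap;

// Sketch of the merge the expectations imply; ConnectorInfo/AppInfo as used in the test.
fn merge(connectors: Vec<ConnectorInfo>, tool_names: HashMap<String, String>) -> Vec<AppInfo> {
    let mut apps: Vec<AppInfo> = connectors
        .into_iter()
        .map(|c| AppInfo {
            is_accessible: tool_names.contains_key(&c.connector_id),
            name: tool_names
                .get(&c.connector_id)
                .cloned()
                .unwrap_or(c.connector_name),
            install_url: Some(format!("https://chatgpt.com/apps/{0}/{0}", c.connector_id)),
            description: c.connector_description,
            logo_url: c.logo_url,
            id: c.connector_id,
        })
        .collect();
    // Accessible apps appear ahead of inaccessible ones in the expected output.
    apps.sort_by_key(|app| !app.is_accessible);
    apps
}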
View File

@@ -1,111 +0,0 @@
//! Validates that the collaboration mode list endpoint returns the expected default presets.
//!
//! The test drives the app server through the MCP harness and asserts that the list response
//! includes the plan, code, pair programming, and execute modes with their default model and
//! reasoning effort settings, which keeps the API contract visible in one place.
#![allow(clippy::unwrap_used)]
use std::time::Duration;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::CollaborationModeListResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_core::models_manager::test_builtin_collaboration_mode_presets;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
/// Confirms the server returns the default collaboration mode presets in a stable order.
#[tokio::test]
async fn list_collaboration_modes_returns_presets() -> Result<()> {
let codex_home = TempDir::new()?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_collaboration_modes_request(CollaborationModeListParams {})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let CollaborationModeListResponse { data: items } =
to_response::<CollaborationModeListResponse>(response)?;
let expected = [
plan_preset(),
code_preset(),
pair_programming_preset(),
execute_preset(),
];
assert_eq!(expected.len(), items.len());
for (expected_mask, actual_mask) in expected.iter().zip(items.iter()) {
assert_eq!(expected_mask.name, actual_mask.name);
assert_eq!(expected_mask.mode, actual_mask.mode);
assert_eq!(expected_mask.model, actual_mask.model);
assert_eq!(expected_mask.reasoning_effort, actual_mask.reasoning_effort);
assert_eq!(
expected_mask.developer_instructions,
actual_mask.developer_instructions
);
}
Ok(())
}
/// Builds the plan preset that the list response is expected to return.
///
/// If the defaults change in the app server, this helper should be updated alongside the
/// contract; otherwise the test will fail in ways that suggest an API regression.
fn plan_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Plan))
.unwrap()
}
/// Builds the pair programming preset that the list response is expected to return.
///
/// The helper keeps the expected model and reasoning defaults co-located with the test
/// so that mismatches point directly at the API contract being exercised.
fn pair_programming_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::PairProgramming))
.unwrap()
}
/// Builds the code preset that the list response is expected to return.
fn code_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Code))
.unwrap()
}
/// Builds the execute preset that the list response is expected to return.
///
/// The execute preset uses a different reasoning effort to capture the higher-effort
/// execution contract the server currently exposes.
fn execute_preset() -> CollaborationModeMask {
let presets = test_builtin_collaboration_mode_presets();
presets
.into_iter()
.find(|p| p.mode == Some(ModeKind::Execute))
.unwrap()
}

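The four lookup helpers above differ only in the ModeKind they select; a single parameterized lookup would keep the same contract while removing the repetition. A sketch:

use codex_core::models_manager::test_builtin_collaboration_mode_presets;
use codex_protocol::config_types::{CollaborationModeMask, ModeKind};

// Sketch: one lookup shared by the plan, code, pair programming, and execute presets.
fn preset_for(mode: ModeKind) -> CollaborationModeMask {
    test_builtin_collaboration_mode_presets()
        .into_iter()
        .find(|preset| preset.mode.as_ref() == Some(&mode))
        .expect("builtin preset should exist for every mode")
}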
View File

@@ -18,10 +18,7 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use codex_core::config::set_project_trust_level;
use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use serde_json::json;
@@ -56,7 +53,6 @@ sandbox_mode = "workspace-write"
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -105,7 +101,6 @@ view_image = false
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -146,52 +141,6 @@ view_image = false
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_project_layers_for_cwd() -> Result<()> {
let codex_home = TempDir::new()?;
write_config(&codex_home, r#"model = "gpt-user""#)?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let project_config = AbsolutePathBuf::try_from(project_config_dir)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: Some(workspace.path().to_string_lossy().into_owned()),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ConfigReadResponse {
config, origins, ..
} = to_response(resp)?;
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
assert_eq!(
origins.get("model_reasoning_effort").expect("origin").name,
ConfigLayerSource::Project {
dot_codex_folder: project_config
}
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -246,7 +195,6 @@ writable_roots = [{}]
let request_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: true,
cwd: None,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -333,7 +281,6 @@ model = "gpt-old"
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
@@ -368,7 +315,6 @@ model = "gpt-old"
let verify_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let verify_resp: JSONRPCResponse = timeout(
@@ -465,7 +411,6 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> {
let read_id = mcp
.send_config_read_request(ConfigReadParams {
include_layers: false,
cwd: None,
})
.await?;
let read_resp: JSONRPCResponse = timeout(

View File

@@ -1,286 +0,0 @@
use anyhow::Context;
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::DynamicToolCallParams;
use codex_app_server_protocol::DynamicToolCallResponse;
use codex_app_server_protocol::DynamicToolSpec;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::responses;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::path::Path;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::MockServer;
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
/// Ensures dynamic tool specs are serialized into the model request payload.
#[tokio::test]
async fn thread_start_injects_dynamic_tools_into_model_requests() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Use a minimal JSON schema so we can assert the tool payload round-trips.
let input_schema = json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
});
let dynamic_tool = DynamicToolSpec {
name: "demo_tool".to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: input_schema.clone(),
};
// Thread start injects dynamic tools into the thread's tool registry.
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool.clone()]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so a model request is issued.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
// Inspect the captured model request to assert the tool spec made it through.
let bodies = responses_bodies(&server).await?;
let body = bodies
.first()
.context("expected at least one responses request")?;
let tool = find_tool(body, &dynamic_tool.name)
.context("expected dynamic tool to be injected into request")?;
assert_eq!(
tool.get("description"),
Some(&Value::String(dynamic_tool.description.clone()))
);
assert_eq!(tool.get("parameters"), Some(&input_schema));
Ok(())
}
/// Exercises the full dynamic tool call path (server request, client response, model output).
#[tokio::test]
async fn dynamic_tool_call_round_trip_sends_output_to_model() -> Result<()> {
let call_id = "dyn-call-1";
let tool_name = "demo_tool";
let tool_args = json!({ "city": "Paris" });
let tool_call_arguments = serde_json::to_string(&tool_args)?;
    // The first response triggers a dynamic tool call; the second closes the turn.
let responses = vec![
responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_function_call(call_id, tool_name, &tool_call_arguments),
responses::ev_completed("resp-1"),
]),
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let dynamic_tool = DynamicToolSpec {
name: tool_name.to_string(),
description: "Demo dynamic tool".to_string(),
input_schema: json!({
"type": "object",
"properties": {
"city": { "type": "string" }
},
"required": ["city"],
"additionalProperties": false,
}),
};
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
dynamic_tools: Some(vec![dynamic_tool]),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
// Start a turn so the tool call is emitted.
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Run the tool".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
// Read the tool call request from the app server.
let request = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let (request_id, params) = match request {
ServerRequest::DynamicToolCall { request_id, params } => (request_id, params),
other => panic!("expected DynamicToolCall request, got {other:?}"),
};
let expected = DynamicToolCallParams {
thread_id: thread.id,
turn_id: turn.id,
call_id: call_id.to_string(),
tool: tool_name.to_string(),
arguments: tool_args.clone(),
};
assert_eq!(params, expected);
// Respond to the tool call so the model receives a function_call_output.
let response = DynamicToolCallResponse {
output: "dynamic-ok".to_string(),
success: true,
};
mcp.send_response(request_id, serde_json::to_value(response)?)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let bodies = responses_bodies(&server).await?;
let output = bodies
.iter()
.find_map(|body| function_call_output_text(body, call_id))
.context("expected function_call_output in follow-up request")?;
assert_eq!(output, "dynamic-ok");
Ok(())
}
async fn responses_bodies(server: &MockServer) -> Result<Vec<Value>> {
let requests = server
.received_requests()
.await
.context("failed to fetch received requests")?;
requests
.into_iter()
.filter(|req| req.url.path().ends_with("/responses"))
.map(|req| {
req.body_json::<Value>()
.context("request body should be JSON")
})
.collect()
}
fn find_tool<'a>(body: &'a Value, name: &str) -> Option<&'a Value> {
body.get("tools")
.and_then(Value::as_array)
.and_then(|tools| {
tools
.iter()
.find(|tool| tool.get("name").and_then(Value::as_str) == Some(name))
})
}
fn function_call_output_text(body: &Value, call_id: &str) -> Option<String> {
body.get("input")
.and_then(Value::as_array)
.and_then(|items| {
items.iter().find(|item| {
item.get("type").and_then(Value::as_str) == Some("function_call_output")
&& item.get("call_id").and_then(Value::as_str) == Some(call_id)
})
})
.and_then(|item| item.get("output"))
.and_then(Value::as_str)
.map(str::to_string)
}
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

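For reference, the item that function_call_output_text digs out of the follow-up request has this wire shape (a sketch consistent with the three fields the helper reads):

use serde_json::{Value, json};

fn main() {
    // Only `type`, `call_id`, and `output` are read by the helper above.
    let item: Value = json!({
        "type": "function_call_output",
        "call_id": "dyn-call-1",
        "output": "dynamic-ok"
    });
    assert_eq!(item["output"], json!("dynamic-ok"));
}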
View File

@@ -1,133 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::ExecRunParams;
use codex_app_server_protocol::ExecRunResponse;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
#[tokio::test]
async fn exec_run_completes_turn_and_returns_final_message() -> Result<()> {
skip_if_no_network!(Ok(()));
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
mcp.initialize().await?;
let request_id = mcp
.send_exec_run_request(ExecRunParams {
input: vec![V2UserInput::Text {
text: "Generate a title".to_string(),
text_elements: Vec::new(),
}],
model: None,
model_provider: None,
cwd: None,
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
config: None,
base_instructions: None,
developer_instructions: None,
output_schema: None,
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let response: ExecRunResponse = to_response(response)?;
assert_eq!(response.status, TurnStatus::Completed);
assert_eq!(response.last_agent_message, Some("Done".to_string()));
assert_eq!(response.error, None);
assert!(!response.thread_id.is_empty(), "thread_id should be set");
assert!(!response.turn_id.is_empty(), "turn_id should be set");
Ok(())
}
#[tokio::test]
async fn exec_run_rejects_empty_input() -> Result<()> {
skip_if_no_network!(Ok(()));
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
mcp.initialize().await?;
let request_id = mcp
.send_exec_run_request(ExecRunParams {
input: Vec::new(),
model: None,
model_provider: None,
cwd: None,
effort: None,
summary: None,
collaboration_mode: None,
personality: None,
config: None,
base_instructions: None,
developer_instructions: None,
output_schema: None,
})
.await?;
let error = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE);
assert_eq!(error.error.message, "input must not be empty".to_string());
Ok(())
}
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,137 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::JSONRPCMessage;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn initialize_uses_client_info_name_as_originator() -> Result<()> {
let responses = Vec::new();
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
let message = timeout(
DEFAULT_READ_TIMEOUT,
mcp.initialize_with_client_info(ClientInfo {
name: "codex_vscode".to_string(),
title: Some("Codex VS Code Extension".to_string()),
version: "0.1.0".to_string(),
}),
)
.await??;
let JSONRPCMessage::Response(response) = message else {
anyhow::bail!("expected initialize response, got {message:?}");
};
let InitializeResponse { user_agent } = to_response::<InitializeResponse>(response)?;
assert!(user_agent.starts_with("codex_vscode/"));
Ok(())
}
#[tokio::test]
async fn initialize_respects_originator_override_env_var() -> Result<()> {
let responses = Vec::new();
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[(
"CODEX_INTERNAL_ORIGINATOR_OVERRIDE",
Some("codex_originator_via_env_var"),
)],
)
.await?;
let message = timeout(
DEFAULT_READ_TIMEOUT,
mcp.initialize_with_client_info(ClientInfo {
name: "codex_vscode".to_string(),
title: Some("Codex VS Code Extension".to_string()),
version: "0.1.0".to_string(),
}),
)
.await??;
let JSONRPCMessage::Response(response) = message else {
anyhow::bail!("expected initialize response, got {message:?}");
};
let InitializeResponse { user_agent } = to_response::<InitializeResponse>(response)?;
assert!(user_agent.starts_with("codex_originator_via_env_var/"));
Ok(())
}
#[tokio::test]
async fn initialize_rejects_invalid_client_name() -> Result<()> {
let responses = Vec::new();
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new_with_env(
codex_home.path(),
&[("CODEX_INTERNAL_ORIGINATOR_OVERRIDE", None)],
)
.await?;
let message = timeout(
DEFAULT_READ_TIMEOUT,
mcp.initialize_with_client_info(ClientInfo {
name: "bad\rname".to_string(),
title: Some("Bad Client".to_string()),
version: "0.1.0".to_string(),
}),
)
.await??;
let JSONRPCMessage::Error(error) = message else {
anyhow::bail!("expected initialize error, got {message:?}");
};
assert_eq!(error.error.code, -32600);
assert_eq!(
error.error.message,
"Invalid clientInfo.name: 'bad\rname'. Must be a valid HTTP header value."
);
assert_eq!(error.error.data, None);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "{approval_policy}"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

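The invalid-name case above suggests the originator is validated as an HTTP header value before use; the embedded \r is what trips it. An equivalent standalone check could lean on the http crate's parser (our choice for illustration, not necessarily what the server uses):

use http::HeaderValue;

// Sketch: reject originators that cannot be sent as an HTTP header value, e.g. "bad\rname".
fn is_valid_originator(name: &str) -> bool {
    HeaderValue::from_str(name).is_ok()
}

fn main() {
    assert!(is_valid_originator("codex_vscode"));
    assert!(!is_valid_originator("bad\rname"));
}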
View File

@@ -1,21 +1,11 @@
mod account;
mod analytics;
mod app_list;
mod collaboration_mode_list;
mod config_rpc;
mod dynamic_tools;
mod exec_run;
mod initialize;
mod model_list;
mod output_schema;
mod rate_limits;
mod request_user_input;
mod review;
mod thread_archive;
mod thread_fork;
mod thread_list;
mod thread_loaded_list;
mod thread_read;
mod thread_resume;
mod thread_rollback;
mod thread_start;

View File

@@ -72,7 +72,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: true,
},
Model {
@@ -100,7 +99,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -120,7 +118,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
Model {
@@ -154,7 +151,6 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
supports_personality: false,
is_default: false,
},
];

View File

@@ -61,7 +61,6 @@ async fn turn_start_accepts_output_schema_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
@@ -143,7 +142,6 @@ async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
output_schema: Some(output_schema.clone()),
..Default::default()
@@ -185,7 +183,6 @@ async fn turn_start_output_schema_is_per_turn_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello again".to_string(),
text_elements: Vec::new(),
}],
output_schema: None,
..Default::default()

View File

@@ -1,138 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_request_user_input_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn request_user_input_round_trip() -> Result<()> {
let codex_home = tempfile::TempDir::new()?;
let responses = vec![
create_request_user_input_sse_response("call1")?,
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?;
let turn_start_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "ask something".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
collaboration_mode: Some(CollaborationMode {
mode: ModeKind::Plan,
settings: Settings {
model: "mock-model".to_string(),
reasoning_effort: Some(ReasoningEffort::Medium),
developer_instructions: None,
},
}),
..Default::default()
})
.await?;
let turn_start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_start_id)),
)
.await??;
let TurnStartResponse { turn, .. } = to_response(turn_start_resp)?;
let server_req = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_request_message(),
)
.await??;
let ServerRequest::ToolRequestUserInput { request_id, params } = server_req else {
panic!("expected ToolRequestUserInput request, got: {server_req:?}");
};
assert_eq!(params.thread_id, thread.id);
assert_eq!(params.turn_id, turn.id);
assert_eq!(params.item_id, "call1");
assert_eq!(params.questions.len(), 1);
mcp.send_response(
request_id,
serde_json::json!({
"answers": {
"confirm_path": { "answers": ["yes"] }
}
}),
)
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
Ok(())
}
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "untrusted"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
collaboration_modes = true
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

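The answer payload sent back above is keyed by question id, each entry carrying its own answers list. A typed mirror of that JSON, with struct names of our own invention (the test sends raw serde_json values):

use serde::Serialize;
use std::collections::HashMap;

// Hypothetical typed mirror of the test's answer payload; names are illustrative.
#[derive(Serialize)]
struct QuestionAnswers {
    answers: Vec<String>,
}

#[derive(Serialize)]
struct UserInputResponse {
    answers: HashMap<String, QuestionAnswers>,
}

fn main() -> serde_json::Result<()> {
    let response = UserInputResponse {
        answers: HashMap::from([(
            "confirm_path".to_string(),
            QuestionAnswers { answers: vec!["yes".to_string()] },
        )]),
    };
    // Serializes to {"answers":{"confirm_path":{"answers":["yes"]}}}.
    println!("{}", serde_json::to_string(&response)?);
    Ok(())
}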
View File

@@ -1,6 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
@@ -43,7 +44,10 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
"overall_confidence_score": 0.75
})
.to_string();
let server = create_mock_responses_server_repeating_assistant(&review_payload).await;
let responses = vec![create_final_assistant_message_sse_response(
&review_payload,
)?];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -131,7 +135,7 @@ async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()
#[tokio::test]
async fn review_start_rejects_empty_base_branch() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -172,7 +176,10 @@ async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<(
"overall_confidence_score": 0.5
})
.to_string();
let server = create_mock_responses_server_repeating_assistant(&review_payload).await;
let responses = vec![create_final_assistant_message_sse_response(
&review_payload,
)?];
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -212,7 +219,7 @@ async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<(
#[tokio::test]
async fn review_start_rejects_empty_commit_sha() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -247,7 +254,7 @@ async fn review_start_rejects_empty_commit_sha() -> Result<()> {
#[tokio::test]
async fn review_start_rejects_empty_custom_instructions() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server_unchecked(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -313,7 +320,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -1,142 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadForkParams;
use codex_app_server_protocol::ThreadForkResponse;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let conversation_id = create_fake_rollout(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
Some("mock_provider"),
None,
)?;
let original_path = codex_home
.path()
.join("sessions")
.join("2025")
.join("01")
.join("05")
.join(format!(
"rollout-2025-01-05T12-00-00-{conversation_id}.jsonl"
));
assert!(
original_path.exists(),
"expected original rollout to exist at {}",
original_path.display()
);
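// Snapshot the original rollout so the test can assert the fork does not mutate it.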
let original_contents = std::fs::read_to_string(&original_path)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let fork_id = mcp
.send_thread_fork_request(ThreadForkParams {
thread_id: conversation_id.clone(),
..Default::default()
})
.await?;
let fork_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fork_id)),
)
.await??;
let ThreadForkResponse { thread, .. } = to_response::<ThreadForkResponse>(fork_resp)?;
let after_contents = std::fs::read_to_string(&original_path)?;
assert_eq!(
after_contents, original_contents,
"fork should not mutate the original rollout file"
);
assert_ne!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
let thread_path = thread.path.clone().expect("thread path");
assert!(thread_path.is_absolute());
assert_ne!(thread_path, original_path);
assert!(thread.cwd.is_absolute());
assert_eq!(thread.source, SessionSource::VsCode);
assert_eq!(
thread.turns.len(),
1,
"expected forked thread to include one turn"
);
let turn = &thread.turns[0];
assert_eq!(turn.status, TurnStatus::Completed);
assert_eq!(turn.items.len(), 1, "expected user message item");
match &turn.items[0] {
ThreadItem::UserMessage { content, .. } => {
assert_eq!(
content,
&vec![UserInput::Text {
text: preview.to_string(),
text_elements: Vec::new(),
}]
);
}
other => panic!("expected user message item, got {other:?}"),
}
// A corresponding thread/started notification should arrive.
let notif: JSONRPCNotification = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("thread/started"),
)
.await??;
let started: ThreadStartedNotification =
serde_json::from_value(notif.params.expect("params must be present"))?;
assert_eq!(started.thread, thread);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,29 +1,17 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout;
use app_test_support::rollout_path;
use app_test_support::to_response;
use chrono::DateTime;
use chrono::Utc;
use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadListResponse;
use codex_app_server_protocol::ThreadSortKey;
use codex_core::ARCHIVED_SESSIONS_SUBDIR;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
use pretty_assertions::assert_eq;
use std::cmp::Reverse;
use std::fs;
use std::fs::FileTimes;
use std::fs::OpenOptions;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
use uuid::Uuid;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -38,26 +26,12 @@ async fn list_threads(
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
list_threads_with_sort(mcp, cursor, limit, providers, None, archived).await
}
async fn list_threads_with_sort(
mcp: &mut McpProcess,
cursor: Option<String>,
limit: Option<u32>,
providers: Option<Vec<String>>,
sort_key: Option<ThreadSortKey>,
archived: Option<bool>,
) -> Result<ThreadListResponse> {
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor,
limit,
sort_key,
model_providers: providers,
archived,
})
.await?;
let resp: JSONRPCResponse = timeout(
@@ -108,16 +82,6 @@ fn timestamp_at(
)
}
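// Pin a rollout file's mtime so updated_at ordering is deterministic in tests.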
fn set_rollout_mtime(path: &Path, updated_at_rfc3339: &str) -> Result<()> {
let parsed = DateTime::parse_from_rfc3339(updated_at_rfc3339)?.with_timezone(&Utc);
let times = FileTimes::new().set_modified(parsed.into());
OpenOptions::new()
.append(true)
.open(path)?
.set_times(times)?;
Ok(())
}
#[tokio::test]
async fn thread_list_basic_empty() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -130,7 +94,6 @@ async fn thread_list_basic_empty() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
assert!(data.is_empty());
@@ -193,7 +156,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
assert_eq!(data1.len(), 2);
@@ -201,7 +163,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -218,7 +179,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
assert!(data2.len() <= 2);
@@ -226,7 +186,6 @@ async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> {
assert_eq!(thread.preview, "Hello");
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.created_at > 0);
assert_eq!(thread.updated_at, thread.created_at);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -268,7 +227,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
None,
Some(10),
Some(vec!["other_provider".to_string()]),
None,
)
.await?;
assert_eq!(data.len(), 1);
@@ -278,7 +236,6 @@ async fn thread_list_respects_provider_filter() -> Result<()> {
assert_eq!(thread.model_provider, "other_provider");
let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_ts);
assert_eq!(thread.updated_at, expected_ts);
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -318,7 +275,6 @@ async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> {
None,
Some(8),
Some(vec!["target_provider".to_string()]),
None,
)
.await?;
assert_eq!(
@@ -363,7 +319,6 @@ async fn thread_list_enforces_max_limit() -> Result<()> {
None,
Some(200),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
assert_eq!(
@@ -409,7 +364,6 @@ async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()
None,
Some(10),
Some(vec!["target_provider".to_string()]),
None,
)
.await?;
assert_eq!(
@@ -456,7 +410,6 @@ async fn thread_list_includes_git_info() -> Result<()> {
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
let thread = data
@@ -476,418 +429,3 @@ async fn thread_list_includes_git_info() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_list_default_sorts_by_created_at() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-01-02T12-00-00",
"2025-01-02T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-01-01T13-00-00",
"2025-01-01T13:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_a.as_str(), id_b.as_str(), id_c.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_sort_updated_at_orders_by_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_old = create_fake_rollout(
codex_home.path(),
"2025-01-01T10-00-00",
"2025-01-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_mid = create_fake_rollout(
codex_home.path(),
"2025-01-01T11-00-00",
"2025-01-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_new = create_fake_rollout(
codex_home.path(),
"2025-01-01T12-00-00",
"2025-01-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T10-00-00", &id_old).as_path(),
"2025-01-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T11-00-00", &id_mid).as_path(),
"2025-01-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-01-01T12-00-00", &id_new).as_path(),
"2025-01-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids, vec![id_old.as_str(), id_mid.as_str(), id_new.as_str()]);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_paginates_with_cursor() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_c = create_fake_rollout(
codex_home.path(),
"2025-02-01T12-00-00",
"2025-02-01T12:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
"2025-02-03T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
"2025-02-02T00:00:00Z",
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T12-00-00", &id_c).as_path(),
"2025-02-01T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse {
data: page1,
next_cursor: cursor1,
} = list_threads_with_sort(
&mut mcp,
None,
Some(2),
Some(vec!["mock_provider".to_string()]),
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page1: Vec<_> = page1.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page1, vec![id_a.as_str(), id_b.as_str()]);
let cursor1 = cursor1.expect("expected nextCursor on first page");
let ThreadListResponse {
data: page2,
next_cursor: cursor2,
} = list_threads_with_sort(
&mut mcp,
Some(cursor1),
Some(2),
Some(vec!["mock_provider".to_string()]),
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids_page2: Vec<_> = page2.iter().map(|thread| thread.id.as_str()).collect();
assert_eq!(ids_page2, vec![id_c.as_str()]);
assert_eq!(cursor2, None);
Ok(())
}
#[tokio::test]
async fn thread_list_created_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
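// Entries with identical created_at timestamps are tie-broken by UUID, descending.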
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_tie_breaks_by_uuid() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let id_a = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let id_b = create_fake_rollout(
codex_home.path(),
"2025-02-01T11-00-00",
"2025-02-01T11:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
let updated_at = "2025-02-03T00:00:00Z";
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &id_a).as_path(),
updated_at,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T11-00-00", &id_b).as_path(),
updated_at,
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
let mut expected = [id_a, id_b];
expected.sort_by_key(|id| Reverse(Uuid::parse_str(id).expect("uuid should parse")));
let expected: Vec<_> = expected.iter().map(String::as_str).collect();
assert_eq!(ids, expected);
Ok(())
}
#[tokio::test]
async fn thread_list_updated_at_uses_mtime() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let thread_id = create_fake_rollout(
codex_home.path(),
"2025-02-01T10-00-00",
"2025-02-01T10:00:00Z",
"Hello",
Some("mock_provider"),
None,
)?;
set_rollout_mtime(
rollout_path(codex_home.path(), "2025-02-01T10-00-00", &thread_id).as_path(),
"2025-02-05T00:00:00Z",
)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads_with_sort(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(ThreadSortKey::UpdatedAt),
None,
)
.await?;
let thread = data
.iter()
.find(|item| item.id == thread_id)
.expect("expected thread for created rollout");
let expected_created =
chrono::DateTime::parse_from_rfc3339("2025-02-01T10:00:00Z")?.timestamp();
let expected_updated =
chrono::DateTime::parse_from_rfc3339("2025-02-05T00:00:00Z")?.timestamp();
assert_eq!(thread.created_at, expected_created);
assert_eq!(thread.updated_at, expected_updated);
Ok(())
}
#[tokio::test]
async fn thread_list_archived_filter() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let active_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T10-00-00",
"2025-03-01T10:00:00Z",
"Active",
Some("mock_provider"),
None,
)?;
let archived_id = create_fake_rollout(
codex_home.path(),
"2025-03-01T09-00-00",
"2025-03-01T09:00:00Z",
"Archived",
Some("mock_provider"),
None,
)?;
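// Move the second rollout into the archived sessions directory to mark it archived.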
let archived_dir = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR);
fs::create_dir_all(&archived_dir)?;
let archived_source = rollout_path(codex_home.path(), "2025-03-01T09-00-00", &archived_id);
let archived_dest = archived_dir.join(
archived_source
.file_name()
.expect("archived rollout should have a file name"),
);
fs::rename(&archived_source, &archived_dest)?;
let mut mcp = init_mcp(codex_home.path()).await?;
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
None,
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, active_id);
let ThreadListResponse { data, .. } = list_threads(
&mut mcp,
None,
Some(10),
Some(vec!["mock_provider".to_string()]),
Some(true),
)
.await?;
assert_eq!(data.len(), 1);
assert_eq!(data[0].id, archived_id);
Ok(())
}
#[tokio::test]
async fn thread_list_invalid_cursor_returns_error() -> Result<()> {
let codex_home = TempDir::new()?;
create_minimal_config(codex_home.path())?;
let mut mcp = init_mcp(codex_home.path()).await?;
let request_id = mcp
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
cursor: Some("not-a-cursor".to_string()),
limit: Some(2),
sort_key: None,
model_providers: Some(vec!["mock_provider".to_string()]),
archived: None,
})
.await?;
let error: JSONRPCError = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
)
.await??;
assert_eq!(error.error.code, -32600);
assert_eq!(error.error.message, "invalid cursor: not-a-cursor");
Ok(())
}

View File

@@ -1,139 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadLoadedListParams;
use codex_app_server_protocol::ThreadLoadedListResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_loaded_list_returns_loaded_thread_ids() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_id = start_thread(&mut mcp).await?;
let list_id = mcp
.send_thread_loaded_list_request(ThreadLoadedListParams::default())
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadLoadedListResponse {
mut data,
next_cursor,
} = to_response::<ThreadLoadedListResponse>(resp)?;
data.sort();
assert_eq!(data, vec![thread_id]);
assert_eq!(next_cursor, None);
Ok(())
}
#[tokio::test]
async fn thread_loaded_list_paginates() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let first = start_thread(&mut mcp).await?;
let second = start_thread(&mut mcp).await?;
let mut expected = [first, second];
expected.sort();
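// With limit = 1, loaded thread ids come back one per page in sorted order.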
let list_id = mcp
.send_thread_loaded_list_request(ThreadLoadedListParams {
cursor: None,
limit: Some(1),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadLoadedListResponse {
data: first_page,
next_cursor,
} = to_response::<ThreadLoadedListResponse>(resp)?;
assert_eq!(first_page, vec![expected[0].clone()]);
assert_eq!(next_cursor, Some(expected[0].clone()));
let list_id = mcp
.send_thread_loaded_list_request(ThreadLoadedListParams {
cursor: next_cursor,
limit: Some(1),
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(list_id)),
)
.await??;
let ThreadLoadedListResponse {
data: second_page,
next_cursor,
} = to_response::<ThreadLoadedListResponse>(resp)?;
assert_eq!(second_page, vec![expected[1].clone()]);
assert_eq!(next_cursor, None);
Ok(())
}
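// Helper to create a config.toml pointing at the mock model server.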
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}
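// Helper to start a thread and return its id.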
async fn start_thread(mcp: &mut McpProcess) -> Result<String> {
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.1".to_string()),
..Default::default()
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(resp)?;
Ok(thread.id)
}

View File

@@ -1,159 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SessionSource;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadReadParams;
use codex_app_server_protocol::ThreadReadResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use pretty_assertions::assert_eq;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
async fn thread_read_returns_summary_without_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
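// Mark bytes 0..5 of the preview with a "<note>" text element.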
let text_elements = [TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: false,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
assert_eq!(thread.git_info, None);
assert_eq!(thread.turns.len(), 0);
Ok(())
}
#[tokio::test]
async fn thread_read_can_include_turns() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let read_id = mcp
.send_thread_read_request(ThreadReadParams {
thread_id: conversation_id.clone(),
include_turns: true,
})
.await?;
let read_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
)
.await??;
let ThreadReadResponse { thread } = to_response::<ThreadReadResponse>(read_resp)?;
assert_eq!(thread.turns.len(), 1);
let turn = &thread.turns[0];
assert_eq!(turn.status, TurnStatus::Completed);
assert_eq!(turn.items.len(), 1, "expected user message item");
match &turn.items[0] {
ThreadItem::UserMessage { content, .. } => {
assert_eq!(
content,
&vec![UserInput::Text {
text: preview.to_string(),
text_elements: text_elements.clone().into_iter().map(Into::into).collect(),
}]
);
}
other => panic!("expected user message item, got {other:?}"),
}
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
format!(
r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
),
)
}

View File

@@ -1,7 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_fake_rollout_with_text_elements;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::create_fake_rollout;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
@@ -11,27 +11,19 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::config_types::Personality;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const DEFAULT_BASE_INSTRUCTIONS: &str = "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
#[tokio::test]
async fn thread_resume_returns_original_thread() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -67,33 +59,23 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
#[tokio::test]
async fn thread_resume_returns_rollout_history() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let preview = "Saved user message";
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".into()),
)];
let conversation_id = create_fake_rollout_with_text_elements(
let conversation_id = create_fake_rollout(
codex_home.path(),
"2025-01-05T12-00-00",
"2025-01-05T12:00:00Z",
preview,
text_elements
.iter()
.map(|elem| serde_json::to_value(elem).expect("serialize text element"))
.collect(),
Some("mock_provider"),
None,
)?;
@@ -117,7 +99,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
assert_eq!(thread.id, conversation_id);
assert_eq!(thread.preview, preview);
assert_eq!(thread.model_provider, "mock_provider");
assert!(thread.path.as_ref().expect("thread path").is_absolute());
assert!(thread.path.is_absolute());
assert_eq!(thread.cwd, PathBuf::from("/"));
assert_eq!(thread.cli_version, "0.0.0");
assert_eq!(thread.source, SessionSource::Cli);
@@ -136,8 +118,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
assert_eq!(
content,
&vec![UserInput::Text {
text: preview.to_string(),
text_elements: text_elements.clone().into_iter().map(Into::into).collect(),
text: preview.to_string()
}]
);
}
@@ -149,7 +130,7 @@ async fn thread_resume_returns_rollout_history() -> Result<()> {
#[tokio::test]
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -169,7 +150,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let thread_path = thread.path.clone().expect("thread path");
let thread_path = thread.path.clone();
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: "not-a-valid-thread-id".to_string(),
@@ -186,16 +167,14 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let ThreadResumeResponse {
thread: resumed, ..
} = to_response::<ThreadResumeResponse>(resume_resp)?;
let mut expected = thread;
expected.updated_at = resumed.updated_at;
assert_eq!(resumed, expected);
assert_eq!(resumed, thread);
Ok(())
}
#[tokio::test]
async fn thread_resume_supports_history_and_overrides() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -223,7 +202,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
content: vec![ContentItem::InputText {
text: history_text.to_string(),
}],
end_turn: None,
}];
// Resume with explicit history and override the model.
@@ -253,91 +231,6 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
model: Some("gpt-5.2-codex".to_string()),
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let _resume: ThreadResumeResponse = to_response::<ThreadResumeResponse>(resume_resp)?;
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
assert!(
!developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"did not expect a personality update message in developer input, got {developer_texts:?}"
);
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(DEFAULT_BASE_INSTRUCTIONS),
"expected default base instructions from history, got {instructions_text:?}"
);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
@@ -351,13 +244,10 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
remote_models = false
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -1,7 +1,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
@@ -28,7 +28,7 @@ async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -57,7 +57,6 @@ async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: first_text.to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
@@ -78,7 +77,6 @@ async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Second".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
@@ -117,8 +115,7 @@ async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()
assert_eq!(
content,
&vec![V2UserInput::Text {
text: first_text.to_string(),
text_elements: Vec::new(),
text: first_text.to_string()
}]
);
}
@@ -146,8 +143,7 @@ async fn thread_rollback_drops_last_turns_and_persists_to_rollout() -> Result<()
assert_eq!(
content,
&vec![V2UserInput::Text {
text: first_text.to_string(),
text_elements: Vec::new(),
text: first_text.to_string()
}]
);
}
@@ -172,7 +168,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -1,6 +1,6 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
@@ -8,9 +8,6 @@ use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_core::config::set_project_trust_level;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -20,7 +17,7 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs
#[tokio::test]
async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
// Provide a mock server and config so model wiring is valid.
let server = create_mock_responses_server_repeating_assistant("Done").await;
let server = create_mock_chat_completions_server(vec![]).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
@@ -72,47 +69,6 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_start_respects_project_config_from_cwd() -> Result<()> {
let server = create_mock_responses_server_repeating_assistant("Done").await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let workspace = TempDir::new()?;
let project_config_dir = workspace.path().join(".codex");
std::fs::create_dir_all(&project_config_dir)?;
std::fs::write(
project_config_dir.join("config.toml"),
r#"
model_reasoning_effort = "high"
"#,
)?;
set_project_trust_level(codex_home.path(), workspace.path(), TrustLevel::Trusted)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let req_id = mcp
.send_thread_start_request(ThreadStartParams {
cwd: Some(workspace.path().to_string_lossy().into_owned()),
..Default::default()
})
.await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
)
.await??;
let ThreadStartResponse {
reasoning_effort, ..
} = to_response::<ThreadStartResponse>(resp)?;
assert_eq!(reasoning_effort, Some(ReasoningEffort::High));
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
@@ -129,7 +85,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -2,7 +2,7 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_command_sse_response;
use app_test_support::to_response;
use codex_app_server_protocol::JSONRPCNotification;
@@ -41,7 +41,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
std::fs::create_dir(&working_directory)?;
// Mock server: a long-running shell command; nothing further is needed after the abort.
let server = create_mock_responses_server_sequence(vec![create_shell_command_sse_response(
let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response(
shell_command.clone(),
Some(&working_directory),
Some(10_000),
@@ -73,7 +73,6 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run sleep".to_string(),
text_elements: Vec::new(),
}],
cwd: Some(working_directory.clone()),
..Default::default()
@@ -136,7 +135,7 @@ model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -3,13 +3,11 @@ use app_test_support::McpProcess;
use app_test_support::create_apply_patch_sse_response;
use app_test_support::create_exec_command_sse_response;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_mock_chat_completions_server_unchecked;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell_display;
use app_test_support::to_response;
use codex_app_server_protocol::ByteRange;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::CommandExecutionStatus;
@@ -24,7 +22,6 @@ use codex_app_server_protocol::PatchApplyStatus;
use codex_app_server_protocol::PatchChangeKind;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::TextElement;
use codex_app_server_protocol::ThreadItem;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
@@ -34,185 +31,15 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const TEST_ORIGINATOR: &str = "codex_vscode";
#[tokio::test]
async fn turn_start_sends_originator_header() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.initialize_with_client_info(ClientInfo {
name: TEST_ORIGINATOR.to_string(),
title: Some("Codex VS Code Extension".to_string()),
version: "0.1.0".to_string(),
}),
)
.await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let requests = server
.received_requests()
.await
.expect("failed to fetch received requests");
assert!(!requests.is_empty());
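// Every request to the mock model should carry the client-supplied originator header.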
for request in requests {
let originator = request
.headers
.get("originator")
.expect("originator header missing");
assert_eq!(originator.to_str()?, TEST_ORIGINATOR);
}
Ok(())
}
#[tokio::test]
async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
let responses = vec![create_final_assistant_message_sse_response("Done")?];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("mock-model".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let text_elements = vec![TextElement::new(
ByteRange { start: 0, end: 5 },
Some("<note>".to_string()),
)];
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: text_elements.clone(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
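// Drain item/started notifications until the user message item appears.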
let user_message_item = timeout(DEFAULT_READ_TIMEOUT, async {
loop {
let notification = mcp
.read_stream_until_notification_message("item/started")
.await?;
let params = notification.params.expect("item/started params");
let item_started: ItemStartedNotification =
serde_json::from_value(params).expect("deserialize item/started notification");
if let ThreadItem::UserMessage { .. } = item_started.item {
return Ok::<ThreadItem, anyhow::Error>(item_started.item);
}
}
})
.await??;
match user_message_item {
ThreadItem::UserMessage { content, .. } => {
assert_eq!(
content,
vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements,
}]
);
}
other => panic!("expected user message item, got {other:?}"),
}
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
Ok(())
}
#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
@@ -223,15 +50,10 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
create_final_assistant_message_sse_response("Done")?,
create_final_assistant_message_sse_response("Done")?,
];
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -256,7 +78,6 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
@@ -289,7 +110,6 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Second".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model-override".to_string()),
..Default::default()
@@ -328,161 +148,6 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let collaboration_mode = CollaborationMode {
mode: ModeKind::Custom,
settings: Settings {
model: "mock-model-collab".to_string(),
reasoning_effort: Some(ReasoningEffort::High),
developer_instructions: None,
},
};
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
model: Some("mock-model-override".to_string()),
effort: Some(ReasoningEffort::Low),
summary: Some(ReasoningSummary::Auto),
output_schema: None,
collaboration_mode: Some(collaboration_mode),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let payload = request.body_json();
assert_eq!(payload["model"].as_str(), Some("mock-model-collab"));
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("exp-codex-personality".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
if developer_texts.is_empty() {
eprintln!("request body: {}", request.body_json());
}
assert!(
developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected personality update message in developer input, got {developer_texts:?}"
);
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
@@ -492,15 +157,10 @@ async fn turn_start_accepts_local_image_input() -> Result<()> {
];
// Use the unchecked variant because the request payload includes a LocalImage
// which the strict matcher does not currently cover.
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let server = create_mock_chat_completions_server_unchecked(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -573,14 +233,9 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
)?,
create_final_assistant_message_sse_response("done 2")?,
];
let server = create_mock_responses_server_sequence(responses).await;
let server = create_mock_chat_completions_server(responses).await;
// Default approval is untrusted to force elicitation on the first turn.
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -605,7 +260,6 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
@@ -651,7 +305,6 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python again".to_string(),
text_elements: Vec::new(),
}],
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess),
@@ -704,13 +357,8 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
)?,
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
codex_home.as_path(),
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -733,7 +381,6 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run python".to_string(),
text_elements: Vec::new(),
}],
cwd: Some(workspace.clone()),
..Default::default()
@@ -856,13 +503,8 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
)?,
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -887,7 +529,6 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "first turn".to_string(),
text_elements: Vec::new(),
}],
cwd: Some(first_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
@@ -900,9 +541,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -915,7 +554,6 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
mcp.read_stream_until_notification_message("codex/event/task_complete"),
)
.await??;
mcp.clear_message_buffer();
// Second turn with workspace-write and second_cwd; ensure exec begins in second_cwd.
let second_turn = mcp
@@ -923,7 +561,6 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "second turn".to_string(),
text_elements: Vec::new(),
}],
cwd: Some(second_cwd.clone()),
approval_policy: Some(codex_app_server_protocol::AskForApproval::Never),
@@ -931,9 +568,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
.await?;
timeout(
@@ -1001,13 +636,8 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
create_apply_patch_sse_response(patch, "patch-call")?,
create_final_assistant_message_sse_response("patch applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1031,7 +661,6 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "apply patch".into(),
text_elements: Vec::new(),
}],
cwd: Some(workspace.clone()),
..Default::default()
@@ -1183,13 +812,8 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
create_apply_patch_sse_response(patch_2, "patch-call-2")?,
create_final_assistant_message_sse_response("patch 2 applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1214,7 +838,6 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "apply patch 1".into(),
text_elements: Vec::new(),
}],
cwd: Some(workspace.clone()),
..Default::default()
@@ -1291,7 +914,6 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "apply patch 2".into(),
text_elements: Vec::new(),
}],
cwd: Some(workspace.clone()),
..Default::default()
@@ -1364,13 +986,8 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
create_apply_patch_sse_response(patch, "patch-call")?,
create_final_assistant_message_sse_response("patch declined")?,
];
let server = create_mock_responses_server_sequence(responses).await;
create_config_toml(
&codex_home,
&server.uri(),
"untrusted",
&BTreeMap::default(),
)?;
let server = create_mock_chat_completions_server(responses).await;
create_config_toml(&codex_home, &server.uri(), "untrusted")?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1394,7 +1011,6 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "apply patch".into(),
text_elements: Vec::new(),
}],
cwd: Some(workspace.clone()),
..Default::default()
@@ -1508,14 +1124,18 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
create_exec_command_sse_response("uexec-1")?,
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
let server = create_mock_chat_completions_server(responses).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::UnifiedExec, true)]),
)?;
create_config_toml(codex_home.path(), &server.uri(), "never")?;
let config_toml = codex_home.path().join("config.toml");
let mut config_contents = std::fs::read_to_string(&config_toml)?;
config_contents.push_str(
r#"
[features]
unified_exec = true
"#,
);
std::fs::write(&config_toml, config_contents)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1538,7 +1158,6 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "run a command".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
@@ -1608,18 +1227,8 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
unreachable!("loop ensures we break on command execution items");
};
assert_eq!(completed_id, "uexec-1");
assert!(
matches!(
completed_status,
CommandExecutionStatus::Completed | CommandExecutionStatus::Failed
),
"unexpected command execution status: {completed_status:?}"
);
if completed_status == CommandExecutionStatus::Completed {
assert_eq!(exit_code, Some(0));
} else {
assert!(exit_code.is_some(), "expected exit_code for failed command");
}
assert_eq!(completed_status, CommandExecutionStatus::Completed);
assert_eq!(exit_code, Some(0));
assert_eq!(
completed_process_id.as_deref(),
Some(started_process_id.as_str())
@@ -1639,24 +1248,7 @@ fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
feature_flags: &BTreeMap<Feature, bool>,
) -> std::io::Result<()> {
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -1668,13 +1260,10 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
{feature_entries}
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#

View File

@@ -1,11 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
exports_files(["apply_patch_tool_instructions.md"])
codex_rust_crate(
name = "apply-patch",
crate_name = "codex_apply_patch",
compile_data = [
"apply_patch_tool_instructions.md",
],
)

View File

@@ -1,4 +1,3 @@
use codex_utils_cargo_bin::find_resource;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::fs;
@@ -9,7 +8,7 @@ use tempfile::tempdir;
#[test]
fn test_apply_patch_scenarios() -> anyhow::Result<()> {
let scenarios_dir = find_resource!("tests/fixtures/scenarios")?;
let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios");
for scenario in fs::read_dir(scenarios_dir)? {
let scenario = scenario?;
let path = scenario.path();
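
This test swaps the Bazel-aware `find_resource!` macro for a path rooted at the crate directory at compile time; the chatgpt fixture test later in this diff makes the same change. The pattern as a minimal sketch (`fixture_path` is an illustrative name, not a helper from the repo):

use std::path::{Path, PathBuf};

// Resolve test data relative to the crate root baked in at compile time.
fn fixture_path(relative: &str) -> PathBuf {
    Path::new(env!("CARGO_MANIFEST_DIR")).join(relative)
}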

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "arg0",
crate_name = "codex_arg0",
)

View File

@@ -145,41 +145,11 @@ where
/// that `apply_patch` can be on the PATH without requiring the user to
/// install a separate `apply_patch` executable, simplifying the deployment of
/// Codex CLI.
/// Note: In debug builds the temp-dir guard is disabled to ease local testing.
///
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
/// be called before multiple threads are spawned.
pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<TempDir> {
let codex_home = codex_core::config::find_codex_home()?;
#[cfg(not(debug_assertions))]
{
// Guard against placing helpers in system temp directories outside debug builds.
let temp_root = std::env::temp_dir();
if codex_home.starts_with(&temp_root) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"Refusing to create helper binaries under temporary dir {temp_root:?} (codex_home: {codex_home:?})"
),
));
}
}
std::fs::create_dir_all(&codex_home)?;
// Use a CODEX_HOME-scoped temp root to avoid cluttering the top-level directory.
let temp_root = codex_home.join("tmp").join("path");
std::fs::create_dir_all(&temp_root)?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
// Ensure only the current user can access the temp directory.
std::fs::set_permissions(&temp_root, std::fs::Permissions::from_mode(0o700))?;
}
let temp_dir = tempfile::Builder::new()
.prefix("codex-arg0")
.tempdir_in(&temp_root)?;
let temp_dir = TempDir::new()?;
let path = temp_dir.path();
for filename in &[
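
The simplified version drops the temp-root guard and the CODEX_HOME-scoped directory and just creates a standard `TempDir`; the doc comment's contract about mutating PATH before spawning threads still applies. A sketch of the prepend mechanism that comment describes, under stated assumptions (names are illustrative; the real function also writes the `apply_patch` alias entries into the directory):

// Prepend a directory to PATH so helper binaries placed there win lookup.
use std::env;
use std::path::Path;

fn prepend_to_path(dir: &Path) {
    let joined = match env::var_os("PATH") {
        Some(existing) => {
            let mut entries = vec![dir.to_path_buf()];
            entries.extend(env::split_paths(&existing));
            env::join_paths(entries).expect("PATH entries should join")
        }
        None => dir.as_os_str().to_os_string(),
    };
    // Mutating the environment is not thread-safe, which is why the doc
    // comment requires calling this before any threads are spawned
    // (env::set_var is an unsafe fn as of the Rust 2024 edition for this reason).
    unsafe { env::set_var("PATH", joined) };
}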

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "async-utils",
crate_name = "codex_async_utils",
)

View File

@@ -1,7 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "backend-client",
crate_name = "codex_backend_client",
compile_data = glob(["tests/fixtures/**"]),
)

View File

@@ -73,8 +73,8 @@ impl Client {
})
}
pub fn from_auth(base_url: impl Into<String>, auth: &CodexAuth) -> Result<Self> {
let token = auth.get_token().map_err(anyhow::Error::from)?;
pub async fn from_auth(base_url: impl Into<String>, auth: &CodexAuth) -> Result<Self> {
let token = auth.get_token().await.map_err(anyhow::Error::from)?;
let mut client = Self::new(base_url)?
.with_user_agent(get_codex_user_agent())
.with_bearer_token(token);
@@ -174,7 +174,6 @@ impl Client {
limit: Option<i32>,
task_filter: Option<&str>,
environment_id: Option<&str>,
cursor: Option<&str>,
) -> Result<PaginatedListTaskListItem> {
let url = match self.path_style {
PathStyle::CodexApi => format!("{}/api/codex/tasks/list", self.base_url),
@@ -191,11 +190,6 @@ impl Client {
} else {
req
};
let req = if let Some(c) = cursor {
req.query(&[("cursor", c)])
} else {
req
};
let req = if let Some(id) = environment_id {
req.query(&[("environment_id", id)])
} else {
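
With `get_token` now async, `Client::from_auth` becomes async as well, so every call site gains an `.await`. A hedged usage sketch (assuming a `CodexAuth` value is in scope and that this crate's `Result` alias is anyhow-based, as the `map_err(anyhow::Error::from)` above suggests):

// Construct a backend client from stored auth; from_auth awaits the
// token fetch internally before attaching it as a bearer token.
async fn make_client(base_url: &str, auth: &CodexAuth) -> anyhow::Result<Client> {
    Client::from_auth(base_url, auth).await
}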

View File

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "chatgpt",
crate_name = "codex_chatgpt",
)

View File

@@ -12,7 +12,6 @@ anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }

View File

@@ -5,7 +5,6 @@ use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
use anyhow::Context;
use serde::Serialize;
use serde::de::DeserializeOwned;
/// Make a GET request to the ChatGPT backend API.
@@ -49,37 +48,3 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
anyhow::bail!("Request failed with status {status}: {body}")
}
}
pub(crate) async fn chatgpt_post_request<T: DeserializeOwned, P: Serialize>(
config: &Config,
access_token: &str,
account_id: &str,
path: &str,
payload: &P,
) -> anyhow::Result<T> {
let chatgpt_base_url = &config.chatgpt_base_url;
let client = create_client();
let url = format!("{chatgpt_base_url}{path}");
let response = client
.post(&url)
.bearer_auth(access_token)
.header("chatgpt-account-id", account_id)
.header("Content-Type", "application/json")
.json(payload)
.send()
.await
.context("Failed to send request")?;
if response.status().is_success() {
let result: T = response
.json()
.await
.context("Failed to parse JSON response")?;
Ok(result)
} else {
let status = response.status();
let body = response.text().await.unwrap_or_default();
anyhow::bail!("Request failed with status {status}: {body}")
}
}

View File

@@ -1,4 +1,4 @@
use codex_core::AuthManager;
use codex_core::CodexAuth;
use std::path::Path;
use std::sync::LazyLock;
use std::sync::RwLock;
@@ -23,10 +23,9 @@ pub async fn init_chatgpt_token_from_auth(
codex_home: &Path,
auth_credentials_store_mode: AuthCredentialsStoreMode,
) -> std::io::Result<()> {
let auth_manager =
AuthManager::new(codex_home.to_path_buf(), false, auth_credentials_store_mode);
if let Some(auth) = auth_manager.auth().await {
let token_data = auth.get_token_data()?;
let auth = CodexAuth::from_auth_storage(codex_home, auth_credentials_store_mode)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);
}
Ok(())

View File

@@ -1,125 +0,0 @@
use codex_core::config::Config;
use codex_core::features::Feature;
use serde::Deserialize;
use serde::Serialize;
use crate::chatgpt_client::chatgpt_post_request;
use crate::chatgpt_token::get_chatgpt_token_data;
use crate::chatgpt_token::init_chatgpt_token_from_auth;
pub use codex_core::connectors::ConnectorInfo;
pub use codex_core::connectors::connector_display_label;
use codex_core::connectors::connector_install_url;
pub use codex_core::connectors::list_accessible_connectors_from_mcp_tools;
use codex_core::connectors::merge_connectors;
#[derive(Debug, Serialize)]
struct ListConnectorsRequest {
principals: Vec<Principal>,
}
#[derive(Debug, Serialize)]
struct Principal {
#[serde(rename = "type")]
principal_type: PrincipalType,
id: String,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
enum PrincipalType {
User,
}
#[derive(Debug, Deserialize)]
struct ListConnectorsResponse {
connectors: Vec<ConnectorInfo>,
}
pub async fn list_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
let (connectors_result, accessible_result) = tokio::join!(
list_all_connectors(config),
list_accessible_connectors_from_mcp_tools(config),
);
let connectors = connectors_result?;
let accessible = accessible_result?;
Ok(merge_connectors(connectors, accessible))
}
pub async fn list_all_connectors(config: &Config) -> anyhow::Result<Vec<ConnectorInfo>> {
if !config.features.enabled(Feature::Connectors) {
return Ok(Vec::new());
}
init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode)
.await?;
let token_data =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let user_id = token_data
.id_token
.chatgpt_user_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT user ID not available, please re-run `codex login`")
})?;
let account_id = token_data
.id_token
.chatgpt_account_id
.as_deref()
.ok_or_else(|| {
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
})?;
let principal_id = format!("{user_id}__{account_id}");
let request = ListConnectorsRequest {
principals: vec![Principal {
principal_type: PrincipalType::User,
id: principal_id,
}],
};
let response: ListConnectorsResponse = chatgpt_post_request(
config,
token_data.access_token.as_str(),
account_id,
"/aip/connectors/list_accessible?skip_actions=true&external_logos=true",
&request,
)
.await?;
let mut connectors = response.connectors;
for connector in &mut connectors {
let install_url = match connector.install_url.take() {
Some(install_url) => install_url,
None => connector_install_url(&connector.connector_name, &connector.connector_id),
};
connector.connector_name =
normalize_connector_name(&connector.connector_name, &connector.connector_id);
connector.connector_description =
normalize_connector_value(connector.connector_description.as_deref());
connector.install_url = Some(install_url);
connector.is_accessible = false;
}
connectors.sort_by(|left, right| {
left.connector_name
.cmp(&right.connector_name)
.then_with(|| left.connector_id.cmp(&right.connector_id))
});
Ok(connectors)
}
fn normalize_connector_name(name: &str, connector_id: &str) -> String {
let trimmed = name.trim();
if trimmed.is_empty() {
connector_id.to_string()
} else {
trimmed.to_string()
}
}
fn normalize_connector_value(value: Option<&str>) -> Option<String> {
value
.map(str::trim)
.filter(|value| !value.is_empty())
.map(str::to_string)
}

View File

@@ -1,5 +1,4 @@
pub mod apply_command;
mod chatgpt_client;
mod chatgpt_token;
pub mod connectors;
pub mod get_task;

View File

@@ -1,6 +1,6 @@
use codex_chatgpt::apply_command::apply_diff_from_task;
use codex_chatgpt::get_task::GetTaskResponse;
use codex_utils_cargo_bin::find_resource;
use std::path::Path;
use tempfile::TempDir;
use tokio::process::Command;
@@ -68,8 +68,8 @@ async fn create_temp_git_repo() -> anyhow::Result<TempDir> {
}
async fn mock_get_task_with_fixture() -> anyhow::Result<GetTaskResponse> {
let fixture_path = find_resource!("tests/task_turn_fixture.json")?;
let fixture_content = tokio::fs::read_to_string(fixture_path).await?;
let fixture_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/task_turn_fixture.json");
let fixture_content = std::fs::read_to_string(fixture_path)?;
let response: GetTaskResponse = serde_json::from_str(&fixture_content)?;
Ok(response)
}

View File

@@ -1,10 +0,0 @@
load("//:defs.bzl", "codex_rust_crate", "multiplatform_binaries")
codex_rust_crate(
name = "cli",
crate_name = "codex_cli",
)
multiplatform_binaries(
name = "codex",
)

View File

@@ -30,11 +30,15 @@ codex-exec = { workspace = true }
codex-execpolicy = { workspace = true }
codex-login = { workspace = true }
codex-mcp-server = { workspace = true }
codex-process-hardening = { workspace = true }
codex-protocol = { workspace = true }
codex-responses-api-proxy = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-stdio-to-uds = { workspace = true }
codex-tui = { workspace = true }
codex-tui2 = { workspace = true }
codex-utils-absolute-path = { workspace = true }
ctor = { workspace = true }
libc = { workspace = true }
owo-colors = { workspace = true }
regex-lite = { workspace = true }

Some files were not shown because too many files have changed in this diff.