Mirror of https://github.com/openai/codex.git
Synced 2026-05-08 13:26:34 +00:00

Compare commits (1 commit): mchen/SAI-...codex/linu
Commit: 5e3834378f

.bazelrc (23 lines changed)
@@ -79,10 +79,6 @@ common:ci --disk_cache=
 # Shared config for the main Bazel CI workflow.
 common:ci-bazel --config=ci
 common:ci-bazel --build_metadata=TAG_workflow=bazel
-# Bazel CI cross-compiles in several legs, and the V8-backed code-mode tests
-# are not stable in that setup yet. Keep running the rest of the Rust
-# integration suites through the workspace-root launcher.
-common:ci-bazel --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::

 # Shared config for Bazel-backed Rust linting.
 build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect

@@ -157,25 +153,6 @@ common:ci-macos --config=remote
 common:ci-macos --strategy=remote
 common:ci-macos --strategy=TestRunner=darwin-sandbox,local

-# On Windows, use Linux remote execution for build actions but keep test actions
-# on the Windows runner so Bazel's normal test sharding and flaky-test retries
-# still run against Windows binaries.
-common:ci-windows-cross --config=ci-windows
-common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
-common:ci-windows-cross --config=remote
-common:ci-windows-cross --host_platform=//:rbe
-common:ci-windows-cross --strategy=remote
-common:ci-windows-cross --strategy=TestRunner=local
-common:ci-windows-cross --local_test_jobs=4
-common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
-# Native Windows CI still covers the PowerShell tests. The cross-built gnullvm
-# binaries currently hang in PowerShell AST parser tests when those binaries are
-# run on the Windows runner.
-common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
-common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
-common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
-common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain
-
-# Linux-only V8 CI config.
-common:ci-v8 --config=ci
-common:ci-v8 --build_metadata=TAG_workflow=v8

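Read together, the deleted `ci-windows-cross` block split execution by strategy mnemonic: build actions ran on remote Linux workers while test actions stayed on the local Windows runner so Bazel's sharding and flaky-test retries still applied. A minimal sketch of what the config amounted to at the command line, using only flags from the block above (the `//:rbe` platform alias appears in the BUILD.bazel hunk further down this diff):

# Sketch only; this config no longer exists after this commit.
bazel test //... \
  --config=remote \
  --host_platform=//:rbe \
  --strategy=remote \
  --strategy=TestRunner=local \
  --local_test_jobs=4 \
  --platforms=//:windows_x86_64_gnullvm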
@@ -1,11 +0,0 @@
-# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
-version = 1
-name = "codex"
-
-[setup]
-script = ""
-
-[[actions]]
-name = "Run"
-icon = "run"
-command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"
.github/scripts/compute-bazel-windows-path.ps1 (vendored; 14 lines changed)
@@ -5,9 +5,9 @@ tool entries, such as Maven, that can change independently of this repo and
 cause avoidable cache misses.

 This script derives a smaller, cache-stable PATH that keeps the Windows
-toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
-MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
-DotSlash, and the standard Windows system directories.
+toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths, Git,
+PowerShell, Node, Python, DotSlash, and the standard Windows system
+directories.
 `setup-bazel-ci` runs this after exporting the MSVC environment, and the script
 publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
 steps can pass that explicit PATH to Bazel.

@@ -49,8 +49,6 @@ foreach ($pathEntry in ($env:PATH -split ';')) {
     $pathEntry -like '*Microsoft Visual Studio*' -or
     $pathEntry -like '*Windows Kits*' -or
     $pathEntry -like '*Microsoft SDKs*' -or
-    $pathEntry -eq 'C:\mingw64\bin' -or
-    $pathEntry -like 'C:\msys64\*\bin' -or
     $pathEntry -like 'C:\Program Files\Git\*' -or
     $pathEntry -like 'C:\Program Files\PowerShell\*' -or
     $pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or

@@ -87,12 +85,6 @@ if ($pwshCommand) {
     Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
 }

-foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
-    if (Test-Path $mingwPath) {
-        Add-StablePathEntry $mingwPath
-    }
-}
-
 if ($windowsAppsPath) {
     Add-StablePathEntry $windowsAppsPath
 }
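The doc comment above states the contract: the PowerShell script publishes the curated PATH through `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH`, and later steps pass it to Bazel explicitly instead of inheriting the runner's full PATH. A minimal sketch of the consuming side, using the exact flag names and error message that appear in run-bazel-ci.sh below (the target set is illustrative):

if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
  echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
  exit 1
fi
bazel test //... \
  --action_env=PATH="${CODEX_BAZEL_WINDOWS_PATH}" \
  --host_action_env=PATH="${CODEX_BAZEL_WINDOWS_PATH}" \
  --test_env=PATH="${CODEX_BAZEL_WINDOWS_PATH}"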
.github/scripts/run-bazel-ci.sh (vendored; 110 lines changed)
@@ -6,7 +6,6 @@ print_failed_bazel_test_logs=0
 print_failed_bazel_action_summary=0
 remote_download_toplevel=0
 windows_msvc_host_platform=0
-windows_cross_compile=0

 while [[ $# -gt 0 ]]; do
   case "$1" in

@@ -26,10 +25,6 @@ while [[ $# -gt 0 ]]; do
       windows_msvc_host_platform=1
       shift
       ;;
-    --windows-cross-compile)
-      windows_cross_compile=1
-      shift
-      ;;
     --)
       shift
       break

@@ -42,7 +37,7 @@ while [[ $# -gt 0 ]]; do
 done

 if [[ $# -eq 0 ]]; then
-  echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
+  echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] -- <bazel args> -- <targets>" >&2
   exit 1
 fi

@@ -66,11 +61,7 @@ case "${RUNNER_OS:-}" in
     ci_config=ci-macos
     ;;
   Windows)
-    if [[ $windows_cross_compile -eq 1 ]]; then
-      ci_config=ci-windows-cross
-    else
-      ci_config=ci-windows
-    fi
+    ci_config=ci-windows
     ;;
 esac

@@ -114,8 +105,8 @@ print_bazel_test_log_tails() {
   while IFS= read -r target; do
     failed_targets+=("$target")
   done < <(
-    grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
-      | sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
+    grep -E '^FAIL: //' "$console_log" \
+      | sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
      | sort -u
   )

@@ -253,12 +244,6 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
   exit 1
 fi

-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # Fork PRs do not receive the BuildBuddy secret needed for the remote
-  # cross-compile config. Preserve the previous local Windows build shape.
-  windows_msvc_host_platform=1
-fi
-
 post_config_bazel_args=()
 if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
   has_host_platform_override=0

@@ -284,25 +269,6 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
   post_config_bazel_args+=(--remote_download_toplevel)
 fi

-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # `--enable_platform_specific_config` expands `common:windows` on Windows
-  # hosts after ordinary rc configs, which can override `ci-windows-cross`'s
-  # RBE host platform. Repeat the host platform on the command line so V8 and
-  # other genrules execute on Linux RBE workers instead of Git Bash locally.
-  #
-  # Bazel also derives the default genrule shell from the client host. Without
-  # an explicit shell executable, remote Linux actions can be asked to run
-  # `C:\Program Files\Git\usr\bin\bash.exe`.
-  post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
-fi
-
-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # The Windows cross-compile config depends on remote execution. Fork PRs do
-  # not receive the BuildBuddy secret, so fall back to the existing local build
-  # shape and keep its lower concurrency cap.
-  post_config_bazel_args+=(--jobs=8)
-fi
-
 if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
   # Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
   # each job its own repo contents cache so they do not fight over the shared

@@ -321,57 +287,37 @@ if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
 fi

 if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
-  pass_windows_build_env=1
-  if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
-    # Remote build actions execute on Linux RBE workers. Passing the Windows
-    # runner's build environment there makes Bazel genrules try to execute
-    # C:\Program Files\Git\usr\bin\bash.exe on Linux.
-    pass_windows_build_env=0
-  fi
-
-  if [[ $pass_windows_build_env -eq 1 ]]; then
-    windows_action_env_vars=(
-      INCLUDE
-      LIB
-      LIBPATH
-      UCRTVersion
-      UniversalCRTSdkDir
-      VCINSTALLDIR
-      VCToolsInstallDir
-      WindowsLibPath
-      WindowsSdkBinPath
-      WindowsSdkDir
-      WindowsSDKLibVersion
-      WindowsSDKVersion
-    )
-
-    for env_var in "${windows_action_env_vars[@]}"; do
-      if [[ -n "${!env_var:-}" ]]; then
-        post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
-      fi
-    done
-  fi
+  windows_action_env_vars=(
+    INCLUDE
+    LIB
+    LIBPATH
+    UCRTVersion
+    UniversalCRTSdkDir
+    VCINSTALLDIR
+    VCToolsInstallDir
+    WindowsLibPath
+    WindowsSdkBinPath
+    WindowsSdkDir
+    WindowsSDKLibVersion
+    WindowsSDKVersion
+  )
+
+  for env_var in "${windows_action_env_vars[@]}"; do
+    if [[ -n "${!env_var:-}" ]]; then
+      post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
+    fi
+  done

   if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
     echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
     exit 1
   fi

-  if [[ $pass_windows_build_env -eq 1 ]]; then
-    post_config_bazel_args+=(
-      "--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
-      "--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
-    )
-  elif [[ $windows_cross_compile -eq 1 ]]; then
-    # Remote build actions run on Linux RBE workers. Give their shell snippets
-    # a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
-    # Windows test execution.
-    post_config_bazel_args+=(
-      "--action_env=PATH=/usr/bin:/bin"
-      "--host_action_env=PATH=/usr/bin:/bin"
-    )
-  fi
-  post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
+  post_config_bazel_args+=(
+    "--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
+    "--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
+    "--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
+  )
 fi

 bazel_console_log="$(mktemp)"
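For reference, the post-change wrapper contract per its usage string is: options, then bazel arguments, then targets, separated by `--`. An illustrative invocation (the flag and target choices here are examples assembled from this diff, not the workflow's exact command):

./.github/scripts/run-bazel-ci.sh \
  --print-failed-action-summary \
  --print-failed-test-logs \
  --windows-msvc-host-platform \
  -- \
  test --test_verbose_timeout_warnings \
  -- \
  //... -//third_party/v8:all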
.github/scripts/run-bazel-query-ci.sh (vendored; 13 lines changed)
@@ -6,13 +6,8 @@ set -euo pipefail
 # invocation so target-discovery queries can reuse the same Bazel server.

 query_args=()
-windows_cross_compile=0
 while [[ $# -gt 0 ]]; do
   case "$1" in
-    --windows-cross-compile)
-      windows_cross_compile=1
-      shift
-      ;;
     --)
       shift
       break

@@ -25,7 +20,7 @@ while [[ $# -gt 0 ]]; do
 done

 if [[ $# -ne 1 ]]; then
-  echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
+  echo "Usage: $0 [<bazel query args>...] -- <query expression>" >&2
   exit 1
 fi

@@ -37,11 +32,7 @@ case "${RUNNER_OS:-}" in
     ci_config=ci-macos
     ;;
   Windows)
-    if [[ $windows_cross_compile -eq 1 ]]; then
-      ci_config=ci-windows-cross
-    else
-      ci_config=ci-windows
-    fi
+    ci_config=ci-windows
     ;;
 esac
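Same shape for the query wrapper after this change: optional `bazel query` arguments, then `--`, then exactly one query expression, with the CI config picked from RUNNER_OS (ci-linux, ci-macos, or ci-windows). An illustrative call; the query expression is a made-up example:

./.github/scripts/run-bazel-query-ci.sh \
  --keep_going \
  -- 'kind(rust_test, //codex-rs/...)'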
.github/workflows/bazel.yml (vendored; 133 lines changed)
@@ -17,10 +17,13 @@ concurrency:
   cancel-in-progress: ${{ github.ref_name != 'main' }}
 jobs:
   test:
-    # PRs use a fast Windows cross-compiled test leg for pre-merge signal.
-    # Post-merge pushes to main also run the native Windows test job below for
-    # broader Windows signal without putting PR latency back on the critical
-    # path. Cargo CI owns V8/code-mode test coverage for now.
+    # Even though a no-cache-hit Windows build seems to exceed the 30-minute
+    # limit on occasion, the more common reason for exceeding the limit is a
+    # true test failure in a rust_test() marked "flaky" that gets run 3x.
+    # In that case, extra time generally does not give us more signal.
+    #
+    # Ultimately we need true distributed builds (e.g.,
+    # https://www.buildbuddy.io/docs/rbe-setup/) to speed things up.
     timeout-minutes: 30
     strategy:
       fail-fast: false

@@ -44,16 +47,13 @@ jobs:
          # - os: ubuntu-24.04-arm
          #   target: aarch64-unknown-linux-gnu

-          # Windows fast path: build the windows-gnullvm binaries with Linux
-          # RBE, then run the resulting Windows tests on the Windows runner.
-          # Cargo CI preserves V8/code-mode coverage while Bazel CI keeps broad
-          # non-code-mode signal.
+          # Windows
           - os: windows-latest
             target: x86_64-pc-windows-gnullvm
     runs-on: ${{ matrix.os }}

     # Configure a human readable name for each job
-    name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
+    name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}

     steps:
       - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

@@ -88,15 +88,9 @@ jobs:
             # path. V8 consumers under `//codex-rs/...` still participate
             # transitively through `//...`.
             -//third_party/v8:all
-            # V8-backed code-mode tests are covered by Cargo CI. Bazel CI
-            # cross-compiles in several legs, and those tests are not stable in
-            # that setup yet.
-            -//codex-rs/code-mode:code-mode-unit-tests
-            -//codex-rs/v8-poc:v8-poc-unit-tests
           )

           bazel_wrapper_args=(
             --print-failed-action-summary
             --print-failed-test-logs
           )
           bazel_test_args=(

@@ -106,10 +100,8 @@ jobs:
             --build_metadata=COMMIT_SHA=${GITHUB_SHA}
           )
           if [[ "${RUNNER_OS}" == "Windows" ]]; then
-            bazel_wrapper_args+=(
-              --windows-cross-compile
-              --remote-download-toplevel
-            )
+            bazel_wrapper_args+=(--windows-msvc-host-platform)
+            bazel_test_args+=(--jobs=8)
           fi

           ./.github/scripts/run-bazel-ci.sh \

@@ -138,79 +130,6 @@ jobs:
           path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
           key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}

-  test-windows-native-main:
-    # Native Windows Bazel tests are slower and frequently approach the
-    # 30-minute PR budget. Run this only for post-merge commits to main and give
-    # it a larger timeout.
-    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
-    timeout-minutes: 40
-    runs-on: windows-latest
-    name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
-
-    steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
-
-      - name: Prepare Bazel CI
-        id: prepare_bazel
-        uses: ./.github/actions/prepare-bazel-ci
-        with:
-          target: x86_64-pc-windows-gnullvm
-          cache-scope: bazel-${{ github.job }}
-          install-test-prereqs: "true"
-
-      - name: bazel test //...
-        env:
-          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
-        shell: bash
-        run: |
-          bazel_targets=(
-            //...
-            # Keep standalone V8 library targets out of the ordinary Bazel CI
-            # path. V8 consumers under `//codex-rs/...` still participate
-            # transitively through `//...`.
-            -//third_party/v8:all
-            # Keep this aligned with the main Bazel job. The native Windows
-            # job preserves broad post-merge coverage, but code-mode/V8 tests
-            # are covered by Cargo CI rather than Bazel for now.
-            -//codex-rs/code-mode:code-mode-unit-tests
-            -//codex-rs/v8-poc:v8-poc-unit-tests
-          )
-
-          bazel_test_args=(
-            test
-            --test_tag_filters=-argument-comment-lint
-            --test_verbose_timeout_warnings
-            --build_metadata=COMMIT_SHA=${GITHUB_SHA}
-            --build_metadata=TAG_windows_native_main=true
-          )
-
-          ./.github/scripts/run-bazel-ci.sh \
-            --print-failed-action-summary \
-            --print-failed-test-logs \
-            -- \
-            "${bazel_test_args[@]}" \
-            -- \
-            "${bazel_targets[@]}"
-
-      - name: Upload Bazel execution logs
-        if: always() && !cancelled()
-        continue-on-error: true
-        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
-        with:
-          name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
-          path: ${{ runner.temp }}/bazel-execution-logs
-          if-no-files-found: ignore
-
-      # Save the job-scoped Bazel repository cache after cache misses. Keep the
-      # upload non-fatal so cache service issues never fail the job itself.
-      - name: Save bazel repository cache
-        if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
-        continue-on-error: true
-        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
-        with:
-          path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
-          key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}

   clippy:
     timeout-minutes: 30
     strategy:

@@ -251,24 +170,17 @@ jobs:
             --build_metadata=TAG_job=clippy
           )
           bazel_wrapper_args=()
-          bazel_target_list_args=()
           if [[ "${RUNNER_OS}" == "Windows" ]]; then
-            # Keep this aligned with the fast Windows Bazel test job: use
-            # Linux RBE for clippy build actions while targeting Windows
-            # gnullvm. Fork/community PRs without the BuildBuddy secret fall
-            # back inside `run-bazel-ci.sh` to the previous local Windows MSVC
-            # host-platform shape.
-            bazel_wrapper_args+=(--windows-cross-compile)
-            bazel_target_list_args+=(--windows-cross-compile)
-            if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
-              # The fork fallback can see incompatible explicit Windows-cross
-              # internal test binaries in the generated target list. Preserve
-              # the old local-fallback behavior there.
-              bazel_clippy_args+=(--skip_incompatible_explicit_targets)
-            fi
+            # Keep this aligned with the Windows Bazel test job. With the
+            # default `//:local_windows` host platform, Windows `rust_test`
+            # targets such as `//codex-rs/core:core-all-test` can be skipped
+            # by `--skip_incompatible_explicit_targets`, which hides clippy
+            # diagnostics from integration-test modules.
+            bazel_wrapper_args+=(--windows-msvc-host-platform)
+            bazel_clippy_args+=(--skip_incompatible_explicit_targets)
           fi

-          bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
+          bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh)"
           bazel_targets=()
           while IFS= read -r target; do
             bazel_targets+=("${target}")

@@ -340,12 +252,7 @@ jobs:
           # Rust debug assertions explicitly.
           bazel_wrapper_args=()
           if [[ "${RUNNER_OS}" == "Windows" ]]; then
-            # This is build-only signal, so use the same Linux-RBE
-            # cross-compile path as the fast Windows test and clippy jobs.
-            # Fork/community PRs without the BuildBuddy secret fall back
-            # inside `run-bazel-ci.sh` to the previous local Windows MSVC
-            # host-platform shape.
-            bazel_wrapper_args+=(--windows-cross-compile)
+            bazel_wrapper_args+=(--windows-msvc-host-platform)
           fi

           bazel_build_args=(
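The clippy job drives the rules_rust clippy aspect through the `clippy` config shown at the top of this diff. A minimal sketch of what one lint invocation reduces to under that assumption (the target is one the workflow comments mention; the real command line is assembled by the workflow above):

bazel build \
  --config=clippy \
  --skip_incompatible_explicit_targets \
  //codex-rs/core:core-all-test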
.github/workflows/cargo-deny.yml (vendored; 4 lines changed)
@@ -17,10 +17,10 @@ jobs:
         uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

       - name: Install Rust toolchain
-        uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
+        uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable

       - name: Run cargo-deny
         uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
         with:
-          rust-version: 1.93.0
+          rust-version: stable
           manifest-path: ./codex-rs/Cargo.toml
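Both toolchain steps in these workflows pin the action to a full commit SHA and keep the human-readable ref in a trailing comment. One way to look up the commit behind a ref before pinning (an illustration, not part of this repo's tooling):

git ls-remote https://github.com/dtolnay/rust-toolchain stable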
.github/workflows/issue-labeler.yml (vendored; 12 lines changed)
@@ -44,7 +44,7 @@ jobs:
             6. iOS — Issues with the Codex iOS app.

             - Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
-            - For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
+            - For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior.
             1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
             2. mcp — Topics involving Model Context Protocol servers/clients.
             3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.

@@ -68,15 +68,7 @@ jobs:
             21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
             22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
             23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
-            24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
-            25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
-            26. memory - Issues involving agentic memory storage and retrieval.
-            27. imagen - Issues involving image generation.
-            28. remote - Issues involving remote access, remote control, or SSH.
-            29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
-            30. automations - Issues involving scheduled automation tasks or heartbeats.
-            31. pets - Issues involving pets avatars and animations.
-            32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.
+            24. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, or plan.

             Issue number: ${{ github.event.issue.number }}
.github/workflows/rust-release.yml (vendored; 2 lines changed)
@@ -20,7 +20,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
-      - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
+      - uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
      - name: Validate tag matches Cargo.toml version
        shell: bash
        run: |
BUILD.bazel (34 lines changed)
@@ -30,40 +30,6 @@ platform(
     parents = ["@platforms//host"],
 )

-platform(
-    name = "windows_x86_64_gnullvm",
-    constraint_values = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-        "@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
-    ],
-)
-
-platform(
-    name = "windows_x86_64_msvc",
-    constraint_values = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-        "@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
-    ],
-)
-
-toolchain(
-    name = "windows_gnullvm_tests_on_msvc_host_toolchain",
-    exec_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-        "@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
-    ],
-    target_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-        "@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
-    ],
-    toolchain = "@bazel_tools//tools/test:empty_toolchain",
-    toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
-)
-
 alias(
     name = "rbe",
     actual = "@rbe_platform",
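The deleted platform() and toolchain() targets existed so gnullvm-built test binaries could execute on an MSVC-provisioned Windows host. A sketch of the invocation that tied them to the deleted .bazelrc flags above (illustrative; nothing in the repo supports this after the commit):

bazel test //... \
  --platforms=//:windows_x86_64_gnullvm \
  --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc \
  --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain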
@@ -6,6 +6,4 @@ ignore = [
     "RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
     "RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
     "RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
-    "RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
-    "RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
 ]
codex-rs/.github/workflows/cargo-audit.yml (vendored; 2 lines changed)
@@ -17,7 +17,7 @@ jobs:
     working-directory: codex-rs
     steps:
       - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
+      - uses: dtolnay/rust-toolchain@stable
      - name: Install cargo-audit
        uses: taiki-e/install-action@v2
        with:
codex-rs/Cargo.lock (generated; 68 lines changed)
@@ -1857,8 +1857,8 @@ dependencies = [
  "chrono",
  "clap",
  "codex-analytics",
+ "codex-api",
  "codex-app-server-protocol",
- "codex-app-server-transport",
  "codex-arg0",
  "codex-backend-client",
  "codex-chatgpt",

@@ -1891,17 +1891,23 @@ dependencies = [
  "codex-state",
  "codex-thread-store",
  "codex-tools",
+ "codex-uds",
  "codex-utils-absolute-path",
  "codex-utils-cargo-bin",
  "codex-utils-cli",
  "codex-utils-json-to-toml",
  "codex-utils-pty",
  "codex-utils-rustls-provider",
+ "constant_time_eq 0.3.1",
  "core_test_support",
  "flate2",
  "futures",
+ "gethostname",
+ "hmac",
+ "jsonwebtoken",
  "opentelemetry",
  "opentelemetry_sdk",
+ "owo-colors",
  "pretty_assertions",
  "reqwest",
  "rmcp",

@@ -1999,45 +2005,6 @@
  "uuid",
 ]

-[[package]]
-name = "codex-app-server-transport"
-version = "0.0.0"
-dependencies = [
- "anyhow",
- "axum",
- "base64 0.22.1",
- "chrono",
- "clap",
- "codex-api",
- "codex-app-server-protocol",
- "codex-config",
- "codex-core",
- "codex-login",
- "codex-model-provider",
- "codex-state",
- "codex-uds",
- "codex-utils-absolute-path",
- "codex-utils-rustls-provider",
- "constant_time_eq 0.3.1",
- "futures",
- "gethostname",
- "hmac",
- "jsonwebtoken",
- "owo-colors",
- "pretty_assertions",
- "serde",
- "serde_json",
- "sha2",
- "tempfile",
- "time",
- "tokio",
- "tokio-tungstenite",
- "tokio-util",
- "tracing",
- "url",
- "uuid",
-]
-
 [[package]]
 name = "codex-apply-patch"
 version = "0.0.0"

@@ -2220,7 +2187,6 @@ dependencies = [
  "opentelemetry_sdk",
  "pretty_assertions",
  "rand 0.9.3",
- "rcgen",
  "reqwest",
  "rustls",
  "rustls-native-certs",

@@ -2868,7 +2834,6 @@ dependencies = [
  "codex-plugin",
  "codex-protocol",
  "codex-utils-absolute-path",
  "codex-utils-output-truncation",
  "futures",
  "pretty_assertions",
  "regex",

@@ -2877,8 +2842,6 @@ dependencies = [
  "serde_json",
  "tempfile",
  "tokio",
  "tracing",
  "uuid",
 ]

 [[package]]

@@ -3041,23 +3004,6 @@ dependencies = [
  "wiremock",
 ]

-[[package]]
-name = "codex-memories-mcp"
-version = "0.0.0"
-dependencies = [
- "anyhow",
- "codex-utils-absolute-path",
- "codex-utils-output-truncation",
- "pretty_assertions",
- "rmcp",
- "schemars 0.8.22",
- "serde",
- "serde_json",
- "tempfile",
- "thiserror 2.0.18",
- "tokio",
-]
-
 [[package]]
 name = "codex-memories-read"
 version = "0.0.0"
@@ -8,7 +8,6 @@ members = [
     "ansi-escape",
     "async-utils",
     "app-server",
-    "app-server-transport",
     "app-server-client",
     "app-server-protocol",
     "app-server-test-client",

@@ -52,7 +51,6 @@ members = [
     "login",
     "codex-mcp",
     "mcp-server",
-    "memories/mcp",
     "memories/read",
     "memories/write",
     "model-provider-info",

@@ -129,7 +127,6 @@ codex-ansi-escape = { path = "ansi-escape" }
 codex-api = { path = "codex-api" }
 codex-aws-auth = { path = "aws-auth" }
 codex-app-server = { path = "app-server" }
-codex-app-server-transport = { path = "app-server-transport" }
 codex-app-server-client = { path = "app-server-client" }
 codex-app-server-protocol = { path = "app-server-protocol" }
 codex-app-server-test-client = { path = "app-server-test-client" }

@@ -169,7 +166,6 @@ codex-keyring-store = { path = "keyring-store" }
 codex-linux-sandbox = { path = "linux-sandbox" }
 codex-lmstudio = { path = "lmstudio" }
 codex-login = { path = "login" }
-codex-memories-mcp = { path = "memories/mcp" }
 codex-memories-read = { path = "memories/read" }
 codex-memories-write = { path = "memories/write" }
 codex-mcp = { path = "codex-mcp" }

@@ -324,10 +320,6 @@ quick-xml = "0.38.4"
 rand = "0.9"
 ratatui = "0.29.0"
 ratatui-macros = "0.6.0"
-rcgen = { version = "0.14.7", default-features = false, features = [
-    "aws_lc_rs",
-    "pem",
-] }
 regex = "1.12.3"
 regex-lite = "0.1.8"
 reqwest = { version = "0.12", features = ["cookies"] }

@@ -463,7 +455,6 @@ unwrap_used = "deny"
 [workspace.metadata.cargo-shear]
 ignored = [
     "codex-agent-graph-store",
-    "codex-memories-mcp",
     "icu_provider",
     "openssl-sys",
     "codex-utils-readiness",
@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t

 ### Notifications

-The legacy `notify` setting is deprecated and will be removed in a future release. Existing configurations still work, but new automation should use lifecycle hooks instead. The [notify documentation](../docs/config.md#notify) explains the remaining compatibility behavior. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
+You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.

 ### `codex exec` to run Codex programmatically/non-interactively
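The restored paragraph describes a hook script rather than a built-in notifier. A bare-bones macOS example in the spirit of the linked docs (hypothetical script; the payload Codex passes to a `notify` script is specified in docs/config.md#notify, not here):

#!/usr/bin/env bash
# Invoked when the agent finishes a turn; surface a desktop toast.
# terminal-notifier is the macOS helper the documentation references.
terminal-notifier -title "Codex" -message "Turn complete"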
@@ -1816,7 +1816,6 @@ async fn reducer_ingests_skill_invoked_fact() {
                     skill_name: "doc".to_string(),
                     skill_scope: codex_protocol::protocol::SkillScope::User,
                     skill_path,
-                    plugin_id: None,
                     invocation_type: InvocationType::Explicit,
                 }],
             })),

@@ -1834,10 +1833,8 @@ async fn reducer_ingests_skill_invoked_fact() {
             "event_params": {
                 "product_client_id": originator().value,
                 "skill_scope": "user",
-                "plugin_id": null,
-                "repo_url": null,
                 "thread_id": "thread-1",
                 "turn_id": "turn-1",
                 "invoke_type": "explicit",
                 "model_slug": "gpt-5"
             }

@@ -1845,41 +1842,6 @@ async fn reducer_ingests_skill_invoked_fact() {
     );
 }

-#[tokio::test]
-async fn reducer_includes_plugin_id_for_plugin_skill_invocations() {
-    let mut reducer = AnalyticsReducer::default();
-    let mut events = Vec::new();
-    let tracking = TrackEventsContext {
-        model_slug: "gpt-5".to_string(),
-        thread_id: "thread-1".to_string(),
-        turn_id: "turn-1".to_string(),
-    };
-    let skill_path =
-        PathBuf::from("/Users/abc/.codex/plugins/cache/test/sample/skills/doc/SKILL.md");
-
-    reducer
-        .ingest(
-            AnalyticsFact::Custom(CustomAnalyticsFact::SkillInvoked(SkillInvokedInput {
-                tracking,
-                invocations: vec![SkillInvocation {
-                    skill_name: "sample:doc".to_string(),
-                    skill_scope: codex_protocol::protocol::SkillScope::User,
-                    skill_path,
-                    plugin_id: Some("sample@test".to_string()),
-                    invocation_type: InvocationType::Explicit,
-                }],
-            })),
-            &mut events,
-        )
-        .await;
-
-    let payload = serde_json::to_value(&events).expect("serialize events");
-    assert_eq!(
-        payload[0]["event_params"]["plugin_id"],
-        json!("sample@test")
-    );
-}
-
 #[tokio::test]
 async fn reducer_ingests_hook_run_fact() {
     let mut reducer = AnalyticsReducer::default();
@@ -80,10 +80,8 @@ pub(crate) struct SkillInvocationEventRequest {
 pub(crate) struct SkillInvocationEventParams {
     pub(crate) product_client_id: Option<String>,
     pub(crate) skill_scope: Option<String>,
-    pub(crate) plugin_id: Option<String>,
-    pub(crate) repo_url: Option<String>,
     pub(crate) thread_id: Option<String>,
     pub(crate) turn_id: Option<String>,
     pub(crate) invoke_type: Option<InvocationType>,
     pub(crate) model_slug: Option<String>,
 }

@@ -173,7 +173,6 @@ pub struct SkillInvocation {
     pub skill_name: String,
     pub skill_scope: SkillScope,
     pub skill_path: PathBuf,
-    pub plugin_id: Option<String>,
     pub invocation_type: InvocationType,
 }

@@ -496,13 +496,11 @@ impl AnalyticsReducer {
                     skill_name: invocation.skill_name.clone(),
                     event_params: SkillInvocationEventParams {
                         thread_id: Some(tracking.thread_id.clone()),
                         turn_id: Some(tracking.turn_id.clone()),
                         invoke_type: Some(invocation.invocation_type),
                         model_slug: Some(tracking.model_slug.clone()),
                         product_client_id: Some(originator().value),
-                        repo_url,
                         skill_scope: Some(skill_scope.to_string()),
-                        plugin_id: invocation.plugin_id,
                     },
                 },
             ));
@@ -29,7 +29,6 @@ pub use codex_app_server::in_process::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY;
 pub use codex_app_server::in_process::InProcessServerEvent;
 use codex_app_server::in_process::InProcessStartArgs;
 use codex_app_server::in_process::LogDbLayer;
-pub use codex_app_server::in_process::StateDbHandle;
 use codex_app_server_protocol::ClientInfo;
 use codex_app_server_protocol::ClientNotification;
 use codex_app_server_protocol::ClientRequest;

@@ -301,15 +300,7 @@ impl fmt::Display for TypedRequestError {
                 write!(f, "{method} transport error: {source}")
             }
             Self::Server { method, source } => {
-                write!(
-                    f,
-                    "{method} failed: {} (code {})",
-                    source.message, source.code
-                )?;
-                if let Some(data) = source.data.as_ref() {
-                    write!(f, ", data: {data}")?;
-                }
-                Ok(())
+                write!(f, "{method} failed: {}", source.message)
             }
             Self::Deserialize { method, source } => {
                 write!(f, "{method} response decode error: {source}")

@@ -344,8 +335,6 @@ pub struct InProcessClientStartArgs {
     pub feedback: CodexFeedback,
     /// SQLite tracing layer used to flush recently emitted logs before feedback upload.
     pub log_db: Option<LogDbLayer>,
-    /// Process-wide SQLite state handle shared with the embedded app-server.
-    pub state_db: Option<StateDbHandle>,
     /// Environment manager used by core execution and filesystem operations.
     pub environment_manager: Arc<EnvironmentManager>,
     /// Startup warnings emitted after initialize succeeds.

@@ -407,7 +396,6 @@ impl InProcessClientStartArgs {
             thread_config_loader,
             feedback: self.feedback,
             log_db: self.log_db,
-            state_db: self.state_db,
             environment_manager: self.environment_manager,
             config_warnings: self.config_warnings,
             session_source: self.session_source,

@@ -987,7 +975,6 @@ mod tests {
             cloud_requirements: CloudRequirementsLoader::default(),
             feedback: CodexFeedback::new(),
             log_db: None,
-            state_db: None,
             environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
             config_warnings: Vec::new(),
             session_source,

@@ -1139,7 +1126,6 @@ mod tests {
         ServerNotification::ItemCompleted(codex_app_server_protocol::ItemCompletedNotification {
             thread_id: "thread".to_string(),
             turn_id: "turn".to_string(),
-            completed_at_ms: 0,
             item: codex_app_server_protocol::ThreadItem::AgentMessage {
                 id: "item".to_string(),
                 text: text.to_string(),

@@ -1929,15 +1915,11 @@ mod tests {
             method: "thread/read".to_string(),
             source: JSONRPCErrorError {
                 code: -32603,
-                data: Some(serde_json::json!({"detail": "config lock mismatch"})),
+                data: None,
                 message: "internal".to_string(),
             },
         };
         assert_eq!(std::error::Error::source(&server).is_some(), false);
-        assert_eq!(
-            server.to_string(),
-            "thread/read failed: internal (code -32603), data: {\"detail\":\"config lock mismatch\"}"
-        );

         let deserialize = TypedRequestError::Deserialize {
             method: "thread/start".to_string(),

@@ -2013,7 +1995,6 @@ mod tests {
             codex_app_server_protocol::ItemCompletedNotification {
                 thread_id: "thread".to_string(),
                 turn_id: "turn".to_string(),
-                completed_at_ms: 0,
                 item: codex_app_server_protocol::ThreadItem::AgentMessage {
                     id: "item".to_string(),
                     text: "hello".to_string(),

@@ -2064,7 +2045,6 @@ mod tests {
             cloud_requirements: CloudRequirementsLoader::default(),
             feedback: CodexFeedback::new(),
             log_db: None,
-            state_db: None,
             environment_manager: environment_manager.clone(),
             config_warnings: Vec::new(),
             session_source: SessionSource::Exec,

@@ -2104,7 +2084,6 @@ mod tests {
             cloud_requirements: CloudRequirementsLoader::default(),
             feedback: CodexFeedback::new(),
             log_db: None,
-            state_db: None,
             environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
             config_warnings: Vec::new(),
             session_source: SessionSource::Exec,
@@ -2217,25 +2217,6 @@
       ],
       "type": "object"
     },
-    "PluginSkillReadParams": {
-      "properties": {
-        "remoteMarketplaceName": {
-          "type": "string"
-        },
-        "remotePluginId": {
-          "type": "string"
-        },
-        "skillName": {
-          "type": "string"
-        }
-      },
-      "required": [
-        "remoteMarketplaceName",
-        "remotePluginId",
-        "skillName"
-      ],
-      "type": "object"
-    },
     "PluginUninstallParams": {
       "properties": {
         "pluginId": {

@@ -2265,28 +2246,6 @@
       ],
       "type": "object"
     },
-    "ProcessTerminalSize": {
-      "description": "PTY size in character cells for `process/spawn` PTY sessions.",
-      "properties": {
-        "cols": {
-          "description": "Terminal width in character cells.",
-          "format": "uint16",
-          "minimum": 0.0,
-          "type": "integer"
-        },
-        "rows": {
-          "description": "Terminal height in character cells.",
-          "format": "uint16",
-          "minimum": 0.0,
-          "type": "integer"
-        }
-      },
-      "required": [
-        "cols",
-        "rows"
-      ],
-      "type": "object"
-    },
     "RealtimeOutputModality": {
       "enum": [
         "text",

@@ -2872,28 +2831,6 @@
       "title": "CompactionResponseItem",
       "type": "object"
     },
-    {
-      "properties": {
-        "encrypted_content": {
-          "type": [
-            "string",
-            "null"
-          ]
-        },
-        "type": {
-          "enum": [
-            "context_compaction"
-          ],
-          "title": "ContextCompactionResponseItemType",
-          "type": "string"
-        }
-      },
-      "required": [
-        "type"
-      ],
-      "title": "ContextCompactionResponseItem",
-      "type": "object"
-    },
     {
       "properties": {
         "type": {

@@ -5099,30 +5036,6 @@
       "title": "Plugin/readRequest",
       "type": "object"
     },
-    {
-      "properties": {
-        "id": {
-          "$ref": "#/definitions/RequestId"
-        },
-        "method": {
-          "enum": [
-            "plugin/skill/read"
-          ],
-          "title": "Plugin/skill/readRequestMethod",
-          "type": "string"
-        },
-        "params": {
-          "$ref": "#/definitions/PluginSkillReadParams"
-        }
-      },
-      "required": [
-        "id",
-        "method",
-        "params"
-      ],
-      "title": "Plugin/skill/readRequest",
-      "type": "object"
-    },
     {
       "properties": {
         "id": {
@@ -1932,11 +1932,6 @@
     },
     "ItemCompletedNotification": {
       "properties": {
-        "completedAtMs": {
-          "description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
-          "format": "int64",
-          "type": "integer"
-        },
         "item": {
           "$ref": "#/definitions/ThreadItem"
         },

@@ -1948,7 +1943,6 @@
       }
     },
     "required": [
-      "completedAtMs",
       "item",
       "threadId",
       "turnId"

@@ -2036,11 +2030,6 @@
       "item": {
         "$ref": "#/definitions/ThreadItem"
       },
-      "startedAtMs": {
-        "description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
-        "format": "int64",
-        "type": "integer"
-      },
      "threadId": {
        "type": "string"
      },

@@ -2050,7 +2039,6 @@
     },
     "required": [
       "item",
-      "startedAtMs",
       "threadId",
       "turnId"
     ],

@@ -2415,96 +2403,6 @@
       ],
       "type": "string"
     },
-    "ProcessExitedNotification": {
-      "description": "Final process exit notification for `process/spawn`.",
-      "properties": {
-        "exitCode": {
-          "description": "Process exit code.",
-          "format": "int32",
-          "type": "integer"
-        },
-        "processHandle": {
-          "description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
-          "type": "string"
-        },
-        "stderr": {
-          "description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
-          "type": "string"
-        },
-        "stderrCapReached": {
-          "description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
-          "type": "boolean"
-        },
-        "stdout": {
-          "description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
-          "type": "string"
-        },
-        "stdoutCapReached": {
-          "description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
-          "type": "boolean"
-        }
-      },
-      "required": [
-        "exitCode",
-        "processHandle",
-        "stderr",
-        "stderrCapReached",
-        "stdout",
-        "stdoutCapReached"
-      ],
-      "type": "object"
-    },
-    "ProcessOutputDeltaNotification": {
-      "description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
-      "properties": {
-        "capReached": {
-          "description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
-          "type": "boolean"
-        },
-        "deltaBase64": {
-          "description": "Base64-encoded output bytes.",
-          "type": "string"
-        },
-        "processHandle": {
-          "description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
-          "type": "string"
-        },
-        "stream": {
-          "allOf": [
-            {
-              "$ref": "#/definitions/ProcessOutputStream"
-            }
-          ],
-          "description": "Output stream this chunk belongs to."
-        }
-      },
-      "required": [
-        "capReached",
-        "deltaBase64",
-        "processHandle",
-        "stream"
-      ],
-      "type": "object"
-    },
-    "ProcessOutputStream": {
-      "description": "Stream label for `process/outputDelta` notifications.",
-      "oneOf": [
-        {
-          "description": "stdout stream. PTY mode multiplexes terminal output here.",
-          "enum": [
-            "stdout"
-          ],
-          "type": "string"
-        },
-        {
-          "description": "stderr stream.",
-          "enum": [
-            "stderr"
-          ],
-          "type": "string"
-        }
-      ]
-    },
     "RateLimitReachedType": {
       "enum": [
         "rate_limit_reached",

@@ -5253,48 +5151,6 @@
       "title": "Command/exec/outputDeltaNotification",
       "type": "object"
     },
-    {
-      "description": "Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.",
-      "properties": {
-        "method": {
-          "enum": [
-            "process/outputDelta"
-          ],
-          "title": "Process/outputDeltaNotificationMethod",
-          "type": "string"
-        },
-        "params": {
-          "$ref": "#/definitions/ProcessOutputDeltaNotification"
-        }
-      },
-      "required": [
-        "method",
-        "params"
-      ],
-      "title": "Process/outputDeltaNotification",
-      "type": "object"
-    },
-    {
-      "description": "Final exit notification for a `process/spawn` session.",
-      "properties": {
-        "method": {
-          "enum": [
-            "process/exited"
-          ],
-          "title": "Process/exitedNotificationMethod",
-          "type": "string"
-        },
-        "params": {
-          "$ref": "#/definitions/ProcessExitedNotification"
-        }
-      },
-      "required": [
-        "method",
-        "params"
-      ],
-      "title": "Process/exitedNotification",
-      "type": "object"
-    },
     {
       "properties": {
         "method": {

@@ -6040,4 +5896,4 @@
       }
     ],
     "title": "ServerNotification"
   }
 }
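The definitions removed above described the `process/spawn` streaming surface. For orientation, here is a notification assembled from the removed schema's required fields; the payload values are invented, and only the field names come from the schema:

cat <<'JSON'
{
  "method": "process/outputDelta",
  "params": {
    "processHandle": "proc-1",
    "stream": "stdout",
    "deltaBase64": "aGVsbG8K",
    "capReached": false
  }
}
JSON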
@@ -762,30 +762,6 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -4248,48 +4224,6 @@
|
||||
"title": "Command/exec/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/outputDelta"
|
||||
],
|
||||
"title": "Process/outputDeltaNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ProcessOutputDeltaNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Final exit notification for a `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/exited"
|
||||
],
|
||||
"title": "Process/exitedNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ProcessExitedNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/exitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"method": {
|
||||
@@ -10151,11 +10085,6 @@
|
||||
"ItemCompletedNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"completedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"item": {
|
||||
"$ref": "#/definitions/v2/ThreadItem"
|
||||
},
|
||||
@@ -10167,7 +10096,6 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"completedAtMs",
|
||||
"item",
|
||||
"threadId",
|
||||
"turnId"
|
||||
@@ -10261,11 +10189,6 @@
|
||||
"item": {
|
||||
"$ref": "#/definitions/v2/ThreadItem"
|
||||
},
|
||||
"startedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -10275,7 +10198,6 @@
|
||||
},
|
||||
"required": [
|
||||
"item",
|
||||
"startedAtMs",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
@@ -11266,7 +11188,6 @@
|
||||
"properties": {
|
||||
"additionalSpeedTiers": {
|
||||
"default": [],
|
||||
"description": "Deprecated: use `serviceTiers` instead.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -11313,13 +11234,6 @@
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"serviceTiers": {
|
||||
"default": [],
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/ModelServiceTier"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"supportedReasoningEfforts": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/ReasoningEffortOption"
|
||||
@@ -11484,25 +11398,6 @@
|
||||
"title": "ModelReroutedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ModelServiceTier": {
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"description",
|
||||
"id",
|
||||
"name"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ModelUpgradeInfo": {
|
||||
"properties": {
|
||||
"migrationMarkdown": {
|
||||
@@ -12439,31 +12334,6 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -12474,7 +12344,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/PluginShareListItem"
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -12521,40 +12391,6 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
"PluginSource": {
"oneOf": [
{
@@ -12724,122 +12560,6 @@
],
"type": "object"
},
"ProcessExitedNotification": {
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Final process exit notification for `process/spawn`.",
"properties": {
"exitCode": {
"description": "Process exit code.",
"format": "int32",
"type": "integer"
},
"processHandle": {
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
"type": "string"
},
"stderr": {
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
"type": "string"
},
"stderrCapReached": {
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
"type": "boolean"
},
"stdout": {
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
"type": "string"
},
"stdoutCapReached": {
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
"type": "boolean"
}
},
"required": [
"exitCode",
"processHandle",
"stderr",
"stderrCapReached",
"stdout",
"stdoutCapReached"
],
"title": "ProcessExitedNotification",
"type": "object"
},
"ProcessOutputDeltaNotification": {
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
"properties": {
"capReached": {
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
"type": "boolean"
},
"deltaBase64": {
"description": "Base64-encoded output bytes.",
"type": "string"
},
"processHandle": {
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
"type": "string"
},
"stream": {
"allOf": [
{
"$ref": "#/definitions/v2/ProcessOutputStream"
}
],
"description": "Output stream this chunk belongs to."
}
},
"required": [
"capReached",
"deltaBase64",
"processHandle",
"stream"
],
"title": "ProcessOutputDeltaNotification",
"type": "object"
},
"ProcessOutputStream": {
"description": "Stream label for `process/outputDelta` notifications.",
"oneOf": [
{
"description": "stdout stream. PTY mode multiplexes terminal output here.",
"enum": [
"stdout"
],
"type": "string"
},
{
"description": "stderr stream.",
"enum": [
"stderr"
],
"type": "string"
}
]
},
"ProcessTerminalSize": {
"description": "PTY size in character cells for `process/spawn` PTY sessions.",
"properties": {
"cols": {
"description": "Terminal width in character cells.",
"format": "uint16",
"minimum": 0.0,
"type": "integer"
},
"rows": {
"description": "Terminal height in character cells.",
"format": "uint16",
"minimum": 0.0,
"type": "integer"
}
},
"required": [
"cols",
"rows"
],
"type": "object"
},
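The four `process/*` definitions above describe a streaming session: base64-encoded `process/outputDelta` chunks keyed by `processHandle` and `stream`, with `capReached` marking truncation at `outputBytesCap`. A sketch of a client-side accumulator under those schemas; the notification plumbing around it is assumed:

interface ProcessOutputDeltaNotification {
  processHandle: string;
  stream: "stdout" | "stderr";
  deltaBase64: string;
  capReached: boolean;
}

// Accumulate decoded output per handle and stream, remembering truncation.
const buffers = new Map<
  string,
  { stdout: string; stderr: string; truncated: boolean }
>();

function onOutputDelta(n: ProcessOutputDeltaNotification): void {
  const entry =
    buffers.get(n.processHandle) ?? { stdout: "", stderr: "", truncated: false };
  // Node's Buffer decodes the base64 payload; browsers would use atob().
  entry[n.stream] += Buffer.from(n.deltaBase64, "base64").toString("utf8");
  if (n.capReached) {
    entry.truncated = true; // final chunk for this stream was cut at outputBytesCap
  }
  buffers.set(n.processHandle, entry);
}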
"ProfileV2": {
"additionalProperties": true,
"properties": {
@@ -13988,28 +13708,6 @@
"title": "CompactionResponseItem",
"type": "object"
},
{
"properties": {
"encrypted_content": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"context_compaction"
],
"title": "ContextCompactionResponseItemType",
"type": "string"
}
},
"required": [
"type"
],
"title": "ContextCompactionResponseItem",
"type": "object"
},
{
"properties": {
"type": {
@@ -18458,4 +18156,4 @@
},
"title": "CodexAppServerProtocol",
"type": "object"
}
}
@@ -1521,30 +1521,6 @@
"title": "Plugin/readRequest",
"type": "object"
},
{
"properties": {
"id": {
"$ref": "#/definitions/RequestId"
},
"method": {
"enum": [
"plugin/skill/read"
],
"title": "Plugin/skill/readRequestMethod",
"type": "string"
},
"params": {
"$ref": "#/definitions/PluginSkillReadParams"
}
},
"required": [
"id",
"method",
"params"
],
"title": "Plugin/skill/readRequest",
"type": "object"
},
{
"properties": {
"id": {
@@ -6762,11 +6738,6 @@
"ItemCompletedNotification": {
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"completedAtMs": {
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
"format": "int64",
"type": "integer"
},
"item": {
"$ref": "#/definitions/ThreadItem"
},
@@ -6778,7 +6749,6 @@
}
},
"required": [
"completedAtMs",
"item",
"threadId",
"turnId"
@@ -6872,11 +6842,6 @@
"item": {
"$ref": "#/definitions/ThreadItem"
},
"startedAtMs": {
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
"format": "int64",
"type": "integer"
},
"threadId": {
"type": "string"
},
@@ -6886,7 +6851,6 @@
},
"required": [
"item",
"startedAtMs",
"threadId",
"turnId"
],
@@ -7877,7 +7841,6 @@
"properties": {
"additionalSpeedTiers": {
"default": [],
"description": "Deprecated: use `serviceTiers` instead.",
"items": {
"type": "string"
},
@@ -7924,13 +7887,6 @@
"model": {
"type": "string"
},
"serviceTiers": {
"default": [],
"items": {
"$ref": "#/definitions/ModelServiceTier"
},
"type": "array"
},
"supportedReasoningEfforts": {
"items": {
"$ref": "#/definitions/ReasoningEffortOption"
@@ -8095,25 +8051,6 @@
"title": "ModelReroutedNotification",
"type": "object"
},
"ModelServiceTier": {
"properties": {
"description": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
}
},
"required": [
"description",
"id",
"name"
],
"type": "object"
},
"ModelUpgradeInfo": {
"properties": {
"migrationMarkdown": {

@@ -167,31 +167,6 @@
],
"type": "object"
},
"PluginShareListItem": {
"properties": {
"localPluginPath": {
"anyOf": [
{
"$ref": "#/definitions/AbsolutePathBuf"
},
{
"type": "null"
}
]
},
"plugin": {
"$ref": "#/definitions/PluginSummary"
},
"shareUrl": {
"type": "string"
}
},
"required": [
"plugin",
"shareUrl"
],
"type": "object"
},
"PluginSource": {
"oneOf": [
{
@@ -329,7 +304,7 @@
"properties": {
"data": {
"items": {
"$ref": "#/definitions/PluginShareListItem"
"$ref": "#/definitions/PluginSummary"
},
"type": "array"
}

@@ -1,21 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"remoteMarketplaceName": {
"type": "string"
},
"remotePluginId": {
"type": "string"
},
"skillName": {
"type": "string"
}
},
"required": [
"remoteMarketplaceName",
"remotePluginId",
"skillName"
],
"title": "PluginSkillReadParams",
"type": "object"
}
@@ -1,13 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {
"contents": {
"type": [
"string",
"null"
]
}
},
"title": "PluginSkillReadResponse",
"type": "object"
}
@@ -1,41 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Final process exit notification for `process/spawn`.",
"properties": {
"exitCode": {
"description": "Process exit code.",
"format": "int32",
"type": "integer"
},
"processHandle": {
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
"type": "string"
},
"stderr": {
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
"type": "string"
},
"stderrCapReached": {
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
"type": "boolean"
},
"stdout": {
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
"type": "string"
},
"stdoutCapReached": {
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
"type": "boolean"
}
},
"required": [
"exitCode",
"processHandle",
"stderr",
"stderrCapReached",
"stdout",
"stdoutCapReached"
],
"title": "ProcessExitedNotification",
"type": "object"
}
@@ -1,55 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"ProcessOutputStream": {
"description": "Stream label for `process/outputDelta` notifications.",
"oneOf": [
{
"description": "stdout stream. PTY mode multiplexes terminal output here.",
"enum": [
"stdout"
],
"type": "string"
},
{
"description": "stderr stream.",
"enum": [
"stderr"
],
"type": "string"
}
]
}
},
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
"properties": {
"capReached": {
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
"type": "boolean"
},
"deltaBase64": {
"description": "Base64-encoded output bytes.",
"type": "string"
},
"processHandle": {
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
"type": "string"
},
"stream": {
"allOf": [
{
"$ref": "#/definitions/ProcessOutputStream"
}
],
"description": "Output stream this chunk belongs to."
}
},
"required": [
"capReached",
"deltaBase64",
"processHandle",
"stream"
],
"title": "ProcessOutputDeltaNotification",
"type": "object"
}
@@ -732,28 +732,6 @@
"title": "CompactionResponseItem",
"type": "object"
},
{
"properties": {
"encrypted_content": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"context_compaction"
],
"title": "ContextCompactionResponseItemType",
"type": "string"
}
},
"required": [
"type"
],
"title": "ContextCompactionResponseItem",
"type": "object"
},
{
"properties": {
"type": {

@@ -862,28 +862,6 @@
"title": "CompactionResponseItem",
"type": "object"
},
{
"properties": {
"encrypted_content": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"context_compaction"
],
"title": "ContextCompactionResponseItemType",
"type": "string"
}
},
"required": [
"type"
],
"title": "ContextCompactionResponseItem",
"type": "object"
},
{
"properties": {
"type": {

File diff suppressed because one or more lines are too long
@@ -14,4 +14,4 @@ export type ResponseItem = { "type": "message", role: string, content: Array<Con
/**
* Set when using the Responses API.
*/
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "context_compaction", encrypted_content?: string, } | { "type": "other" };
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "other" };
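The hunk above drops the `context_compaction` variant from the `ResponseItem` union, so any exhaustive switch on the `"type"` tag has to change with it. A sketch of the usual narrowing pattern, using a reduced stand-in union rather than the full generated type:

// Reduced stand-in for the generated ResponseItem union, for illustration only.
type Item =
  | { type: "compaction"; encrypted_content: string }
  | { type: "other" };

function describe(item: Item): string {
  switch (item.type) {
    case "compaction":
      return "compaction summary carried in encrypted_content";
    default:
      return "unhandled item kind"; // tolerate variants added or removed upstream
  }
}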

File diff suppressed because one or more lines are too long
@@ -3,8 +3,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ThreadItem } from "./ThreadItem";

export type ItemCompletedNotification = { item: ThreadItem, threadId: string, turnId: string,
/**
* Unix timestamp (in milliseconds) when this item lifecycle completed.
*/
completedAtMs: number, };
export type ItemCompletedNotification = { item: ThreadItem, threadId: string, turnId: string, };

@@ -3,8 +3,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ThreadItem } from "./ThreadItem";

export type ItemStartedNotification = { item: ThreadItem, threadId: string, turnId: string,
/**
* Unix timestamp (in milliseconds) when this item lifecycle started.
*/
startedAtMs: number, };
export type ItemStartedNotification = { item: ThreadItem, threadId: string, turnId: string, };

@@ -4,12 +4,7 @@
import type { InputModality } from "../InputModality";
import type { ReasoningEffort } from "../ReasoningEffort";
import type { ModelAvailabilityNux } from "./ModelAvailabilityNux";
import type { ModelServiceTier } from "./ModelServiceTier";
import type { ModelUpgradeInfo } from "./ModelUpgradeInfo";
import type { ReasoningEffortOption } from "./ReasoningEffortOption";

export type Model = { id: string, model: string, upgrade: string | null, upgradeInfo: ModelUpgradeInfo | null, availabilityNux: ModelAvailabilityNux | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean,
/**
* Deprecated: use `serviceTiers` instead.
*/
additionalSpeedTiers: Array<string>, serviceTiers: Array<ModelServiceTier>, isDefault: boolean, };
export type Model = { id: string, model: string, upgrade: string | null, upgradeInfo: ModelUpgradeInfo | null, availabilityNux: ModelAvailabilityNux | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, additionalSpeedTiers: Array<string>, isDefault: boolean, };
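Because this commit removes `serviceTiers` while the already-deprecated `additionalSpeedTiers` remains, a client talking to both old and new servers would need to tolerate either shape. A hedged sketch of one way to normalize the two fields; `ModelServiceTier` here mirrors the removed type:

interface ModelServiceTier { id: string; name: string; description: string; }

// Partial view of Model with only the tier-related fields.
interface ModelTiers {
  additionalSpeedTiers: Array<string>;
  serviceTiers?: Array<ModelServiceTier>; // absent on servers with this change
}

// Prefer structured tiers when present; otherwise wrap the legacy string ids.
function normalizeTiers(model: ModelTiers): Array<ModelServiceTier> {
  if (model.serviceTiers && model.serviceTiers.length > 0) {
    return model.serviceTiers;
  }
  return model.additionalSpeedTiers.map((id) => ({ id, name: id, description: "" }));
}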

@@ -1,5 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type ModelServiceTier = { id: string, name: string, description: string, };
@@ -1,7 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AbsolutePathBuf } from "../AbsolutePathBuf";
import type { PluginSummary } from "./PluginSummary";

export type PluginShareListItem = { plugin: PluginSummary, shareUrl: string, localPluginPath: AbsolutePathBuf | null, };
@@ -1,6 +1,6 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { PluginShareListItem } from "./PluginShareListItem";
import type { PluginSummary } from "./PluginSummary";

export type PluginShareListResponse = { data: Array<PluginShareListItem>, };
export type PluginShareListResponse = { data: Array<PluginSummary>, };

@@ -1,5 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadParams = { remoteMarketplaceName: string, remotePluginId: string, skillName: string, };
@@ -1,5 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadResponse = { contents: string | null, };
@@ -1,42 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

/**
* Final process exit notification for `process/spawn`.
*/
export type ProcessExitedNotification = {
/**
* Client-supplied, connection-scoped `processHandle` from `process/spawn`.
*/
processHandle: string,
/**
* Process exit code.
*/
exitCode: number,
/**
* Buffered stdout capture.
*
* Empty when stdout was streamed via `process/outputDelta`.
*/
stdout: string,
/**
* Whether stdout reached `outputBytesCap`.
*
* In streaming mode, stdout is empty and cap state is also reported on the
* final stdout `process/outputDelta` notification.
*/
stdoutCapReached: boolean,
/**
* Buffered stderr capture.
*
* Empty when stderr was streamed via `process/outputDelta`.
*/
stderr: string,
/**
* Whether stderr reached `outputBytesCap`.
*
* In streaming mode, stderr is empty and cap state is also reported on the
* final stderr `process/outputDelta` notification.
*/
stderrCapReached: boolean, };
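The exit notification above carries either buffered output or, in streaming mode, empty strings plus the cap flags. A sketch of folding it together with deltas accumulated by a `process/outputDelta` handler; pairing the two handlers this way is an assumption about how a client would wire things up:

interface ProcessExitedNotification {
  processHandle: string;
  exitCode: number;
  stdout: string;
  stderr: string;
  stdoutCapReached: boolean;
  stderrCapReached: boolean;
}

function onExited(
  n: ProcessExitedNotification,
  streamed: Map<string, { stdout: string; stderr: string }>,
): { exitCode: number; stdout: string; stderr: string } {
  // Streaming mode leaves stdout/stderr empty here; fall back to what the
  // process/outputDelta handler accumulated for this handle.
  const fromDeltas = streamed.get(n.processHandle);
  return {
    exitCode: n.exitCode,
    stdout: n.stdout || fromDeltas?.stdout || "",
    stderr: n.stderr || fromDeltas?.stderr || "",
  };
}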
@@ -1,26 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ProcessOutputStream } from "./ProcessOutputStream";

/**
* Base64-encoded output chunk emitted for a streaming `process/spawn` request.
*/
export type ProcessOutputDeltaNotification = {
/**
* Client-supplied, connection-scoped `processHandle` from `process/spawn`.
*/
processHandle: string,
/**
* Output stream this chunk belongs to.
*/
stream: ProcessOutputStream,
/**
* Base64-encoded output bytes.
*/
deltaBase64: string,
/**
* True on the final streamed chunk for this stream when output was
* truncated by `outputBytesCap`.
*/
capReached: boolean, };
@@ -1,8 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

/**
* Stream label for `process/outputDelta` notifications.
*/
export type ProcessOutputStream = "stdout" | "stderr";
@@ -1,16 +0,0 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

/**
* PTY size in character cells for `process/spawn` PTY sessions.
*/
export type ProcessTerminalSize = {
/**
* Terminal height in character cells.
*/
rows: number,
/**
* Terminal width in character cells.
*/
cols: number, };
@@ -245,7 +245,6 @@ export type { ModelProviderCapabilitiesReadParams } from "./ModelProviderCapabil
export type { ModelProviderCapabilitiesReadResponse } from "./ModelProviderCapabilitiesReadResponse";
export type { ModelRerouteReason } from "./ModelRerouteReason";
export type { ModelReroutedNotification } from "./ModelReroutedNotification";
export type { ModelServiceTier } from "./ModelServiceTier";
export type { ModelUpgradeInfo } from "./ModelUpgradeInfo";
export type { ModelVerification } from "./ModelVerification";
export type { ModelVerificationNotification } from "./ModelVerificationNotification";
@@ -284,22 +283,15 @@ export type { PluginReadParams } from "./PluginReadParams";
export type { PluginReadResponse } from "./PluginReadResponse";
export type { PluginShareDeleteParams } from "./PluginShareDeleteParams";
export type { PluginShareDeleteResponse } from "./PluginShareDeleteResponse";
export type { PluginShareListItem } from "./PluginShareListItem";
export type { PluginShareListParams } from "./PluginShareListParams";
export type { PluginShareListResponse } from "./PluginShareListResponse";
export type { PluginShareSaveParams } from "./PluginShareSaveParams";
export type { PluginShareSaveResponse } from "./PluginShareSaveResponse";
export type { PluginSkillReadParams } from "./PluginSkillReadParams";
export type { PluginSkillReadResponse } from "./PluginSkillReadResponse";
export type { PluginSource } from "./PluginSource";
export type { PluginSummary } from "./PluginSummary";
export type { PluginUninstallParams } from "./PluginUninstallParams";
export type { PluginUninstallResponse } from "./PluginUninstallResponse";
export type { PluginsMigration } from "./PluginsMigration";
export type { ProcessExitedNotification } from "./ProcessExitedNotification";
export type { ProcessOutputDeltaNotification } from "./ProcessOutputDeltaNotification";
export type { ProcessOutputStream } from "./ProcessOutputStream";
export type { ProcessTerminalSize } from "./ProcessTerminalSize";
export type { ProfileV2 } from "./ProfileV2";
export type { RateLimitReachedType } from "./RateLimitReachedType";
export type { RateLimitSnapshot } from "./RateLimitSnapshot";

@@ -80,7 +80,6 @@ pub enum ClientRequestSerializationScope {
Thread { thread_id: String },
ThreadPath { path: PathBuf },
CommandExecProcess { process_id: String },
Process { process_handle: String },
FuzzyFileSearchSession { session_id: String },
FsWatch { watch_id: String },
McpOauth { server_name: String },
@@ -128,11 +127,6 @@
process_id: $actual_params.$field.clone(),
})
};
($actual_params:ident, process_handle($params:ident . $field:ident)) => {
Some(ClientRequestSerializationScope::Process {
process_handle: $actual_params.$field.clone(),
})
};
($actual_params:ident, fuzzy_session_id($params:ident . $field:ident)) => {
Some(ClientRequestSerializationScope::FuzzyFileSearchSession {
session_id: $actual_params.$field.clone(),
@@ -618,11 +612,6 @@
serialization: global("config"),
response: v2::PluginReadResponse,
},
PluginSkillRead => "plugin/skill/read" {
params: v2::PluginSkillReadParams,
serialization: global("config"),
response: v2::PluginSkillReadResponse,
},
PluginShareSave => "plugin/share/save" {
params: v2::PluginShareSaveParams,
serialization: global("config"),
@@ -906,34 +895,6 @@
serialization: command_process_id(params.process_id),
response: v2::CommandExecResizeResponse,
},
#[experimental("process/spawn")]
/// Spawn a standalone process (argv vector) without a Codex sandbox.
ProcessSpawn => "process/spawn" {
params: v2::ProcessSpawnParams,
serialization: process_handle(params.process_handle),
response: v2::ProcessSpawnResponse,
},
#[experimental("process/writeStdin")]
/// Write stdin bytes to a running `process/spawn` session or close stdin.
ProcessWriteStdin => "process/writeStdin" {
params: v2::ProcessWriteStdinParams,
serialization: process_handle(params.process_handle),
response: v2::ProcessWriteStdinResponse,
},
#[experimental("process/kill")]
/// Terminate a running `process/spawn` session by client-supplied `processHandle`.
ProcessKill => "process/kill" {
params: v2::ProcessKillParams,
serialization: process_handle(params.process_handle),
response: v2::ProcessKillResponse,
},
#[experimental("process/resizePty")]
/// Resize a running PTY-backed `process/spawn` session by client-supplied `processHandle`.
ProcessResizePty => "process/resizePty" {
params: v2::ProcessResizePtyParams,
serialization: process_handle(params.process_handle),
response: v2::ProcessResizePtyResponse,
},
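The four request definitions above (`process/spawn`, `process/writeStdin`, `process/kill`, `process/resizePty`) all key on a client-supplied `processHandle`. A sketch of the lifecycle from the client side; the param shapes beyond the handle are simplified assumptions, not the full generated types:

declare function sendRequest<T>(method: string, params: unknown): Promise<T>;

// Spawn, feed stdin, resize the PTY, then terminate. Field names other than
// processHandle are illustrative placeholders.
async function runInteractive(handle: string): Promise<void> {
  await sendRequest("process/spawn", {
    processHandle: handle,
    command: ["bash", "-i"],
    pty: { rows: 24, cols: 80 },
  });
  await sendRequest("process/writeStdin", {
    processHandle: handle,
    dataBase64: Buffer.from("echo hello\n").toString("base64"),
  });
  await sendRequest("process/resizePty", {
    processHandle: handle,
    size: { rows: 40, cols: 120 },
  });
  await sendRequest("process/kill", { processHandle: handle });
}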
|
||||
|
||||
ConfigRead => "config/read" {
|
||||
params: v2::ConfigReadParams,
|
||||
@@ -1435,12 +1396,6 @@ server_notification_definitions! {
|
||||
PlanDelta => "item/plan/delta" (v2::PlanDeltaNotification),
|
||||
/// Stream base64-encoded stdout/stderr chunks for a running `command/exec` session.
|
||||
CommandExecOutputDelta => "command/exec/outputDelta" (v2::CommandExecOutputDeltaNotification),
|
||||
/// Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.
|
||||
#[experimental("process/outputDelta")]
|
||||
ProcessOutputDelta => "process/outputDelta" (v2::ProcessOutputDeltaNotification),
|
||||
/// Final exit notification for a `process/spawn` session.
|
||||
#[experimental("process/exited")]
|
||||
ProcessExited => "process/exited" (v2::ProcessExitedNotification),
|
||||
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
|
||||
TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
|
||||
/// Deprecated legacy apply_patch output stream notification.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::protocol::common::ServerNotification;
|
||||
use crate::protocol::item_builders::build_command_execution_begin_item;
|
||||
use crate::protocol::item_builders::build_command_execution_end_item;
|
||||
use crate::protocol::item_builders::build_file_change_begin_item;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use crate::protocol::v2::AgentMessageDeltaNotification;
|
||||
use crate::protocol::v2::CollabAgentState;
|
||||
@@ -12,6 +13,9 @@ use crate::protocol::v2::DynamicToolCallStatus;
|
||||
use crate::protocol::v2::FileChangePatchUpdatedNotification;
|
||||
use crate::protocol::v2::ItemCompletedNotification;
|
||||
use crate::protocol::v2::ItemStartedNotification;
|
||||
use crate::protocol::v2::McpToolCallError;
|
||||
use crate::protocol::v2::McpToolCallResult;
|
||||
use crate::protocol::v2::McpToolCallStatus;
|
||||
use crate::protocol::v2::PlanDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryPartAddedNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryTextDeltaNotification;
|
||||
@@ -20,6 +24,7 @@ use crate::protocol::v2::TerminalInteractionNotification;
|
||||
use crate::protocol::v2::ThreadItem;
|
||||
use codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem as CoreDynamicToolCallOutputContentItem;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Build the v2 app-server notification that directly corresponds to a single core event.
|
||||
@@ -69,7 +74,64 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id: response.turn_id,
|
||||
item,
|
||||
completed_at_ms: response.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::McpToolCallBegin(begin_event) => {
|
||||
let item = ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
mcp_app_resource_uri: begin_event.mcp_app_resource_uri,
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::McpToolCallEnd(end_event) => {
|
||||
let status = if end_event.is_success() {
|
||||
McpToolCallStatus::Completed
|
||||
} else {
|
||||
McpToolCallStatus::Failed
|
||||
};
|
||||
let duration_ms = i64::try_from(end_event.duration.as_millis()).ok();
|
||||
let (result, error) = match &end_event.result {
|
||||
Ok(value) => (
|
||||
Some(Box::new(McpToolCallResult {
|
||||
content: value.content.clone(),
|
||||
structured_content: value.structured_content.clone(),
|
||||
meta: value.meta.clone(),
|
||||
})),
|
||||
None,
|
||||
),
|
||||
Err(message) => (
|
||||
None,
|
||||
Some(McpToolCallError {
|
||||
message: message.clone(),
|
||||
}),
|
||||
),
|
||||
};
|
||||
let item = ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status,
|
||||
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
mcp_app_resource_uri: end_event.mcp_app_resource_uri,
|
||||
result,
|
||||
error,
|
||||
duration_ms,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnBegin(begin_event) => {
|
||||
@@ -88,7 +150,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnEnd(end_event) => {
|
||||
@@ -127,7 +188,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionBegin(begin_event) => {
|
||||
@@ -147,7 +207,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionEnd(end_event) => {
|
||||
@@ -175,7 +234,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingBegin(begin_event) => {
|
||||
@@ -199,7 +257,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingEnd(end_event) => {
|
||||
@@ -235,7 +292,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseBegin(begin_event) => {
|
||||
@@ -254,7 +310,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseEnd(end_event) => {
|
||||
@@ -287,7 +342,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeBegin(begin_event) => {
|
||||
@@ -306,7 +360,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeEnd(end_event) => {
|
||||
@@ -339,7 +392,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::AgentMessageContentDelta(event) => {
|
||||
@@ -389,7 +441,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_started_event.item.into(),
|
||||
started_at_ms: item_started_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::ItemCompleted(item_completed_event) => {
|
||||
@@ -397,7 +448,13 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_completed_event.item.into(),
|
||||
completed_at_ms: item_completed_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyBegin(patch_begin_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_file_change_begin_item(&patch_begin_event),
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyUpdated(event) => {
|
||||
@@ -413,7 +470,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_begin_item(&exec_command_begin_event),
|
||||
started_at_ms: exec_command_begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => {
|
||||
@@ -442,7 +498,6 @@ pub fn item_event_to_server_notification(
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_end_item(&exec_command_end_event),
|
||||
completed_at_ms: exec_command_end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
_ => unreachable!("unsupported item event"),
|
||||
@@ -453,11 +508,17 @@ pub fn item_event_to_server_notification(
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::mcp::CallToolResult;
|
||||
use codex_protocol::protocol::CollabResumeBeginEvent;
|
||||
use codex_protocol::protocol::CollabResumeEndEvent;
|
||||
use codex_protocol::protocol::ExecCommandOutputDeltaEvent;
|
||||
use codex_protocol::protocol::ExecOutputStream;
|
||||
use codex_protocol::protocol::McpInvocation;
|
||||
use codex_protocol::protocol::McpToolCallBeginEvent;
|
||||
use codex_protocol::protocol::McpToolCallEndEvent;
|
||||
use pretty_assertions::assert_eq;
|
||||
use rmcp::model::Content;
|
||||
use std::time::Duration;
|
||||
|
||||
fn assert_item_started_server_notification(
|
||||
notification: ServerNotification,
|
||||
@@ -495,7 +556,6 @@ mod tests {
|
||||
fn collab_resume_begin_maps_to_item_started_resume_agent() {
|
||||
let event = CollabResumeBeginEvent {
|
||||
call_id: "call-1".to_string(),
|
||||
started_at_ms: 123,
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
@@ -512,7 +572,6 @@ mod tests {
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn-1".to_string(),
|
||||
started_at_ms: event.started_at_ms,
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
@@ -532,7 +591,6 @@ mod tests {
|
||||
fn collab_resume_end_maps_to_item_completed_resume_agent() {
|
||||
let event = CollabResumeEndEvent {
|
||||
call_id: "call-2".to_string(),
|
||||
completed_at_ms: 456,
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
@@ -551,7 +609,6 @@ mod tests {
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-2".to_string(),
|
||||
turn_id: "turn-2".to_string(),
|
||||
completed_at_ms: event.completed_at_ms,
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
@@ -572,6 +629,179 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_begin_maps_to_item_started_notification_with_args() {
|
||||
let begin_event = McpToolCallBeginEvent {
|
||||
call_id: "call_123".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: Some(serde_json::json!({"server": ""})),
|
||||
},
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallBegin(begin_event.clone()),
|
||||
"thread-1",
|
||||
"turn_1",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn_1".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: serde_json::json!({"server": ""}),
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_begin_maps_to_item_started_notification_without_args() {
|
||||
let begin_event = McpToolCallBeginEvent {
|
||||
call_id: "call_456".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: None,
|
||||
},
|
||||
mcp_app_resource_uri: None,
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallBegin(begin_event.clone()),
|
||||
"thread-2",
|
||||
"turn_2",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-2".to_string(),
|
||||
turn_id: "turn_2".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_end_maps_to_item_completed_notification_on_success() {
|
||||
let content = vec![
|
||||
serde_json::to_value(Content::text("{\"resources\":[]}"))
|
||||
.expect("content should serialize"),
|
||||
];
|
||||
let result = CallToolResult {
|
||||
content: content.clone(),
|
||||
is_error: Some(false),
|
||||
structured_content: None,
|
||||
meta: Some(serde_json::json!({
|
||||
"ui/resourceUri": "ui://widget/list-resources.html"
|
||||
})),
|
||||
};
|
||||
|
||||
let end_event = McpToolCallEndEvent {
|
||||
call_id: "call_789".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: Some(serde_json::json!({"server": ""})),
|
||||
},
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
duration: Duration::from_nanos(92708),
|
||||
result: Ok(result),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallEnd(end_event.clone()),
|
||||
"thread-3",
|
||||
"turn_3",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-3".to_string(),
|
||||
turn_id: "turn_3".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status: McpToolCallStatus::Completed,
|
||||
arguments: serde_json::json!({"server": ""}),
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
result: Some(Box::new(McpToolCallResult {
|
||||
content,
|
||||
structured_content: None,
|
||||
meta: Some(serde_json::json!({
|
||||
"ui/resourceUri": "ui://widget/list-resources.html"
|
||||
})),
|
||||
})),
|
||||
error: None,
|
||||
duration_ms: Some(0),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_end_maps_to_item_completed_notification_on_error() {
|
||||
let end_event = McpToolCallEndEvent {
|
||||
call_id: "call_err".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: None,
|
||||
},
|
||||
mcp_app_resource_uri: None,
|
||||
duration: Duration::from_millis(1),
|
||||
result: Err("boom".to_string()),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallEnd(end_event.clone()),
|
||||
"thread-4",
|
||||
"turn_4",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-4".to_string(),
|
||||
turn_id: "turn_4".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status: McpToolCallStatus::Failed,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: None,
|
||||
error: Some(McpToolCallError {
|
||||
message: "boom".to_string(),
|
||||
}),
|
||||
duration_ms: Some(1),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exec_command_output_delta_maps_to_command_execution_output_delta() {
|
||||
let notification = item_event_to_server_notification(
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
//! Shared builders for app-server [`ThreadItem`] values derived from compatibility events.
|
||||
//! Shared builders for synthetic [`ThreadItem`] values emitted by the app-server layer.
|
||||
//!
|
||||
//! Most live tool items now come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! These builders remain for approval flows, rebuilt legacy history, and other pre-execution
|
||||
//! paths where the underlying tool has not started or never starts at all.
|
||||
//! These items do not come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! Instead, the app-server synthesizes them so clients can render a coherent lifecycle for
|
||||
//! approvals and other pre-execution flows before the underlying tool has started or when the
|
||||
//! tool never starts at all.
|
||||
//!
|
||||
//! Keeping these builders in one place is useful for two reasons:
|
||||
//! - Live notifications and rebuilt `thread/read` history both need to construct the same
|
||||
|
||||
@@ -356,10 +356,7 @@ impl ThreadHistoryBuilder {
            | codex_protocol::items::TurnItem::AgentMessage(_)
            | codex_protocol::items::TurnItem::Reasoning(_)
            | codex_protocol::items::TurnItem::WebSearch(_)
            | codex_protocol::items::TurnItem::ImageView(_)
            | codex_protocol::items::TurnItem::ImageGeneration(_)
            | codex_protocol::items::TurnItem::FileChange(_)
            | codex_protocol::items::TurnItem::McpToolCall(_)
            | codex_protocol::items::TurnItem::ContextCompaction(_) => {}
        }
    }
@@ -380,10 +377,7 @@ impl ThreadHistoryBuilder {
            | codex_protocol::items::TurnItem::AgentMessage(_)
            | codex_protocol::items::TurnItem::Reasoning(_)
            | codex_protocol::items::TurnItem::WebSearch(_)
            | codex_protocol::items::TurnItem::ImageView(_)
            | codex_protocol::items::TurnItem::ImageGeneration(_)
            | codex_protocol::items::TurnItem::FileChange(_)
            | codex_protocol::items::TurnItem::McpToolCall(_)
            | codex_protocol::items::TurnItem::ContextCompaction(_) => {}
        }
    }
@@ -1356,7 +1350,6 @@ mod tests {
        id: "user-item-id".to_string(),
        content: Vec::new(),
    }),
    started_at_ms: 0,
}),
EventMsg::TurnComplete(TurnCompleteEvent {
    turn_id: turn_id.to_string(),
@@ -1821,7 +1814,6 @@ mod tests {
    call_id: "exec-1".into(),
    process_id: Some("pid-1".into()),
    turn_id: "turn-1".into(),
    completed_at_ms: 0,
    command: vec!["echo".into(), "hello world".into()],
    cwd: test_path_buf("/tmp").abs(),
    parsed_cmd: vec![ParsedCommand::Unknown {
@@ -1985,7 +1977,6 @@ mod tests {
    codex_protocol::dynamic_tools::DynamicToolCallRequest {
        call_id: "dyn-1".into(),
        turn_id: "turn-1".into(),
        started_at_ms: 0,
        namespace: Some("codex_app".into()),
        tool: "lookup_ticket".into(),
        arguments: serde_json::json!({"id":"ABC-123"}),
@@ -1994,7 +1985,6 @@ mod tests {
    EventMsg::DynamicToolCallResponse(DynamicToolCallResponseEvent {
        call_id: "dyn-1".into(),
        turn_id: "turn-1".into(),
        completed_at_ms: 0,
        namespace: Some("codex_app".into()),
        tool: "lookup_ticket".into(),
        arguments: serde_json::json!({"id":"ABC-123"}),
@@ -2050,7 +2040,6 @@ mod tests {
    call_id: "exec-declined".into(),
    process_id: Some("pid-2".into()),
    turn_id: "turn-1".into(),
    completed_at_ms: 0,
    command: vec!["ls".into()],
    cwd: test_path_buf("/tmp").abs(),
    parsed_cmd: vec![ParsedCommand::Unknown { cmd: "ls".into() }],
@@ -2298,7 +2287,6 @@ mod tests {
    call_id: "exec-late".into(),
    process_id: Some("pid-42".into()),
    turn_id: "turn-a".into(),
    completed_at_ms: 0,
    command: vec!["echo".into(), "done".into()],
    cwd: test_path_buf("/tmp").abs(),
    parsed_cmd: vec![ParsedCommand::Unknown {
@@ -2390,7 +2378,6 @@ mod tests {
    call_id: "exec-unknown-turn".into(),
    process_id: Some("pid-42".into()),
    turn_id: "turn-missing".into(),
    completed_at_ms: 0,
    command: vec!["echo".into(), "done".into()],
    cwd: test_path_buf("/tmp").abs(),
    parsed_cmd: vec![ParsedCommand::Unknown {
@@ -2739,7 +2726,6 @@ mod tests {
}),
EventMsg::CollabResumeEnd(codex_protocol::protocol::CollabResumeEndEvent {
    call_id: "resume-1".into(),
    completed_at_ms: 0,
    sender_thread_id: ThreadId::try_from("00000000-0000-0000-0000-000000000001")
        .expect("valid sender thread id"),
    receiver_thread_id: ThreadId::try_from("00000000-0000-0000-0000-000000000002")
@@ -2796,7 +2782,6 @@ mod tests {
}),
EventMsg::CollabAgentSpawnEnd(codex_protocol::protocol::CollabAgentSpawnEndEvent {
    call_id: "spawn-1".into(),
    completed_at_ms: 0,
    sender_thread_id,
    new_thread_id: Some(spawned_thread_id),
    new_agent_nickname: Some("Scout".into()),
@@ -2858,7 +2843,6 @@ mod tests {
EventMsg::CollabAgentInteractionBegin(
    codex_protocol::protocol::CollabAgentInteractionBeginEvent {
        call_id: "send-1".into(),
        started_at_ms: 0,
        sender_thread_id: sender,
        receiver_thread_id: receiver,
        prompt: "new task".into(),
@@ -2867,7 +2851,6 @@ mod tests {
EventMsg::CollabAgentInteractionEnd(
    codex_protocol::protocol::CollabAgentInteractionEndEvent {
        call_id: "send-1".into(),
        completed_at_ms: 0,
        sender_thread_id: sender,
        receiver_thread_id: receiver,
        receiver_agent_nickname: None,

@@ -5,7 +5,6 @@ use std::path::PathBuf;

use crate::RequestId;
use crate::protocol::common::AuthMode;
use crate::protocol::item_builders::convert_patch_changes;
use codex_experimental_api_macros::ExperimentalApi;
use codex_protocol::account::PlanType;
use codex_protocol::account::ProviderAccount;
@@ -31,8 +30,6 @@ use codex_protocol::config_types::Verbosity;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::config_types::WebSearchToolConfig;
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
use codex_protocol::items::McpToolCallError as CoreMcpToolCallError;
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
use codex_protocol::items::TurnItem as CoreTurnItem;
use codex_protocol::mcp::CallToolResult as CoreMcpCallToolResult;
use codex_protocol::mcp::Resource as McpResource;
@@ -2511,15 +2508,6 @@ impl From<CoreModelAvailabilityNux> for ModelAvailabilityNux {
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelServiceTier {
    pub id: String,
    pub name: String,
    pub description: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -2538,11 +2526,8 @@ pub struct Model {
    pub input_modalities: Vec<InputModality>,
    #[serde(default)]
    pub supports_personality: bool,
    /// Deprecated: use `serviceTiers` instead.
    #[serde(default)]
    pub additional_speed_tiers: Vec<String>,
    #[serde(default)]
    pub service_tiers: Vec<ModelServiceTier>,
    // Only one model should be marked as default.
    pub is_default: bool,
}
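The hunk above replaces the deprecated `additional_speed_tiers` list of bare strings with structured `service_tiers`. An abridged, illustrative wire payload — field values are invented, and `Model` has more required fields than shown:

let model = serde_json::json!({
    "supportsPersonality": false,
    // Deprecated: kept for old clients; mirrors the `serviceTiers` ids.
    "additionalSpeedTiers": ["priority"],
    "serviceTiers": [
        { "id": "priority", "name": "Priority", "description": "Lower-latency processing" }
    ],
    "isDefault": true
});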
@@ -2797,24 +2782,6 @@ impl From<CoreMcpCallToolResult> for McpServerToolCallResponse {
    }
}

impl From<CoreMcpCallToolResult> for McpToolCallResult {
    fn from(result: CoreMcpCallToolResult) -> Self {
        Self {
            content: result.content,
            structured_content: result.structured_content,
            meta: result.meta,
        }
    }
}

impl From<CoreMcpToolCallError> for McpToolCallError {
    fn from(error: CoreMcpToolCallError) -> Self {
        Self {
            message: error.message,
        }
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -3570,204 +3537,6 @@ pub enum CommandExecOutputStream {
    Stderr,
}

/// PTY size in character cells for `process/spawn` PTY sessions.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessTerminalSize {
    /// Terminal height in character cells.
    pub rows: u16,
    /// Terminal width in character cells.
    pub cols: u16,
}

/// Spawn a standalone process (argv vector) without a Codex sandbox on the host
/// where the app server is running.
///
/// `process/spawn` returns after the process has started and the connection-scoped
/// `processHandle` has been registered. Process output and exit are reported via
/// `process/outputDelta` and `process/exited` notifications.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessSpawnParams {
    /// Command argv vector. Empty arrays are rejected.
    pub command: Vec<String>,
    /// Client-supplied, connection-scoped process handle.
    ///
    /// Duplicate active handles are rejected on the same connection. The same
    /// handle can be reused after the prior process exits.
    pub process_handle: String,
    /// Absolute working directory for the process.
    pub cwd: AbsolutePathBuf,
    /// Enable PTY mode.
    ///
    /// This implies `streamStdin` and `streamStdoutStderr`.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub tty: bool,
    /// Allow follow-up `process/writeStdin` requests to write stdin bytes.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub stream_stdin: bool,
    /// Stream stdout/stderr via `process/outputDelta` notifications.
    ///
    /// Streamed bytes are not duplicated into the `process/exited` notification.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub stream_stdout_stderr: bool,
    /// Optional per-stream stdout/stderr capture cap in bytes.
    ///
    /// When omitted, the server default applies. Set to `null` to disable the
    /// cap.
    #[serde(
        default,
        deserialize_with = "super::serde_helpers::deserialize_double_option",
        serialize_with = "super::serde_helpers::serialize_double_option",
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(type = "number | null")]
    #[ts(optional = nullable)]
    pub output_bytes_cap: Option<Option<usize>>,
    /// Optional timeout in milliseconds.
    ///
    /// When omitted, the server default applies. Set to `null` to disable the
    /// timeout.
    #[serde(
        default,
        deserialize_with = "super::serde_helpers::deserialize_double_option",
        serialize_with = "super::serde_helpers::serialize_double_option",
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(type = "number | null")]
    #[ts(optional = nullable)]
    pub timeout_ms: Option<Option<i64>>,
    /// Optional environment overrides merged into the app-server process
    /// environment.
    ///
    /// Matching names override inherited values. Set a key to `null` to unset
    /// an inherited variable.
    #[ts(optional = nullable)]
    pub env: Option<HashMap<String, Option<String>>>,
    /// Optional initial PTY size in character cells. Only valid when `tty` is
    /// true.
    #[ts(optional = nullable)]
    pub size: Option<ProcessTerminalSize>,
}
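`Option<Option<T>>` plus the `deserialize_double_option` helper gives `outputBytesCap` and `timeoutMs` three distinguishable states. The helper itself is not shown in this diff; a minimal stand-in with the same behavior would be:

use serde::Deserialize;
use serde::Deserializer;

// Key absent -> field stays `None` via `#[serde(default)]` (server default applies).
// Key: null  -> `Some(None)` (explicitly disable the cap/timeout).
// Key: value -> `Some(Some(value))`.
fn deserialize_double_option<'de, T, D>(
    deserializer: D,
) -> Result<Option<Option<T>>, D::Error>
where
    T: Deserialize<'de>,
    D: Deserializer<'de>,
{
    // This function only runs when the key is present, so wrapping the parsed
    // `Option<T>` (which is `None` for JSON `null`) in `Some` is sufficient.
    Option::<T>::deserialize(deserializer).map(Some)
}

The round-trip test `process_spawn_params_distinguish_omitted_null_and_value_limits` later in this diff exercises exactly these three states.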

/// Successful response for `process/spawn`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessSpawnResponse {}

/// Write stdin bytes to a running `process/spawn` session, close stdin, or
/// both.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessWriteStdinParams {
    /// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
    pub process_handle: String,
    /// Optional base64-encoded stdin bytes to write.
    #[ts(optional = nullable)]
    pub delta_base64: Option<String>,
    /// Close stdin after writing `deltaBase64`, if present.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub close_stdin: bool,
}

/// Empty success response for `process/writeStdin`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessWriteStdinResponse {}

/// Terminate a running `process/spawn` session.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessKillParams {
    /// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
    pub process_handle: String,
}

/// Empty success response for `process/kill`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessKillResponse {}

/// Resize a running PTY-backed `process/spawn` session.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessResizePtyParams {
    /// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
    pub process_handle: String,
    /// New PTY size in character cells.
    pub size: ProcessTerminalSize,
}

/// Empty success response for `process/resizePty`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessResizePtyResponse {}

/// Stream label for `process/outputDelta` notifications.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub enum ProcessOutputStream {
    /// stdout stream. PTY mode multiplexes terminal output here.
    Stdout,
    /// stderr stream.
    Stderr,
}

/// Base64-encoded output chunk emitted for a streaming `process/spawn` request.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessOutputDeltaNotification {
    /// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
    pub process_handle: String,
    /// Output stream this chunk belongs to.
    pub stream: ProcessOutputStream,
    /// Base64-encoded output bytes.
    pub delta_base64: String,
    /// True on the final streamed chunk for this stream when output was
    /// truncated by `outputBytesCap`.
    pub cap_reached: bool,
}

/// Final process exit notification for `process/spawn`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ProcessExitedNotification {
    /// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
    pub process_handle: String,
    /// Process exit code.
    pub exit_code: i32,
    /// Buffered stdout capture.
    ///
    /// Empty when stdout was streamed via `process/outputDelta`.
    pub stdout: String,
    /// Whether stdout reached `outputBytesCap`.
    ///
    /// In streaming mode, stdout is empty and cap state is also reported on the
    /// final stdout `process/outputDelta` notification.
    pub stdout_cap_reached: bool,
    /// Buffered stderr capture.
    ///
    /// Empty when stderr was streamed via `process/outputDelta`.
    pub stderr: String,
    /// Whether stderr reached `outputBytesCap`.
    ///
    /// In streaming mode, stderr is empty and cap state is also reported on the
    /// final stderr `process/outputDelta` notification.
    pub stderr_cap_reached: bool,
}

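Taken together, the types above describe one session lifecycle. A sketch of the wire traffic for a PTY session, assuming standard JSON-RPC 2.0 framing (handle, command, and byte values are invented):

use serde_json::json;

let spawn_request = json!({
    "jsonrpc": "2.0",
    "id": 1,
    "method": "process/spawn",
    "params": {
        "command": ["bash", "-i"],
        "processHandle": "shell-1",
        "cwd": "/tmp",
        "tty": true,                        // implies streamStdin + streamStdoutStderr
        "size": { "rows": 40, "cols": 120 }
    }
});
// The server answers `{}` once the handle is registered, then streams output:
let output_delta = json!({
    "jsonrpc": "2.0",
    "method": "process/outputDelta",
    "params": {
        "processHandle": "shell-1",
        "stream": "stdout",
        "deltaBase64": "JCA=",              // base64 for "$ "
        "capReached": false
    }
});
// Because output was streamed, the final notification carries empty buffers:
let exited = json!({
    "jsonrpc": "2.0",
    "method": "process/exited",
    "params": {
        "processHandle": "shell-1",
        "exitCode": 0,
        "stdout": "",
        "stdoutCapReached": false,
        "stderr": "",
        "stderrCapReached": false
    }
});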
// === Threads, Turns, and Items ===
// Thread APIs
#[derive(
@@ -3841,9 +3610,8 @@ pub struct ThreadStartParams {
    #[experimental("thread/start.experimentalRawEvents")]
    #[serde(default)]
    pub experimental_raw_events: bool,
    /// If true, persist additional EventMsg variants to the rollout file.
    /// However, `thread/read`, `thread/resume`, and `thread/fork` still only
    /// return the limited form of thread history for scalability reasons.
    /// If true, persist additional rollout EventMsg variants required to
    /// reconstruct a richer thread history on resume/fork/read.
    #[experimental("thread/start.persistFullHistory")]
    #[serde(default)]
    pub persist_extended_history: bool,
@@ -3973,9 +3741,8 @@ pub struct ThreadResumeParams {
    #[experimental("thread/resume.excludeTurns")]
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub exclude_turns: bool,
    /// If true, persist additional EventMsg variants to the rollout file.
    /// However, `thread/read`, `thread/resume`, and `thread/fork` still only
    /// return the limited form of thread history for scalability reasons.
    /// If true, persist additional rollout EventMsg variants required to
    /// reconstruct a richer thread history on subsequent resume/fork/read.
    #[experimental("thread/resume.persistFullHistory")]
    #[serde(default)]
    pub persist_extended_history: bool,
@@ -4079,9 +3846,8 @@ pub struct ThreadForkParams {
    #[experimental("thread/fork.excludeTurns")]
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub exclude_turns: bool,
    /// If true, persist additional EventMsg variants to the rollout file.
    /// However, `thread/read`, `thread/resume`, and `thread/fork` still only
    /// return the limited form of thread history for scalability reasons.
    /// If true, persist additional rollout EventMsg variants required to
    /// reconstruct a richer thread history on subsequent resume/fork/read.
    #[experimental("thread/fork.persistFullHistory")]
    #[serde(default)]
    pub persist_extended_history: bool,
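A hedged example of the resulting wire shape for these params. Note the experimental gate is named `persistFullHistory` while the serialized field (camelCased from `persist_extended_history`) is `persistExtendedHistory`; the remaining thread fields are omitted:

let params = serde_json::json!({
    "experimentalRawEvents": false,
    "persistExtendedHistory": true
    // ...plus the regular thread/start fields, omitted here.
});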
@@ -4843,22 +4609,6 @@ pub struct PluginReadResponse {
    pub plugin: PluginDetail,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct PluginSkillReadParams {
    pub remote_marketplace_name: String,
    pub remote_plugin_id: String,
    pub skill_name: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct PluginSkillReadResponse {
    pub contents: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -4885,7 +4635,7 @@ pub struct PluginShareListParams {}
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct PluginShareListResponse {
    pub data: Vec<PluginShareListItem>,
    pub data: Vec<PluginSummary>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -4900,15 +4650,6 @@ pub struct PluginShareDeleteParams {
#[ts(export_to = "v2/")]
pub struct PluginShareDeleteResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct PluginShareListItem {
    pub plugin: PluginSummary,
    pub share_url: String,
    pub local_plugin_path: Option<AbsolutePathBuf>,
}

#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "snake_case")]
#[ts(rename_all = "snake_case")]
@@ -6696,10 +6437,6 @@ impl From<CoreTurnItem> for ThreadItem {
                query: search.query,
                action: Some(WebSearchAction::from(search.action)),
            },
            CoreTurnItem::ImageView(image) => ThreadItem::ImageView {
                id: image.id,
                path: image.path,
            },
            CoreTurnItem::ImageGeneration(image) => ThreadItem::ImageGeneration {
                id: image.id,
                status: image.status,
@@ -6707,32 +6444,6 @@ impl From<CoreTurnItem> for ThreadItem {
                result: image.result,
                saved_path: image.saved_path,
            },
            CoreTurnItem::FileChange(file_change) => ThreadItem::FileChange {
                id: file_change.id,
                changes: convert_patch_changes(&file_change.changes),
                status: file_change
                    .status
                    .as_ref()
                    .map(PatchApplyStatus::from)
                    .unwrap_or(PatchApplyStatus::InProgress),
            },
            CoreTurnItem::McpToolCall(mcp) => {
                let duration_ms = mcp
                    .duration
                    .and_then(|duration| i64::try_from(duration.as_millis()).ok());

                ThreadItem::McpToolCall {
                    id: mcp.id,
                    server: mcp.server,
                    tool: mcp.tool,
                    status: McpToolCallStatus::from(mcp.status),
                    arguments: mcp.arguments,
                    mcp_app_resource_uri: mcp.mcp_app_resource_uri,
                    result: mcp.result.map(McpToolCallResult::from).map(Box::new),
                    error: mcp.error.map(McpToolCallError::from),
                    duration_ms,
                }
            }
            CoreTurnItem::ContextCompaction(compaction) => {
                ThreadItem::ContextCompaction { id: compaction.id }
            }
@@ -6842,16 +6553,6 @@ impl From<&CorePatchApplyStatus> for PatchApplyStatus {
    }
}

impl From<CoreMcpToolCallStatus> for McpToolCallStatus {
    fn from(value: CoreMcpToolCallStatus) -> Self {
        match value {
            CoreMcpToolCallStatus::InProgress => McpToolCallStatus::InProgress,
            CoreMcpToolCallStatus::Completed => McpToolCallStatus::Completed,
            CoreMcpToolCallStatus::Failed => McpToolCallStatus::Failed,
        }
    }
}
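One detail in the `McpToolCall` arm above is easy to miss: `Duration::as_millis` returns `u128`, so the conversion degrades to `None` rather than overflowing or panicking when the value cannot fit an `i64`:

use std::time::Duration;

let duration = Some(Duration::from_millis(42));
let duration_ms: Option<i64> = duration.and_then(|d| i64::try_from(d.as_millis()).ok());
assert_eq!(duration_ms, Some(42));

// An implausibly long duration becomes `None` instead of a bogus number.
let huge = Some(Duration::from_secs(u64::MAX));
assert_eq!(huge.and_then(|d| i64::try_from(d.as_millis()).ok()), None::<i64>);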

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -7138,9 +6839,6 @@ pub struct ItemStartedNotification {
    pub item: ThreadItem,
    pub thread_id: String,
    pub turn_id: String,
    /// Unix timestamp (in milliseconds) when this item lifecycle started.
    #[ts(type = "number")]
    pub started_at_ms: i64,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -7203,9 +6901,6 @@ pub struct ItemCompletedNotification {
    pub item: ThreadItem,
    pub thread_id: String,
    pub turn_id: String,
    /// Unix timestamp (in milliseconds) when this item lifecycle completed.
    #[ts(type = "number")]
    pub completed_at_ms: i64,
}
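For reference, the camelCase wire form of `ItemCompletedNotification` — the `item` payload is abridged and illustrative:

let notification = serde_json::json!({
    "item": { /* serialized ThreadItem, omitted */ },
    "threadId": "thread-1",
    "turnId": "turn-1",
    "completedAtMs": 0
});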

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -8358,15 +8053,10 @@ mod tests {
    use super::*;
    use codex_protocol::items::AgentMessageContent;
    use codex_protocol::items::AgentMessageItem;
    use codex_protocol::items::FileChangeItem;
    use codex_protocol::items::ImageViewItem;
    use codex_protocol::items::McpToolCallItem;
    use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
    use codex_protocol::items::ReasoningItem;
    use codex_protocol::items::TurnItem;
    use codex_protocol::items::UserMessageItem;
    use codex_protocol::items::WebSearchItem;
    use codex_protocol::mcp::CallToolResult;
    use codex_protocol::models::WebSearchAction as CoreWebSearchAction;
    use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
    use codex_protocol::user_input::UserInput as CoreUserInput;
@@ -8376,7 +8066,6 @@ mod tests {
    use serde_json::json;
    use std::num::NonZeroUsize;
    use std::path::PathBuf;
    use std::time::Duration;

    fn absolute_path_string(path: &str) -> String {
        let path = format!("/{}", path.trim_start_matches('/'));
@@ -9357,97 +9046,6 @@ mod tests {
        assert_eq!(decoded, params);
    }

    #[test]
    fn process_spawn_params_round_trips_without_sandbox_policy() {
        let params = ProcessSpawnParams {
            command: vec!["sleep".to_string(), "30".to_string()],
            process_handle: "sleep-1".to_string(),
            cwd: test_absolute_path(),
            tty: false,
            stream_stdin: false,
            stream_stdout_stderr: false,
            output_bytes_cap: None,
            timeout_ms: None,
            env: None,
            size: None,
        };

        let value = serde_json::to_value(&params).expect("serialize process/spawn params");
        assert_eq!(
            value,
            json!({
                "command": ["sleep", "30"],
                "processHandle": "sleep-1",
                "cwd": absolute_path_string("readable"),
                "env": null,
                "size": null,
            })
        );

        let decoded =
            serde_json::from_value::<ProcessSpawnParams>(value).expect("deserialize round-trip");
        assert_eq!(decoded, params);
    }

    #[test]
    fn process_spawn_params_distinguish_omitted_null_and_value_limits() {
        let base = json!({
            "command": ["sleep", "30"],
            "processHandle": "sleep-1",
            "cwd": absolute_path_string("readable"),
        });

        let expected_omitted = ProcessSpawnParams {
            command: vec!["sleep".to_string(), "30".to_string()],
            process_handle: "sleep-1".to_string(),
            cwd: test_absolute_path(),
            tty: false,
            stream_stdin: false,
            stream_stdout_stderr: false,
            output_bytes_cap: None,
            timeout_ms: None,
            env: None,
            size: None,
        };
        let decoded =
            serde_json::from_value::<ProcessSpawnParams>(base).expect("deserialize omitted limits");
        assert_eq!(decoded, expected_omitted);

        let decoded = serde_json::from_value::<ProcessSpawnParams>(json!({
            "command": ["sleep", "30"],
            "processHandle": "sleep-1",
            "cwd": absolute_path_string("readable"),
            "outputBytesCap": null,
            "timeoutMs": null,
        }))
        .expect("deserialize disabled limits");
        assert_eq!(
            decoded,
            ProcessSpawnParams {
                output_bytes_cap: Some(None),
                timeout_ms: Some(None),
                ..expected_omitted.clone()
            }
        );

        let decoded = serde_json::from_value::<ProcessSpawnParams>(json!({
            "command": ["sleep", "30"],
            "processHandle": "sleep-1",
            "cwd": absolute_path_string("readable"),
            "outputBytesCap": 123,
            "timeoutMs": 456,
        }))
        .expect("deserialize explicit limits");
        assert_eq!(
            decoded,
            ProcessSpawnParams {
                output_bytes_cap: Some(Some(123)),
                timeout_ms: Some(Some(456)),
                ..expected_omitted
            }
        );
    }

    #[test]
    fn command_exec_params_round_trips_disable_output_cap() {
        let params = CommandExecParams {
@@ -9680,110 +9278,6 @@ mod tests {
        assert_eq!(decoded, notification);
    }

    #[test]
    fn process_control_params_round_trip() {
        let write = ProcessWriteStdinParams {
            process_handle: "proc-7".to_string(),
            delta_base64: None,
            close_stdin: true,
        };
        let value = serde_json::to_value(&write).expect("serialize process/writeStdin params");
        assert_eq!(
            value,
            json!({
                "processHandle": "proc-7",
                "deltaBase64": null,
                "closeStdin": true,
            })
        );
        let decoded = serde_json::from_value::<ProcessWriteStdinParams>(value)
            .expect("deserialize process/writeStdin params");
        assert_eq!(decoded, write);

        let resize = ProcessResizePtyParams {
            process_handle: "proc-7".to_string(),
            size: ProcessTerminalSize {
                rows: 50,
                cols: 160,
            },
        };
        let value = serde_json::to_value(&resize).expect("serialize process/resizePty params");
        assert_eq!(
            value,
            json!({
                "processHandle": "proc-7",
                "size": {
                    "rows": 50,
                    "cols": 160,
                },
            })
        );
        let decoded = serde_json::from_value::<ProcessResizePtyParams>(value)
            .expect("deserialize process/resizePty params");
        assert_eq!(decoded, resize);

        let kill = ProcessKillParams {
            process_handle: "proc-7".to_string(),
        };
        let value = serde_json::to_value(&kill).expect("serialize process/kill params");
        assert_eq!(
            value,
            json!({
                "processHandle": "proc-7",
            })
        );
        let decoded =
            serde_json::from_value::<ProcessKillParams>(value).expect("deserialize process/kill");
        assert_eq!(decoded, kill);
    }

    #[test]
    fn process_notifications_round_trip() {
        let delta = ProcessOutputDeltaNotification {
            process_handle: "proc-1".to_string(),
            stream: ProcessOutputStream::Stdout,
            delta_base64: "AQI=".to_string(),
            cap_reached: false,
        };
        let value = serde_json::to_value(&delta).expect("serialize process/outputDelta");
        assert_eq!(
            value,
            json!({
                "processHandle": "proc-1",
                "stream": "stdout",
                "deltaBase64": "AQI=",
                "capReached": false,
            })
        );
        let decoded = serde_json::from_value::<ProcessOutputDeltaNotification>(value)
            .expect("deserialize process/outputDelta");
        assert_eq!(decoded, delta);

        let exited = ProcessExitedNotification {
            process_handle: "proc-1".to_string(),
            exit_code: 0,
            stdout: "out".to_string(),
            stdout_cap_reached: false,
            stderr: "err".to_string(),
            stderr_cap_reached: true,
        };
        let value = serde_json::to_value(&exited).expect("serialize process/exited");
        assert_eq!(
            value,
            json!({
                "processHandle": "proc-1",
                "exitCode": 0,
                "stdout": "out",
                "stdoutCapReached": false,
                "stderr": "err",
                "stderrCapReached": true,
            })
        );
        let decoded = serde_json::from_value::<ProcessExitedNotification>(value)
            .expect("deserialize process/exited");
        assert_eq!(decoded, exited);
    }

    #[test]
    fn command_execution_output_delta_round_trips() {
        let notification = CommandExecutionOutputDeltaNotification {
@@ -10839,111 +10333,6 @@ mod tests {
                }),
            }
        );

        let image_view_item = TurnItem::ImageView(ImageViewItem {
            id: "view-image-1".to_string(),
            path: test_path_buf("/tmp/view-image.png").abs(),
        });

        assert_eq!(
            ThreadItem::from(image_view_item),
            ThreadItem::ImageView {
                id: "view-image-1".to_string(),
                path: test_path_buf("/tmp/view-image.png").abs(),
            }
        );

        let file_change_item = TurnItem::FileChange(FileChangeItem {
            id: "patch-1".to_string(),
            changes: [(
                PathBuf::from("README.md"),
                codex_protocol::protocol::FileChange::Add {
                    content: "hello\n".to_string(),
                },
            )]
            .into_iter()
            .collect(),
            status: Some(codex_protocol::protocol::PatchApplyStatus::Completed),
            auto_approved: None,
            stdout: Some("Done!".to_string()),
            stderr: Some(String::new()),
        });

        assert_eq!(
            ThreadItem::from(file_change_item),
            ThreadItem::FileChange {
                id: "patch-1".to_string(),
                changes: vec![FileUpdateChange {
                    path: "README.md".to_string(),
                    kind: PatchChangeKind::Add,
                    diff: "hello\n".to_string(),
                }],
                status: PatchApplyStatus::Completed,
            }
        );

        let mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
            id: "mcp-1".to_string(),
            server: "server".to_string(),
            tool: "tool".to_string(),
            arguments: json!({"arg": "value"}),
            mcp_app_resource_uri: Some("app://connector".to_string()),
            status: CoreMcpToolCallStatus::InProgress,
            result: None,
            error: None,
            duration: None,
        });

        assert_eq!(
            ThreadItem::from(mcp_tool_call_item),
            ThreadItem::McpToolCall {
                id: "mcp-1".to_string(),
                server: "server".to_string(),
                tool: "tool".to_string(),
                status: McpToolCallStatus::InProgress,
                arguments: json!({"arg": "value"}),
                mcp_app_resource_uri: Some("app://connector".to_string()),
                result: None,
                error: None,
                duration_ms: None,
            }
        );

        let completed_mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
            id: "mcp-2".to_string(),
            server: "server".to_string(),
            tool: "tool".to_string(),
            arguments: JsonValue::Null,
            mcp_app_resource_uri: None,
            status: CoreMcpToolCallStatus::Completed,
            result: Some(CallToolResult {
                content: vec![json!({"type": "text", "text": "ok"})],
                structured_content: Some(json!({"ok": true})),
                is_error: Some(false),
                meta: Some(json!({"trace": "1"})),
            }),
            error: None,
            duration: Some(Duration::from_millis(42)),
        });

        assert_eq!(
            ThreadItem::from(completed_mcp_tool_call_item),
            ThreadItem::McpToolCall {
                id: "mcp-2".to_string(),
                server: "server".to_string(),
                tool: "tool".to_string(),
                status: McpToolCallStatus::Completed,
                arguments: JsonValue::Null,
                mcp_app_resource_uri: None,
                result: Some(Box::new(McpToolCallResult {
                    content: vec![json!({"type": "text", "text": "ok"})],
                    structured_content: Some(json!({"ok": true})),
                    meta: Some(json!({"trace": "1"})),
                })),
                error: None,
                duration_ms: Some(42),
            }
        );
    }

    #[test]
@@ -11278,23 +10667,6 @@ mod tests {
        );
    }

    #[test]
    fn plugin_skill_read_params_serialization_uses_remote_plugin_id() {
        assert_eq!(
            serde_json::to_value(PluginSkillReadParams {
                remote_marketplace_name: "chatgpt-global".to_string(),
                remote_plugin_id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
                skill_name: "plan-work".to_string(),
            })
            .unwrap(),
            json!({
                "remoteMarketplaceName": "chatgpt-global",
                "remotePluginId": "plugins~Plugin_00000000000000000000000000000000",
                "skillName": "plan-work",
            }),
        );
    }

    #[test]
    fn plugin_share_params_and_response_serialization_use_camel_case_fields() {
        let plugin_path = if cfg!(windows) {
@@ -11360,41 +10732,33 @@ mod tests {
    }

    #[test]
    fn plugin_share_list_response_serializes_share_items() {
    fn plugin_share_list_response_serializes_plugin_summaries() {
        assert_eq!(
            serde_json::to_value(PluginShareListResponse {
                data: vec![PluginShareListItem {
                    plugin: PluginSummary {
                        id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
                        name: "gmail".to_string(),
                        source: PluginSource::Remote,
                        installed: false,
                        enabled: false,
                        install_policy: PluginInstallPolicy::Available,
                        auth_policy: PluginAuthPolicy::OnUse,
                        availability: PluginAvailability::Available,
                        interface: None,
                    },
                    share_url: "https://chatgpt.example/plugins/share/share-key-1".to_string(),
                    local_plugin_path: None,
                data: vec![PluginSummary {
                    id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
                    name: "gmail".to_string(),
                    source: PluginSource::Remote,
                    installed: false,
                    enabled: false,
                    install_policy: PluginInstallPolicy::Available,
                    auth_policy: PluginAuthPolicy::OnUse,
                    availability: PluginAvailability::Available,
                    interface: None,
                }],
            })
            .unwrap(),
            json!({
                "data": [{
                    "plugin": {
                        "id": "plugins~Plugin_00000000000000000000000000000000",
                        "name": "gmail",
                        "source": { "type": "remote" },
                        "installed": false,
                        "enabled": false,
                        "installPolicy": "AVAILABLE",
                        "authPolicy": "ON_USE",
                        "availability": "AVAILABLE",
                        "interface": null,
                    },
                    "shareUrl": "https://chatgpt.example/plugins/share/share-key-1",
                    "localPluginPath": null,
                    "id": "plugins~Plugin_00000000000000000000000000000000",
                    "name": "gmail",
                    "source": { "type": "remote" },
                    "installed": false,
                    "enabled": false,
                    "installPolicy": "AVAILABLE",
                    "authPolicy": "ON_USE",
                    "availability": "AVAILABLE",
                    "interface": null,
                }],
            }),
        );

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "app-server-transport",
    crate_name = "codex_app_server_transport",
)
@@ -1,58 +0,0 @@
[package]
name = "codex-app-server-transport"
version.workspace = true
edition.workspace = true
license.workspace = true

[lib]
name = "codex_app_server_transport"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
axum = { workspace = true, default-features = false, features = [
    "http1",
    "json",
    "tokio",
    "ws",
] }
base64 = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-api = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-login = { workspace = true }
codex-model-provider = { workspace = true }
codex-state = { workspace = true }
codex-uds = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-rustls-provider = { workspace = true }
constant_time_eq = { workspace = true }
futures = { workspace = true }
gethostname = { workspace = true }
hmac = { workspace = true }
jsonwebtoken = { workspace = true }
owo-colors = { workspace = true, features = ["supports-colors"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
time = { workspace = true }
tokio = { workspace = true, features = [
    "io-std",
    "macros",
    "rt-multi-thread",
] }
tokio-tungstenite = { workspace = true }
tokio-util = { workspace = true }
tracing = { workspace = true, features = ["log"] }
url = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }

[dev-dependencies]
chrono = { workspace = true }
codex-config = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
@@ -1,20 +0,0 @@
mod outgoing_message;
mod transport;

pub use outgoing_message::ConnectionId;
pub use outgoing_message::OutgoingError;
pub use outgoing_message::OutgoingMessage;
pub use outgoing_message::OutgoingResponse;
pub use outgoing_message::QueuedOutgoingMessage;
pub use transport::AppServerTransport;
pub use transport::AppServerTransportParseError;
pub use transport::CHANNEL_CAPACITY;
pub use transport::ConnectionOrigin;
pub use transport::RemoteControlHandle;
pub use transport::TransportEvent;
pub use transport::app_server_control_socket_path;
pub use transport::auth;
pub use transport::start_control_socket_acceptor;
pub use transport::start_remote_control;
pub use transport::start_stdio_connection;
pub use transport::start_websocket_acceptor;
@@ -1,58 +0,0 @@
use std::fmt;

use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::Result;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use serde::Serialize;
use tokio::sync::oneshot;

/// Stable identifier for a transport connection.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConnectionId(pub u64);

impl fmt::Display for ConnectionId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// Outgoing message from the server to the client.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub enum OutgoingMessage {
    Request(ServerRequest),
    /// AppServerNotification is specific to the case where this is run as an
    /// "app server" as opposed to an MCP server.
    AppServerNotification(ServerNotification),
    Response(OutgoingResponse),
    Error(OutgoingError),
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct OutgoingResponse {
    pub id: RequestId,
    pub result: Result,
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct OutgoingError {
    pub error: JSONRPCErrorError,
    pub id: RequestId,
}

#[derive(Debug)]
pub struct QueuedOutgoingMessage {
    pub message: OutgoingMessage,
    pub write_complete_tx: Option<oneshot::Sender<()>>,
}

impl QueuedOutgoingMessage {
    pub fn new(message: OutgoingMessage) -> Self {
        Self {
            message,
            write_complete_tx: None,
        }
    }
}
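A usage sketch of the ack pattern that `write_complete_tx` enables: the writer task fires the oneshot after the bytes are flushed, so a caller can await delivery. The helper name `with_ack` is illustrative, not part of the crate:

use tokio::sync::oneshot;

fn with_ack(message: OutgoingMessage) -> (QueuedOutgoingMessage, oneshot::Receiver<()>) {
    let (write_complete_tx, write_complete_rx) = oneshot::channel();
    (
        QueuedOutgoingMessage {
            message,
            // The writer task sends `()` here once the message hits the wire.
            write_complete_tx: Some(write_complete_tx),
        },
        write_complete_rx,
    )
}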
@@ -1,478 +0,0 @@
pub mod auth;

use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingError;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::QueuedOutgoingMessage;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_core::config::find_codex_home;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::net::SocketAddr;
use std::path::Path;
use std::str::FromStr;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::error;
use tracing::warn;

/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage - 128 messages should be
/// plenty for an interactive CLI.
pub const CHANNEL_CAPACITY: usize = 128;

mod remote_control;
mod stdio;
mod unix_socket;
#[cfg(test)]
mod unix_socket_tests;
mod websocket;

pub use remote_control::RemoteControlHandle;
pub use remote_control::start_remote_control;
pub use stdio::start_stdio_connection;
pub use unix_socket::start_control_socket_acceptor;
pub use websocket::start_websocket_acceptor;

const OVERLOADED_ERROR_CODE: i64 = -32001;

const APP_SERVER_CONTROL_SOCKET_DIR_NAME: &str = "app-server-control";
const APP_SERVER_CONTROL_SOCKET_FILE_NAME: &str = "app-server-control.sock";

pub fn app_server_control_socket_path(codex_home: &Path) -> std::io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(
        codex_home
            .join(APP_SERVER_CONTROL_SOCKET_DIR_NAME)
            .join(APP_SERVER_CONTROL_SOCKET_FILE_NAME),
    )
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub enum AppServerTransport {
    Stdio,
    UnixSocket { socket_path: AbsolutePathBuf },
    WebSocket { bind_address: SocketAddr },
    Off,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum AppServerTransportParseError {
    UnsupportedListenUrl(String),
    InvalidUnixSocketPath { listen_url: String, message: String },
    InvalidWebSocketListenUrl(String),
}

impl std::fmt::Display for AppServerTransportParseError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
                f,
                "unsupported --listen URL `{listen_url}`; expected `stdio://`, `unix://`, `unix://PATH`, `ws://IP:PORT`, or `off`"
            ),
            AppServerTransportParseError::InvalidUnixSocketPath {
                listen_url,
                message,
            } => write!(
                f,
                "invalid unix socket --listen URL `{listen_url}`; failed to resolve socket path: {message}"
            ),
            AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
                f,
                "invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
            ),
        }
    }
}

impl std::error::Error for AppServerTransportParseError {}

impl AppServerTransport {
    pub const DEFAULT_LISTEN_URL: &'static str = "stdio://";

    pub fn from_listen_url(listen_url: &str) -> Result<Self, AppServerTransportParseError> {
        if listen_url == Self::DEFAULT_LISTEN_URL {
            return Ok(Self::Stdio);
        }

        if let Some(raw_socket_path) = listen_url.strip_prefix("unix://") {
            let socket_path = if raw_socket_path.is_empty() {
                let codex_home = find_codex_home().map_err(|err| {
                    AppServerTransportParseError::InvalidUnixSocketPath {
                        listen_url: listen_url.to_string(),
                        message: format!("failed to resolve CODEX_HOME: {err}"),
                    }
                })?;
                app_server_control_socket_path(&codex_home).map_err(|err| {
                    AppServerTransportParseError::InvalidUnixSocketPath {
                        listen_url: listen_url.to_string(),
                        message: err.to_string(),
                    }
                })?
            } else {
                AbsolutePathBuf::relative_to_current_dir(raw_socket_path).map_err(|err| {
                    AppServerTransportParseError::InvalidUnixSocketPath {
                        listen_url: listen_url.to_string(),
                        message: err.to_string(),
                    }
                })?
            };
            return Ok(Self::UnixSocket { socket_path });
        }

        if listen_url == "off" {
            return Ok(Self::Off);
        }

        if let Some(socket_addr) = listen_url.strip_prefix("ws://") {
            let bind_address = socket_addr.parse::<SocketAddr>().map_err(|_| {
                AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string())
            })?;
            return Ok(Self::WebSocket { bind_address });
        }

        Err(AppServerTransportParseError::UnsupportedListenUrl(
            listen_url.to_string(),
        ))
    }
}

impl FromStr for AppServerTransport {
    type Err = AppServerTransportParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::from_listen_url(s)
    }
}
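A usage sketch for the parser above. The unix path is illustrative; per the code, `unix://` with an empty path falls back to the control socket under `CODEX_HOME`:

assert_eq!(
    AppServerTransport::from_listen_url("stdio://"),
    Ok(AppServerTransport::Stdio)
);
assert!(matches!(
    AppServerTransport::from_listen_url("ws://127.0.0.1:4500"),
    Ok(AppServerTransport::WebSocket { .. })
));
assert!(matches!(
    AppServerTransport::from_listen_url("unix:///tmp/codex.sock"),
    Ok(AppServerTransport::UnixSocket { .. })
));
// Anything else is rejected with UnsupportedListenUrl.
assert!(AppServerTransport::from_listen_url("tcp://1.2.3.4:5").is_err());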

#[derive(Debug)]
pub enum TransportEvent {
    ConnectionOpened {
        connection_id: ConnectionId,
        origin: ConnectionOrigin,
        writer: mpsc::Sender<QueuedOutgoingMessage>,
        disconnect_sender: Option<CancellationToken>,
    },
    ConnectionClosed {
        connection_id: ConnectionId,
    },
    IncomingMessage {
        connection_id: ConnectionId,
        message: JSONRPCMessage,
    },
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectionOrigin {
    Stdio,
    InProcess,
    WebSocket,
    RemoteControl,
}

impl ConnectionOrigin {
    pub fn allows_device_key_requests(self) -> bool {
        // Device-key endpoints are only for local connections that own the app-server instance.
        // Do not include remote transports such as SSH or remote-control websocket connections.
        matches!(self, Self::Stdio | Self::InProcess)
    }
}

static CONNECTION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);

fn next_connection_id() -> ConnectionId {
    ConnectionId(CONNECTION_ID_COUNTER.fetch_add(1, Ordering::Relaxed))
}

async fn forward_incoming_message(
    transport_event_tx: &mpsc::Sender<TransportEvent>,
    writer: &mpsc::Sender<QueuedOutgoingMessage>,
    connection_id: ConnectionId,
    payload: &str,
) -> bool {
    match serde_json::from_str::<JSONRPCMessage>(payload) {
        Ok(message) => {
            enqueue_incoming_message(transport_event_tx, writer, connection_id, message).await
        }
        Err(err) => {
            error!("Failed to deserialize JSONRPCMessage: {err}");
            true
        }
    }
}

async fn enqueue_incoming_message(
    transport_event_tx: &mpsc::Sender<TransportEvent>,
    writer: &mpsc::Sender<QueuedOutgoingMessage>,
    connection_id: ConnectionId,
    message: JSONRPCMessage,
) -> bool {
    let event = TransportEvent::IncomingMessage {
        connection_id,
        message,
    };
    match transport_event_tx.try_send(event) {
        Ok(()) => true,
        Err(mpsc::error::TrySendError::Closed(_)) => false,
        Err(mpsc::error::TrySendError::Full(TransportEvent::IncomingMessage {
            connection_id,
            message: JSONRPCMessage::Request(request),
        })) => {
            let overload_error = OutgoingMessage::Error(OutgoingError {
                id: request.id,
                error: JSONRPCErrorError {
                    code: OVERLOADED_ERROR_CODE,
                    message: "Server overloaded; retry later.".to_string(),
                    data: None,
                },
            });
            match writer.try_send(QueuedOutgoingMessage::new(overload_error)) {
                Ok(()) => true,
                Err(mpsc::error::TrySendError::Closed(_)) => false,
                Err(mpsc::error::TrySendError::Full(_overload_error)) => {
                    warn!(
                        "dropping overload response for connection {:?}: outbound queue is full",
                        connection_id
                    );
                    true
                }
            }
        }
        Err(mpsc::error::TrySendError::Full(event)) => transport_event_tx.send(event).await.is_ok(),
    }
}

fn serialize_outgoing_message(outgoing_message: OutgoingMessage) -> Option<String> {
    let value = match serde_json::to_value(outgoing_message) {
        Ok(value) => value,
        Err(err) => {
            error!("Failed to convert OutgoingMessage to JSON value: {err}");
            return None;
        }
    };
    match serde_json::to_string(&value) {
        Ok(json) => Some(json),
        Err(err) => {
            error!("Failed to serialize JSONRPCMessage: {err}");
            None
        }
    }
}

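The asymmetry in `enqueue_incoming_message` above is deliberate: new requests can be shed with the `-32001` overload error because the client can retry them, while responses and notifications must never be dropped. Restated on a bare channel, illustrative only:

use tokio::sync::mpsc;

async fn demo(tx: mpsc::Sender<u32>) {
    match tx.try_send(1) {
        Ok(()) => {}
        Err(mpsc::error::TrySendError::Full(v)) => {
            // Response/notification path: wait for capacity instead of dropping.
            // (The request path would reply with an overload error here.)
            let _ = tx.send(v).await;
        }
        Err(mpsc::error::TrySendError::Closed(_)) => {}
    }
}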
#[cfg(test)]
mod tests {
    use super::*;
    use codex_app_server_protocol::ConfigWarningNotification;
    use codex_app_server_protocol::JSONRPCNotification;
    use codex_app_server_protocol::JSONRPCRequest;
    use codex_app_server_protocol::JSONRPCResponse;
    use codex_app_server_protocol::RequestId;
    use codex_app_server_protocol::ServerNotification;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tokio::time::Duration;
    use tokio::time::timeout;

    #[test]
    fn listen_off_parses_as_off_transport() {
        assert_eq!(
            AppServerTransport::from_listen_url("off"),
            Ok(AppServerTransport::Off)
        );
    }

    #[tokio::test]
    async fn enqueue_incoming_request_returns_overload_error_when_queue_is_full() {
        let connection_id = ConnectionId(42);
        let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
        let (writer_tx, mut writer_rx) = mpsc::channel(1);

        let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
            method: "initialized".to_string(),
            params: None,
        });
        transport_event_tx
            .send(TransportEvent::IncomingMessage {
                connection_id,
                message: first_message.clone(),
            })
            .await
            .expect("queue should accept first message");

        let request = JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(7),
            method: "config/read".to_string(),
            params: Some(json!({ "includeLayers": false })),
            trace: None,
        });
        assert!(
            enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request).await
        );

        let queued_event = transport_event_rx
            .recv()
            .await
            .expect("first event should stay queued");
        match queued_event {
            TransportEvent::IncomingMessage {
                connection_id: queued_connection_id,
                message,
            } => {
                assert_eq!(queued_connection_id, connection_id);
                assert_eq!(message, first_message);
            }
            _ => panic!("expected queued incoming message"),
        }

        let overload = writer_rx
            .recv()
            .await
            .expect("request should receive overload error");
        let overload_json =
            serde_json::to_value(overload.message).expect("serialize overload error");
        assert_eq!(
            overload_json,
            json!({
                "id": 7,
                "error": {
                    "code": OVERLOADED_ERROR_CODE,
                    "message": "Server overloaded; retry later."
                }
            })
        );
    }

    #[tokio::test]
    async fn enqueue_incoming_response_waits_instead_of_dropping_when_queue_is_full() {
        let connection_id = ConnectionId(42);
        let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
        let (writer_tx, _writer_rx) = mpsc::channel(1);

        let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
            method: "initialized".to_string(),
            params: None,
        });
        transport_event_tx
            .send(TransportEvent::IncomingMessage {
                connection_id,
                message: first_message.clone(),
            })
            .await
            .expect("queue should accept first message");

        let response = JSONRPCMessage::Response(JSONRPCResponse {
            id: RequestId::Integer(7),
            result: json!({"ok": true}),
        });
        let transport_event_tx_for_enqueue = transport_event_tx.clone();
        let writer_tx_for_enqueue = writer_tx.clone();
        let enqueue_handle = tokio::spawn(async move {
            enqueue_incoming_message(
                &transport_event_tx_for_enqueue,
                &writer_tx_for_enqueue,
                connection_id,
                response,
            )
            .await
        });

        let queued_event = transport_event_rx
            .recv()
            .await
            .expect("first event should be dequeued");
        match queued_event {
            TransportEvent::IncomingMessage {
                connection_id: queued_connection_id,
                message,
            } => {
                assert_eq!(queued_connection_id, connection_id);
                assert_eq!(message, first_message);
            }
            _ => panic!("expected queued incoming message"),
        }

        let enqueue_result = enqueue_handle.await.expect("enqueue task should not panic");
        assert!(enqueue_result);

        let forwarded_event = transport_event_rx
            .recv()
            .await
            .expect("response should be forwarded instead of dropped");
        match forwarded_event {
            TransportEvent::IncomingMessage {
                connection_id: queued_connection_id,
                message: JSONRPCMessage::Response(JSONRPCResponse { id, result }),
            } => {
                assert_eq!(queued_connection_id, connection_id);
                assert_eq!(id, RequestId::Integer(7));
                assert_eq!(result, json!({"ok": true}));
            }
            _ => panic!("expected forwarded response message"),
        }
    }

    #[tokio::test]
    async fn enqueue_incoming_request_does_not_block_when_writer_queue_is_full() {
        let connection_id = ConnectionId(42);
        let (transport_event_tx, _transport_event_rx) = mpsc::channel(1);
        let (writer_tx, mut writer_rx) = mpsc::channel(1);

        transport_event_tx
            .send(TransportEvent::IncomingMessage {
                connection_id,
                message: JSONRPCMessage::Notification(JSONRPCNotification {
                    method: "initialized".to_string(),
                    params: None,
                }),
            })
            .await
            .expect("transport queue should accept first message");

        writer_tx
            .send(QueuedOutgoingMessage::new(
                OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                    ConfigWarningNotification {
                        summary: "queued".to_string(),
                        details: None,
                        path: None,
                        range: None,
                    },
                )),
            ))
            .await
            .expect("writer queue should accept first message");

        let request = JSONRPCMessage::Request(JSONRPCRequest {
            id: RequestId::Integer(7),
            method: "config/read".to_string(),
            params: Some(json!({ "includeLayers": false })),
            trace: None,
        });

        let enqueue_result = timeout(
            Duration::from_millis(100),
            enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request),
        )
        .await
        .expect("enqueue should not block while writer queue is full");
        assert!(enqueue_result);

        let queued_outgoing = writer_rx
            .recv()
            .await
            .expect("writer queue should still contain original message");
        let queued_json =
            serde_json::to_value(queued_outgoing.message).expect("serialize queued message");
        assert_eq!(
            queued_json,
            json!({
                "method": "configWarning",
                "params": {
                    "summary": "queued",
                    "details": null,
                },
            })
        );
    }
}
@@ -30,6 +30,7 @@ axum = { workspace = true, default-features = false, features = [
    "ws",
] }
codex-analytics = { workspace = true }
codex-api = { workspace = true }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-config = { workspace = true }
@@ -57,7 +58,6 @@ codex-model-provider = { workspace = true }
codex-models-manager = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-app-server-transport = { workspace = true }
codex-feedback = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-rollout = { workspace = true }
@@ -65,11 +65,18 @@ codex-sandboxing = { workspace = true }
codex-state = { workspace = true }
codex-thread-store = { workspace = true }
codex-tools = { workspace = true }
codex-uds = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-json-to-toml = { workspace = true }
codex-utils-rustls-provider = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive"] }
constant_time_eq = { workspace = true }
futures = { workspace = true }
gethostname = { workspace = true }
hmac = { workspace = true }
jsonwebtoken = { workspace = true }
owo-colors = { workspace = true, features = ["supports-colors"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
@@ -86,6 +93,7 @@ tokio = { workspace = true, features = [
    "signal",
] }
tokio-util = { workspace = true }
tokio-tungstenite = { workspace = true }
tracing = { workspace = true, features = ["log"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] }
url = { workspace = true }
@@ -103,7 +111,6 @@ core_test_support = { workspace = true }
codex-model-provider-info = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
flate2 = { workspace = true }
hmac = { workspace = true }
opentelemetry = { workspace = true }
opentelemetry_sdk = { workspace = true }
pretty_assertions = { workspace = true }

@@ -181,12 +181,6 @@ Example with notification opt-out:
- `command/exec/resize` — resize a running PTY-backed `command/exec` session by `processId`; returns `{}`.
- `command/exec/terminate` — terminate a running `command/exec` session by `processId`; returns `{}`.
- `command/exec/outputDelta` — notification emitted for base64-encoded stdout/stderr chunks from a streaming `command/exec` session.
- `process/spawn` — experimental; spawn a standalone process without the Codex sandbox on the host where the app server is running; returns after the process starts and emits `process/outputDelta` and `process/exited` notifications.
- `process/writeStdin` — experimental; write base64-decoded stdin bytes to a running `process/spawn` session or close stdin; returns `{}`.
- `process/resizePty` — experimental; resize a running PTY-backed `process/spawn` session by `processHandle`; returns `{}`.
- `process/kill` — experimental; terminate a running `process/spawn` session by `processHandle`; returns `{}`.
- `process/outputDelta` — experimental; notification emitted for base64-encoded stdout/stderr chunks from a streaming `process/spawn` session.
- `process/exited` — experimental; notification emitted when a `process/spawn` session exits.
- `fs/readFile` — read an absolute file path and return `{ dataBase64 }` (see the sketch after this list).
- `fs/writeFile` — write an absolute file path from base64-encoded `{ dataBase64 }`; returns `{}`.
- `fs/createDirectory` — create an absolute directory path; `recursive` defaults to `true`.
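
A minimal request/response sketch for the `fs/readFile` and `fs/createDirectory` methods above. The `{ dataBase64 }` result shape and the `recursive` default come from the method descriptions; the `path` parameter name, the request ids, and the payload contents are illustrative assumptions:

```json
{ "method": "fs/readFile", "id": 50, "params": { "path": "/Users/me/project/README.md" } }
{ "id": 50, "result": { "dataBase64": "IyBSRUFETUUK" } }
{ "method": "fs/createDirectory", "id": 51, "params": { "path": "/Users/me/project/out", "recursive": true } }
{ "id": 51, "result": {} }
```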
@@ -209,7 +203,6 @@ Example with notification opt-out:
- `marketplace/upgrade` — upgrade all configured Git plugin marketplaces, or one named marketplace when `marketplaceName` is provided. Returns selected marketplace names, upgraded roots, and per-marketplace errors.
- `plugin/list` — list discovered plugin marketplaces and plugin state, including effective marketplace install/auth policy metadata, plugin `availability` (`AVAILABLE` by default or `DISABLED_BY_ADMIN` for remote plugins blocked upstream), fail-open `marketplaceLoadErrors` entries for marketplace files that could not be parsed or loaded, and best-effort `featuredPluginIds` for the official curated marketplace. `interface.category` uses the marketplace category when present; otherwise it falls back to the plugin manifest category (**under development; do not call from production clients yet**).
- `plugin/read` — read one plugin by `marketplacePath` plus `pluginName`, returning marketplace info, a list-style `summary`, manifest descriptions/interface metadata, and bundled skills/apps/MCP server names. Returned plugin skills include their current `enabled` state after local config filtering. Plugin app summaries also include `needsAuth` when the server can determine connector accessibility (**under development; do not call from production clients yet**).
- `plugin/skill/read` — read remote plugin skill markdown on demand by `remoteMarketplaceName`, `remotePluginId`, and `skillName`. This lets clients preview uninstalled remote plugin skills without downloading the plugin bundle (see the sketch after this list).
- `skills/changed` — notification emitted when watched local skill files change.
- `app/list` — list available apps.
- `device/key/create` — create or load a controller-local device signing key for an account/client binding. This local-key API is available only over local transports such as stdio and in-process; remote transports reject it. Hardware-backed providers are the target protection class; an OS-protected non-extractable fallback is allowed only with `protectionPolicy: "allow_os_protected_nonextractable"` and returns the reported `protectionClass`.
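
A sketch of a `plugin/skill/read` call using the three parameters named in the list above; the marketplace name, plugin id, skill name, and the `markdown` field in the response are illustrative assumptions rather than confirmed wire shapes:

```json
{ "method": "plugin/skill/read", "id": 60, "params": {
  "remoteMarketplaceName": "official",
  "remotePluginId": "example-plugin",
  "skillName": "example-skill"
} }
{ "id": 60, "result": { "markdown": "# Example skill\n..." } }
```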
@@ -313,6 +306,8 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c

Like `thread/resume`, experimental clients can pass `excludeTurns: true` to `thread/fork` to return only thread metadata in `thread.turns` and page history with `thread/turns/list`. In that mode the server skips replaying restored `thread/tokenUsage/updated`, which keeps the fork path from rebuilding turns just to attribute historical usage.

Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `persistExtendedHistory: true` to persist a richer subset of ThreadItems for non-lossy history when calling `thread/read`, `thread/resume`, and `thread/fork` later. This does not backfill events that were not persisted previously. A sketch of both flags follows.
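
A minimal `thread/fork` request combining the two experimental flags described above; the `threadId` value and request id are illustrative assumptions:

```json
{ "method": "thread/fork", "id": 30, "params": {
  "threadId": "thr_123",
  "excludeTurns": true,
  "persistExtendedHistory": true
} }
```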

### Example: List threads (with pagination & filters)

`thread/list` lets you render a history UI. Results default to `createdAt` descending (newest first). Pass any combination of:
@@ -407,7 +402,7 @@ Later, after the idle unload timeout:

### Example: Read a thread

Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want thread history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the full rollout history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.

```json
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
@@ -936,7 +931,6 @@ Run a standalone command (argv vector) in the server’s sandbox without creatin
} }
```

- Prefer using `process/spawn` when you want an explicitly unsandboxed process execution API with immediate spawn acknowledgement, handle-based control, output notifications, and an exit notification.
- For clients that are already sandboxed externally, set the legacy `sandboxPolicy` to `{"type":"externalSandbox","networkAccess":"enabled"}` (or omit `networkAccess` to keep it restricted). Codex will not enforce its own sandbox in this mode; it tells the model it has full file-system access and passes the `networkAccess` state through `environment_context` (a sketch follows this list).
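
A sketch of the legacy `sandboxPolicy` shape described above; the policy value is quoted from the note, while attaching it to a `thread/start` request is an illustrative assumption about where the policy is supplied:

```json
{ "method": "thread/start", "id": 35, "params": {
  "sandboxPolicy": { "type": "externalSandbox", "networkAccess": "enabled" }
} }
```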

Notes:
@@ -1008,83 +1002,6 @@ Streaming stdin/stdout uses base64 so PTY sessions can carry arbitrary bytes:
- `command/exec.params.env` overrides the server-computed environment per key; set a key to `null` to unset an inherited variable.
- `command/exec/resize` is only supported for PTY-backed `command/exec` sessions (see the sketch after this list).
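
A minimal `command/exec/resize` call, following the method summary earlier in this document (resize by `processId`, returns `{}`); the `processId` value and the `size` field shape, mirrored from the `process/resizePty` example below, are assumptions:

```json
{ "method": "command/exec/resize", "id": 36, "params": {
  "processId": "proc_1",
  "size": { "rows": 48, "cols": 160 }
} }
{ "id": 36, "result": {} }
```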

### Example: Process lifecycle execution

Use `process/spawn` to start a standalone argv-based process without the Codex sandbox on the host where the app server is running. The `process/*` API is experimental and requires `initialize.params.capabilities.experimentalApi: true`. The spawn response means the process has started and the `processHandle` is registered; completion is reported later through `process/exited`.

```json
{ "method": "process/spawn", "id": 40, "params": {
  "command": ["cargo", "check"],
  "processHandle": "cargo-check-1",
  "cwd": "/Users/me/project",   // required absolute path
  "env": { "RUST_LOG": null },  // optional; override or unset app-server env vars
  "outputBytesCap": 1048576,    // optional; omit for default, null disables
  "timeoutMs": 10000            // optional; omit for default, null disables
} }
{ "id": 40, "result": {} }
{ "method": "process/exited", "params": {
  "processHandle": "cargo-check-1",
  "exitCode": 0,
  "stdout": "...",
  "stdoutCapReached": false,
  "stderr": "",
  "stderrCapReached": false
} }
```

For interactive or streaming processes, set `tty: true` or `streamStdoutStderr: true` and route output notifications by `processHandle`:

```json
{ "method": "process/spawn", "id": 41, "params": {
  "command": ["bash", "-i"],
  "processHandle": "bash-1",
  "cwd": "/Users/me/project",
  "tty": true,
  "size": { "rows": 40, "cols": 120 },
  "outputBytesCap": null,
  "timeoutMs": null
} }
{ "id": 41, "result": {} }
{ "method": "process/outputDelta", "params": {
  "processHandle": "bash-1",
  "stream": "stdout",
  "deltaBase64": "YmFzaC00LjQkIA==",
  "capReached": false
} }
{ "method": "process/writeStdin", "id": 42, "params": {
  "processHandle": "bash-1",
  "deltaBase64": "cHdkCg=="
} }
{ "id": 42, "result": {} }
{ "method": "process/resizePty", "id": 43, "params": {
  "processHandle": "bash-1",
  "size": { "rows": 48, "cols": 160 }
} }
{ "id": 43, "result": {} }
{ "method": "process/kill", "id": 44, "params": {
  "processHandle": "bash-1"
} }
{ "id": 44, "result": {} }
{ "method": "process/exited", "params": {
  "processHandle": "bash-1",
  "exitCode": 137,
  "stdout": "",
  "stdoutCapReached": false,
  "stderr": "",
  "stderrCapReached": false
} }
```

- Empty `command` arrays and empty `processHandle` strings are rejected.
- `cwd` is required and must be absolute.
- `process/spawn` is intentionally unsandboxed and does not define sandbox-selection fields such as `sandboxPolicy` or `permissionProfile`.
- Duplicate active `processHandle` values are rejected on the same connection; the same handle can be reused after the prior process exits.
- `tty: true` implies PTY mode plus `streamStdin: true` and `streamStdoutStderr: true`.
- `process/writeStdin` accepts either `deltaBase64`, `closeStdin`, or both (see the sketch after this list).
- When omitted, `timeoutMs` and `outputBytesCap` fall back to server defaults. Set either field to `null` to disable that limit for terminal-style sessions.
- `outputBytesCap` applies independently to `stdout` and `stderr`; `process/exited.stdoutCapReached` and `stderrCapReached` report whether each stream reached the cap. Streamed bytes are not duplicated into `process/exited`.
- `process/outputDelta` and `process/exited` notifications are connection-scoped. If the originating connection closes, the server terminates the process.
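
A sketch of ending a streaming session's input with `closeStdin`, per the note above that `process/writeStdin` accepts `deltaBase64`, `closeStdin`, or both; the handle and request id are illustrative assumptions:

```json
{ "method": "process/writeStdin", "id": 45, "params": {
  "processHandle": "bash-1",
  "closeStdin": true
} }
{ "id": 45, "result": {} }
```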

### Example: Filesystem utilities

These methods operate on absolute paths on the host filesystem and cover reading, writing, directory traversal, copying, removal, and change notifications; a write sketch follows.
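
A minimal `fs/writeFile` sketch based on the method summary earlier in this document (base64-encoded `{ dataBase64 }`, returns `{}`); the `path` parameter name and the payload are assumptions:

```json
{ "method": "fs/writeFile", "id": 52, "params": {
  "path": "/Users/me/project/notes.txt",
  "dataBase64": "aGVsbG8K"
} }
{ "id": 52, "result": {} }
```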

@@ -1,9 +1,10 @@
use crate::codex_message_processor::read_rollout_items_from_rollout;
use crate::codex_message_processor::read_summary_from_rollout;
use crate::codex_message_processor::summary_to_thread;
use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use crate::outgoing_message::ClientRequestResult;
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
use crate::request_processors::populate_thread_turns_from_history;
use crate::request_processors::thread_from_stored_thread;
use crate::server_request_error::is_turn_transition_server_request_error;
use crate::thread_state::ThreadState;
use crate::thread_state::TurnSummary;
@@ -28,6 +29,7 @@ use codex_app_server_protocol::ExecPolicyAmendment as V2ExecPolicyAmendment;
use codex_app_server_protocol::FileChangeApprovalDecision;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::FileUpdateChange;
use codex_app_server_protocol::GrantedPermissionProfile as V2GrantedPermissionProfile;
use codex_app_server_protocol::GuardianWarningNotification;
use codex_app_server_protocol::HookCompletedNotification;
@@ -44,6 +46,7 @@ use codex_app_server_protocol::ModelVerificationNotification;
use codex_app_server_protocol::NetworkApprovalContext as V2NetworkApprovalContext;
use codex_app_server_protocol::NetworkPolicyAmendment as V2NetworkPolicyAmendment;
use codex_app_server_protocol::NetworkPolicyRuleAction as V2NetworkPolicyRuleAction;
use codex_app_server_protocol::PatchApplyStatus;
use codex_app_server_protocol::PermissionsRequestApprovalParams;
use codex_app_server_protocol::PermissionsRequestApprovalResponse;
use codex_app_server_protocol::RawResponseItemCompletedNotification;
@@ -63,7 +66,6 @@ use codex_app_server_protocol::ThreadRealtimeStartedNotification;
use codex_app_server_protocol::ThreadRealtimeTranscriptDeltaNotification;
use codex_app_server_protocol::ThreadRealtimeTranscriptDoneNotification;
use codex_app_server_protocol::ThreadRollbackResponse;
use codex_app_server_protocol::ThreadStatus;
use codex_app_server_protocol::ThreadTokenUsage;
use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification;
use codex_app_server_protocol::ToolRequestUserInputOption;
@@ -80,11 +82,16 @@ use codex_app_server_protocol::TurnPlanUpdatedNotification;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::WarningNotification;
use codex_app_server_protocol::build_file_change_approval_request_item;
use codex_app_server_protocol::build_file_change_end_item;
use codex_app_server_protocol::build_item_from_guardian_event;
use codex_app_server_protocol::build_turns_from_rollout_items;
use codex_app_server_protocol::convert_patch_changes;
use codex_app_server_protocol::guardian_auto_approval_review_notification;
use codex_app_server_protocol::item_event_to_server_notification;
use codex_core::CodexThread;
use codex_core::ThreadManager;
use codex_core::find_thread_name_by_id;
use codex_core::review_format::format_review_findings_block;
use codex_core::review_prompts;
use codex_protocol::ThreadId;
@@ -112,12 +119,12 @@ use codex_sandboxing::policy_transforms::intersect_permission_profiles;
use codex_shell_command::parse_command::shlex_join;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tokio::sync::Mutex;
use tokio::sync::oneshot;
use tracing::error;
use tracing::warn;

enum CommandExecutionApprovalPresentation {
    Network(V2NetworkApprovalContext),
@@ -143,6 +150,7 @@ pub(crate) async fn apply_bespoke_event_handling(
    thread_watch_manager: ThreadWatchManager,
    thread_list_state_permit: Arc<tokio::sync::Semaphore>,
    fallback_model_provider: String,
    codex_home: &Path,
) {
    let Event {
        id: event_turn_id,
@@ -516,7 +524,28 @@ pub(crate) async fn apply_bespoke_event_handling(
            let permission_guard = thread_watch_manager
                .note_permission_requested(&conversation_id.to_string())
                .await;
            // Until we migrate the core to be aware of a first class FileChangeItem
            // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
            let item_id = event.call_id.clone();
            let patch_changes = convert_patch_changes(&event.changes);
            let first_start = {
                let mut state = thread_state.lock().await;
                state
                    .turn_summary
                    .file_change_started
                    .insert(item_id.clone())
            };
            if first_start {
                let item = build_file_change_approval_request_item(&event);
                let notification = ItemStartedNotification {
                    thread_id: conversation_id.to_string(),
                    turn_id: event_turn_id.clone(),
                    item,
                };
                outgoing
                    .send_server_notification(ServerNotification::ItemStarted(notification))
                    .await;
            }

            let params = FileChangeRequestApprovalParams {
                thread_id: conversation_id.to_string(),
@@ -530,10 +559,14 @@ pub(crate) async fn apply_bespoke_event_handling(
                .await;
            tokio::spawn(async move {
                on_file_change_request_approval_response(
                    event_turn_id,
                    conversation_id,
                    item_id,
                    patch_changes,
                    pending_request_id,
                    rx,
                    conversation,
                    outgoing,
                    thread_state.clone(),
                    permission_guard,
                )
@@ -813,7 +846,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            let notification = ItemStartedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: turn_id.clone(),
                started_at_ms: request.started_at_ms,
                item,
            };
            outgoing
@@ -834,11 +866,9 @@ pub(crate) async fn apply_bespoke_event_handling(
                crate::dynamic_tools::on_call_response(call_id, rx, conversation).await;
            });
        }
        EventMsg::McpToolCallBegin(_) | EventMsg::McpToolCallEnd(_) => {
            // Deprecated MCP tool-call events are still fanned out for legacy clients.
            // App-server v2 receives the canonical TurnItem::McpToolCall lifecycle instead.
        }
        msg @ (EventMsg::DynamicToolCallResponse(_)
        | EventMsg::McpToolCallBegin(_)
        | EventMsg::McpToolCallEnd(_)
        | EventMsg::CollabAgentSpawnBegin(_)
        | EventMsg::CollabAgentSpawnEnd(_)
        | EventMsg::CollabAgentInteractionBegin(_)
@@ -954,7 +984,28 @@ pub(crate) async fn apply_bespoke_event_handling(
                }))
                .await;
        }
        EventMsg::ViewImageToolCall(_) => {}
        EventMsg::ViewImageToolCall(view_image_event) => {
            let item = ThreadItem::ImageView {
                id: view_image_event.call_id.clone(),
                path: view_image_event.path.clone(),
            };
            let started = ItemStartedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                item: item.clone(),
            };
            outgoing
                .send_server_notification(ServerNotification::ItemStarted(started))
                .await;
            let completed = ItemCompletedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                item,
            };
            outgoing
                .send_server_notification(ServerNotification::ItemCompleted(completed))
                .await;
        }
        EventMsg::EnteredReviewMode(review_request) => {
            let review = review_request
                .user_facing_hint
@@ -966,7 +1017,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            let started = ItemStartedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                started_at_ms: now_unix_timestamp_ms(),
                item: item.clone(),
            };
            outgoing
@@ -975,7 +1025,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            let completed = ItemCompletedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                completed_at_ms: now_unix_timestamp_ms(),
                item,
            };
            outgoing
@@ -1025,7 +1074,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            let started = ItemStartedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                started_at_ms: now_unix_timestamp_ms(),
                item: item.clone(),
            };
            outgoing
@@ -1034,7 +1082,6 @@ pub(crate) async fn apply_bespoke_event_handling(
            let completed = ItemCompletedNotification {
                thread_id: conversation_id.to_string(),
                turn_id: event_turn_id.clone(),
                completed_at_ms: now_unix_timestamp_ms(),
                item,
            };
            outgoing
@@ -1057,9 +1104,40 @@ pub(crate) async fn apply_bespoke_event_handling(
            )
            .await;
        }
        EventMsg::PatchApplyBegin(_) | EventMsg::PatchApplyEnd(_) => {
            // Core still fans out these deprecated events for legacy clients;
            // v2 clients receive the canonical FileChange item instead.
        EventMsg::PatchApplyBegin(patch_begin_event) => {
            // Until we migrate the core to be aware of a first class FileChangeItem
            // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
            let item_id = patch_begin_event.call_id.clone();

            let first_start = {
                let mut state = thread_state.lock().await;
                state
                    .turn_summary
                    .file_change_started
                    .insert(item_id.clone())
            };
            if first_start {
                let notification = item_event_to_server_notification(
                    EventMsg::PatchApplyBegin(patch_begin_event),
                    &conversation_id.to_string(),
                    &event_turn_id,
                );
                outgoing.send_server_notification(notification).await;
            }
        }
        EventMsg::PatchApplyEnd(patch_end_event) => {
            // Until we migrate the core to be aware of a first class FileChangeItem
            // and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
            let item_id = patch_end_event.call_id.clone();
            complete_file_change_item(
                conversation_id,
                item_id,
                build_file_change_end_item(&patch_end_event),
                event_turn_id.clone(),
                &outgoing,
                &thread_state,
            )
            .await;
        }
        EventMsg::ExecCommandBegin(exec_command_begin_event) => {
            if matches!(
@@ -1161,43 +1239,69 @@ pub(crate) async fn apply_bespoke_event_handling(
                    return;
                }
            };
            let fallback_cwd = conversation.config_snapshot().await.cwd;
            let stored_thread = match conversation
                .read_thread(
                    /*include_archived*/ true, /*include_history*/ true,
                )
                .await
            let Some(rollout_path) = conversation.rollout_path() else {
                outgoing
                    .send_error(
                        request_id,
                        invalid_request("thread has no persisted rollout"),
                    )
                    .await;
                return;
            };
            let response = match read_summary_from_rollout(
                rollout_path.as_path(),
                fallback_model_provider.as_str(),
            )
            .await
            {
                Ok(stored_thread) => stored_thread,
                Ok(summary) => {
                    let fallback_cwd = conversation.config_snapshot().await.cwd;
                    let mut thread = summary_to_thread(summary, &fallback_cwd);
                    match read_rollout_items_from_rollout(rollout_path.as_path()).await {
                        Ok(items) => {
                            thread.turns = build_turns_from_rollout_items(&items);
                            thread.status = thread_watch_manager
                                .loaded_status_for_thread(&thread.id)
                                .await;
                            match find_thread_name_by_id(codex_home, &conversation_id).await {
                                Ok(name) => {
                                    thread.name = name;
                                }
                                Err(err) => {
                                    warn!(
                                        "Failed to read thread name for {conversation_id}: {err}"
                                    );
                                }
                            }
                            ThreadRollbackResponse { thread }
                        }
                        Err(err) => {
                            outgoing
                                .send_error(
                                    request_id.clone(),
                                    internal_error(format!(
                                        "failed to load rollout `{}`: {err}",
                                        rollout_path.display()
                                    )),
                                )
                                .await;
                            return;
                        }
                    }
                }
                Err(err) => {
                    outgoing
                        .send_error(
                            request_id.clone(),
                            internal_error(format!(
                                "failed to read thread {conversation_id} after rollback: {err}"
                                "failed to load rollout `{}`: {err}",
                                rollout_path.display()
                            )),
                        )
                        .await;
                    return;
                }
            };
            let loaded_status = thread_watch_manager
                .loaded_status_for_thread(&conversation_id.to_string())
                .await;
            let response = match thread_rollback_response_from_stored_thread(
                stored_thread,
                fallback_model_provider.as_str(),
                &fallback_cwd,
                loaded_status,
            ) {
                Ok(response) => response,
                Err(err) => {
                    outgoing
                        .send_error(request_id.clone(), internal_error(err))
                        .await;
                    return;
                }
            };

            outgoing.send_response(request_id, response).await;
        }
@@ -1321,6 +1425,31 @@ async fn emit_turn_completed_with_status(
        .await;
}

async fn complete_file_change_item(
    conversation_id: ThreadId,
    item_id: String,
    item: ThreadItem,
    turn_id: String,
    outgoing: &ThreadScopedOutgoingMessageSender,
    thread_state: &Arc<Mutex<ThreadState>>,
) {
    thread_state
        .lock()
        .await
        .turn_summary
        .file_change_started
        .remove(&item_id);

    let notification = ItemCompletedNotification {
        thread_id: conversation_id.to_string(),
        turn_id,
        item,
    };
    outgoing
        .send_server_notification(ServerNotification::ItemCompleted(notification))
        .await;
}

#[allow(clippy::too_many_arguments)]
async fn start_command_execution_item(
    conversation_id: &ThreadId,
@@ -1344,7 +1473,6 @@ async fn start_command_execution_item(
    let notification = ItemStartedNotification {
        thread_id: conversation_id.to_string(),
        turn_id,
        started_at_ms: now_unix_timestamp_ms(),
        item: ThreadItem::CommandExecution {
            id: item_id,
            command,
@@ -1404,7 +1532,6 @@ async fn complete_command_execution_item(
    let notification = ItemCompletedNotification {
        thread_id: conversation_id.to_string(),
        turn_id,
        completed_at_ms: now_unix_timestamp_ms(),
        item,
    };
    outgoing
@@ -1452,7 +1579,6 @@ pub(crate) async fn maybe_emit_hook_prompt_item_completed(
    let notification = ItemCompletedNotification {
        thread_id: conversation_id.to_string(),
        turn_id: turn_id.to_string(),
        completed_at_ms: now_unix_timestamp_ms(),
        item: ThreadItem::HookPrompt {
            id: hook_prompt.id,
            fragments: hook_prompt
@@ -1547,25 +1673,6 @@ async fn handle_thread_rollback_failed(
    }
}

fn thread_rollback_response_from_stored_thread(
    stored_thread: codex_thread_store::StoredThread,
    fallback_model_provider: &str,
    fallback_cwd: &AbsolutePathBuf,
    loaded_status: ThreadStatus,
) -> std::result::Result<ThreadRollbackResponse, String> {
    let thread_id = stored_thread.thread_id;
    let (mut thread, history) =
        thread_from_stored_thread(stored_thread, fallback_model_provider, fallback_cwd);
    let Some(history) = history else {
        return Err(format!(
            "thread {thread_id} did not include persisted history after rollback"
        ));
    };
    populate_thread_turns_from_history(&mut thread, &history.items, /*active_turn*/ None);
    thread.status = loaded_status;
    Ok(ThreadRollbackResponse { thread })
}

async fn respond_to_pending_interrupts(
    thread_state: &Arc<Mutex<ThreadState>>,
    outgoing: &ThreadScopedOutgoingMessageSender,
@@ -1895,28 +2002,38 @@ fn render_review_output_text(output: &ReviewOutputEvent) -> String {
    }
}

fn map_file_change_approval_decision(decision: FileChangeApprovalDecision) -> ReviewDecision {
fn map_file_change_approval_decision(
    decision: FileChangeApprovalDecision,
) -> (ReviewDecision, Option<PatchApplyStatus>) {
    match decision {
        FileChangeApprovalDecision::Accept => ReviewDecision::Approved,
        FileChangeApprovalDecision::AcceptForSession => ReviewDecision::ApprovedForSession,
        FileChangeApprovalDecision::Decline => ReviewDecision::Denied,
        FileChangeApprovalDecision::Cancel => ReviewDecision::Abort,
        FileChangeApprovalDecision::Accept => (ReviewDecision::Approved, None),
        FileChangeApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
        FileChangeApprovalDecision::Decline => {
            (ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
        }
        FileChangeApprovalDecision::Cancel => {
            (ReviewDecision::Abort, Some(PatchApplyStatus::Declined))
        }
    }
}

#[allow(clippy::too_many_arguments)]
async fn on_file_change_request_approval_response(
    event_turn_id: String,
    conversation_id: ThreadId,
    item_id: String,
    changes: Vec<FileUpdateChange>,
    pending_request_id: RequestId,
    receiver: oneshot::Receiver<ClientRequestResult>,
    codex: Arc<CodexThread>,
    outgoing: ThreadScopedOutgoingMessageSender,
    thread_state: Arc<Mutex<ThreadState>>,
    permission_guard: ThreadWatchActiveGuard,
) {
    let response = receiver.await;
    resolve_server_request_on_thread_listener(&thread_state, pending_request_id).await;
    drop(permission_guard);
    let decision = match response {
    let (decision, completion_status) = match response {
        Ok(Ok(value)) => {
            let response = serde_json::from_value::<FileChangeRequestApprovalResponse>(value)
                .unwrap_or_else(|err| {
@@ -1926,19 +2043,39 @@ async fn on_file_change_request_approval_response(
                }
            });

            map_file_change_approval_decision(response.decision)
            let (decision, completion_status) =
                map_file_change_approval_decision(response.decision);
            // Allow EventMsg::PatchApplyEnd to emit ItemCompleted for accepted patches.
            // Only short-circuit on declines/cancels/failures.
            (decision, completion_status)
        }
        Ok(Err(err)) if is_turn_transition_server_request_error(&err) => return,
        Ok(Err(err)) => {
            error!("request failed with client error: {err:?}");
            ReviewDecision::Denied
            (ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
        }
        Err(err) => {
            error!("request failed: {err:?}");
            ReviewDecision::Denied
            (ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
        }
    };

    if let Some(status) = completion_status {
        complete_file_change_item(
            conversation_id,
            item_id.clone(),
            ThreadItem::FileChange {
                id: item_id.clone(),
                changes,
                status,
            },
            event_turn_id.clone(),
            &outgoing,
            &thread_state,
        )
        .await;
    }

    if let Err(err) = codex
        .submit(Op::PatchApproval {
            id: item_id,
@@ -2075,13 +2212,6 @@ async fn on_command_execution_request_approval_response(
    }
}

fn now_unix_timestamp_ms() -> i64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|duration| duration.as_millis() as i64)
        .unwrap_or_default()
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -2093,7 +2223,6 @@ mod tests {
    use anyhow::Result;
    use anyhow::anyhow;
    use anyhow::bail;
    use chrono::Utc;
    use codex_app_server_protocol::AutoReviewDecisionSource;
    use codex_app_server_protocol::GuardianApprovalReviewStatus;
    use codex_app_server_protocol::JSONRPCErrorError;
@@ -2110,28 +2239,20 @@ mod tests {
    use codex_protocol::permissions::FileSystemSpecialPath;
    use codex_protocol::plan_tool::PlanItemArg;
    use codex_protocol::plan_tool::StepStatus;
    use codex_protocol::protocol::AgentMessageEvent;
    use codex_protocol::protocol::AskForApproval;
    use codex_protocol::protocol::CreditsSnapshot;
    use codex_protocol::protocol::EventMsg;
    use codex_protocol::protocol::GuardianAssessmentEvent;
    use codex_protocol::protocol::GuardianAssessmentStatus;
    use codex_protocol::protocol::RateLimitSnapshot;
    use codex_protocol::protocol::RateLimitWindow;
    use codex_protocol::protocol::RolloutItem;
    use codex_protocol::protocol::SandboxPolicy;
    use codex_protocol::protocol::SessionSource;
    use codex_protocol::protocol::TokenUsage;
    use codex_protocol::protocol::TokenUsageInfo;
    use codex_protocol::protocol::UserMessageEvent;
    use codex_thread_store::StoredThread;
    use codex_thread_store::StoredThreadHistory;
    use codex_utils_absolute_path::AbsolutePathBuf;
    use codex_utils_absolute_path::test_support::PathBufExt;
    use codex_utils_absolute_path::test_support::test_path_buf;
    use core_test_support::load_default_config_for_test;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::path::PathBuf;
    use tempfile::TempDir;
    use tokio::sync::Mutex;
    use tokio::sync::mpsc;
@@ -2156,71 +2277,6 @@ mod tests {
        }
    }

    #[test]
    fn rollback_response_rebuilds_pathless_thread_from_stored_history() -> Result<()> {
        let thread_id = ThreadId::from_string("00000000-0000-0000-0000-000000000789")?;
        let created_at = Utc::now();
        let history_items = vec![
            RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent {
                message: "before rollback".to_string(),
                images: None,
                local_images: Vec::new(),
                text_elements: Vec::new(),
            })),
            RolloutItem::EventMsg(EventMsg::AgentMessage(AgentMessageEvent {
                message: "after rollback".to_string(),
                phase: None,
                memory_citation: None,
            })),
        ];
        let stored_thread = StoredThread {
            thread_id,
            rollout_path: None,
            forked_from_id: None,
            preview: "fallback preview".to_string(),
            name: Some("Rollback thread".to_string()),
            model_provider: "openai".to_string(),
            model: None,
            reasoning_effort: None,
            created_at,
            updated_at: created_at,
            archived_at: None,
            cwd: test_path_buf("/tmp").abs().into(),
            cli_version: "0.0.0".to_string(),
            source: SessionSource::Cli,
            agent_nickname: None,
            agent_role: None,
            agent_path: None,
            git_info: None,
            approval_mode: AskForApproval::OnRequest,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            token_usage: None,
            first_user_message: Some("before rollback".to_string()),
            history: Some(StoredThreadHistory {
                thread_id,
                items: history_items,
            }),
        };
        let fallback_cwd = test_path_buf("/tmp").abs();

        let response = thread_rollback_response_from_stored_thread(
            stored_thread,
            "fallback-provider",
            &fallback_cwd,
            ThreadStatus::NotLoaded,
        )
        .expect("rollback response should rebuild from stored history");

        assert_eq!(response.thread.id, thread_id.to_string());
        assert_eq!(response.thread.path, None);
        assert_eq!(response.thread.preview, "before rollback");
        assert_eq!(response.thread.name.as_deref(), Some("Rollback thread"));
        assert_eq!(response.thread.status, ThreadStatus::NotLoaded);
        assert_eq!(response.thread.turns.len(), 1);
        assert_eq!(response.thread.turns[0].items.len(), 2);
        Ok(())
    }

    fn turn_complete_event(turn_id: &str) -> TurnCompleteEvent {
        TurnCompleteEvent {
            turn_id: turn_id.to_string(),
@@ -2303,6 +2359,7 @@ mod tests {
        thread_state: Arc<Mutex<ThreadState>>,
        thread_watch_manager: ThreadWatchManager,
        analytics_events_client: AnalyticsEventsClient,
        codex_home: PathBuf,
    }

    impl GuardianAssessmentTestContext {
@@ -2322,6 +2379,7 @@ mod tests {
            self.thread_watch_manager.clone(),
            Arc::new(tokio::sync::Semaphore::new(/*permits*/ 1)),
            "test-provider".to_string(),
            &self.codex_home,
        )
        .await;
    }
@@ -2629,7 +2687,12 @@ mod tests {
            thread_id: conversation_id,
            thread: conversation,
            ..
        } = thread_manager.start_thread(config.clone()).await?;
        } = thread_manager
            .start_thread(
                config.clone(),
                codex_core::thread_store_from_config(&config),
            )
            .await?;
        let thread_state = new_thread_state();
        let thread_watch_manager = ThreadWatchManager::new();
        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
@@ -2656,6 +2719,7 @@ mod tests {
            "http://localhost".to_string(),
            Some(false),
        ),
        codex_home: codex_home.path().to_path_buf(),
    };

    guardian_context
@@ -2827,9 +2891,10 @@ mod tests {

    #[test]
    fn file_change_accept_for_session_maps_to_approved_for_session() {
        let decision =
        let (decision, completion_status) =
            map_file_change_approval_decision(FileChangeApprovalDecision::AcceptForSession);
        assert_eq!(decision, ReviewDecision::ApprovedForSession);
        assert_eq!(completion_status, None);
    }

    #[test]

codex-rs/app-server/src/codex_message_processor.rs (Normal file, 11116 lines)
File diff suppressed because it is too large

@@ -0,0 +1,66 @@
use std::sync::Arc;

use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppListUpdatedNotification;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ServerNotification;
use codex_chatgpt::connectors;

use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;

pub(super) fn merge_loaded_apps(
    all_connectors: Option<&[AppInfo]>,
    accessible_connectors: Option<&[AppInfo]>,
) -> Vec<AppInfo> {
    let all_connectors_loaded = all_connectors.is_some();
    let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
    let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
    connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
}

pub(super) fn should_send_app_list_updated_notification(
    connectors: &[AppInfo],
    accessible_loaded: bool,
    all_loaded: bool,
) -> bool {
    connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
}

pub(super) fn paginate_apps(
    connectors: &[AppInfo],
    start: usize,
    limit: Option<u32>,
) -> Result<AppsListResponse, JSONRPCErrorError> {
    let total = connectors.len();
    if start > total {
        return Err(JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("cursor {start} exceeds total apps {total}"),
            data: None,
        });
    }

    let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
    let end = start.saturating_add(effective_limit).min(total);
    let data = connectors[start..end].to_vec();
    let next_cursor = if end < total {
        Some(end.to_string())
    } else {
        None
    };

    Ok(AppsListResponse { data, next_cursor })
}

pub(super) async fn send_app_list_updated_notification(
    outgoing: &Arc<OutgoingMessageSender>,
    data: Vec<AppInfo>,
) {
    outgoing
        .send_server_notification(ServerNotification::AppListUpdated(
            AppListUpdatedNotification { data },
        ))
        .await;
}
@@ -0,0 +1,149 @@
use std::collections::HashSet;

use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppSummary;
use codex_chatgpt::connectors;
use codex_core::config::Config;
use codex_exec_server::EnvironmentManager;
use codex_plugin::AppConnectorId;
use tracing::warn;

pub(super) async fn load_plugin_app_summaries(
    config: &Config,
    plugin_apps: &[AppConnectorId],
    environment_manager: &EnvironmentManager,
) -> Vec<AppSummary> {
    if plugin_apps.is_empty() {
        return Vec::new();
    }

    let connectors =
        match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
            Ok(connectors) => connectors,
            Err(err) => {
                warn!("failed to load app metadata for plugin/read: {err:#}");
                connectors::list_cached_all_connectors(config)
                    .await
                    .unwrap_or_default()
            }
        };

    let plugin_connectors = connectors::connectors_for_plugin_apps(connectors, plugin_apps);

    let accessible_connectors =
        match connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
            config,
            /*force_refetch*/ false,
            environment_manager,
        )
        .await
        {
            Ok(status) if status.codex_apps_ready => status.connectors,
            Ok(_) => {
                return plugin_connectors
                    .into_iter()
                    .map(AppSummary::from)
                    .collect();
            }
            Err(err) => {
                warn!("failed to load app auth state for plugin/read: {err:#}");
                return plugin_connectors
                    .into_iter()
                    .map(AppSummary::from)
                    .collect();
            }
        };

    let accessible_ids = accessible_connectors
        .iter()
        .map(|connector| connector.id.as_str())
        .collect::<HashSet<_>>();

    plugin_connectors
        .into_iter()
        .map(|connector| {
            let needs_auth = !accessible_ids.contains(connector.id.as_str());
            AppSummary {
                id: connector.id,
                name: connector.name,
                description: connector.description,
                install_url: connector.install_url,
                needs_auth,
            }
        })
        .collect()
}

pub(super) fn plugin_apps_needing_auth(
    all_connectors: &[AppInfo],
    accessible_connectors: &[AppInfo],
    plugin_apps: &[AppConnectorId],
    codex_apps_ready: bool,
) -> Vec<AppSummary> {
    if !codex_apps_ready {
        return Vec::new();
    }

    let accessible_ids = accessible_connectors
        .iter()
        .map(|connector| connector.id.as_str())
        .collect::<HashSet<_>>();
    let plugin_app_ids = plugin_apps
        .iter()
        .map(|connector_id| connector_id.0.as_str())
        .collect::<HashSet<_>>();

    all_connectors
        .iter()
        .filter(|connector| {
            plugin_app_ids.contains(connector.id.as_str())
                && !accessible_ids.contains(connector.id.as_str())
        })
        .cloned()
        .map(|connector| AppSummary {
            id: connector.id,
            name: connector.name,
            description: connector.description,
            install_url: connector.install_url,
            needs_auth: true,
        })
        .collect()
}

#[cfg(test)]
mod tests {
    use codex_app_server_protocol::AppInfo;
    use codex_plugin::AppConnectorId;
    use pretty_assertions::assert_eq;

    use super::plugin_apps_needing_auth;

    #[test]
    fn plugin_apps_needing_auth_returns_empty_when_codex_apps_is_not_ready() {
        let all_connectors = vec![AppInfo {
            id: "alpha".to_string(),
            name: "Alpha".to_string(),
            description: Some("Alpha connector".to_string()),
            logo_url: None,
            logo_url_dark: None,
            distribution_channel: None,
            branding: None,
            app_metadata: None,
            labels: None,
            install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
            is_accessible: false,
            is_enabled: true,
            plugin_display_names: Vec::new(),
        }];

        assert_eq!(
            plugin_apps_needing_auth(
                &all_connectors,
                &[],
                &[AppConnectorId("alpha".to_string())],
                /*codex_apps_ready*/ false,
            ),
            Vec::new()
        );
    }
}
@@ -0,0 +1,95 @@
use std::collections::HashMap;
use std::sync::Arc;

use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
use codex_app_server_protocol::ServerNotification;
use codex_config::types::McpServerConfig;
use codex_core::config::Config;
use codex_mcp::McpOAuthLoginSupport;
use codex_mcp::oauth_login_support;
use codex_mcp::resolve_oauth_scopes;
use codex_mcp::should_retry_without_scopes;
use codex_rmcp_client::perform_oauth_login_silent;
use tracing::warn;

use super::CodexMessageProcessor;

impl CodexMessageProcessor {
    pub(super) async fn start_plugin_mcp_oauth_logins(
        &self,
        config: &Config,
        plugin_mcp_servers: HashMap<String, McpServerConfig>,
    ) {
        for (name, server) in plugin_mcp_servers {
            let oauth_config = match oauth_login_support(&server.transport).await {
                McpOAuthLoginSupport::Supported(config) => config,
                McpOAuthLoginSupport::Unsupported => continue,
                McpOAuthLoginSupport::Unknown(err) => {
                    warn!(
                        "MCP server may or may not require login for plugin install {name}: {err}"
                    );
                    continue;
                }
            };

            let resolved_scopes = resolve_oauth_scopes(
                /*explicit_scopes*/ None,
                server.scopes.clone(),
                oauth_config.discovered_scopes.clone(),
            );

            let store_mode = config.mcp_oauth_credentials_store_mode;
            let callback_port = config.mcp_oauth_callback_port;
            let callback_url = config.mcp_oauth_callback_url.clone();
            let outgoing = Arc::clone(&self.outgoing);
            let notification_name = name.clone();

            tokio::spawn(async move {
                let first_attempt = perform_oauth_login_silent(
                    &name,
                    &oauth_config.url,
                    store_mode,
                    oauth_config.http_headers.clone(),
                    oauth_config.env_http_headers.clone(),
                    &resolved_scopes.scopes,
                    server.oauth_resource.as_deref(),
                    callback_port,
                    callback_url.as_deref(),
                )
                .await;

                let final_result = match first_attempt {
                    Err(err) if should_retry_without_scopes(&resolved_scopes, &err) => {
                        perform_oauth_login_silent(
                            &name,
                            &oauth_config.url,
                            store_mode,
                            oauth_config.http_headers,
                            oauth_config.env_http_headers,
                            &[],
                            server.oauth_resource.as_deref(),
                            callback_port,
                            callback_url.as_deref(),
                        )
                        .await
                    }
                    result => result,
                };

                let (success, error) = match final_result {
                    Ok(()) => (true, None),
                    Err(err) => (false, Some(err.to_string())),
                };

                let notification = ServerNotification::McpServerOauthLoginCompleted(
                    McpServerOauthLoginCompletedNotification {
                        name: notification_name,
                        success,
                        error,
                    },
                );
                outgoing.send_server_notification(notification).await;
            });
        }
    }
}
@@ -3,249 +3,15 @@ use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use codex_app_server_protocol::PluginAvailability;
use codex_app_server_protocol::PluginInstallPolicy;
use codex_config::types::McpServerConfig;
use codex_core_plugins::remote::is_valid_remote_plugin_id;
use codex_core_plugins::remote::validate_remote_plugin_id;
use codex_mcp::McpOAuthLoginSupport;
use codex_mcp::oauth_login_support;
use codex_mcp::should_retry_without_scopes;
use codex_rmcp_client::perform_oauth_login_silent;

#[derive(Clone)]
pub(crate) struct PluginRequestProcessor {
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    analytics_events_client: AnalyticsEventsClient,
    config_manager: ConfigManager,
    workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
}

fn plugin_skills_to_info(
    skills: &[codex_core::skills::SkillMetadata],
    disabled_skill_paths: &HashSet<AbsolutePathBuf>,
) -> Vec<SkillSummary> {
    skills
        .iter()
        .map(|skill| SkillSummary {
            name: skill.name.clone(),
            description: skill.description.clone(),
            short_description: skill.short_description.clone(),
            interface: skill.interface.clone().map(|interface| {
                codex_app_server_protocol::SkillInterface {
                    display_name: interface.display_name,
                    short_description: interface.short_description,
                    icon_small: interface.icon_small,
                    icon_large: interface.icon_large,
                    brand_color: interface.brand_color,
                    default_prompt: interface.default_prompt,
                }
            }),
            path: Some(skill.path_to_skills_md.clone()),
            enabled: !disabled_skill_paths.contains(&skill.path_to_skills_md),
        })
        .collect()
}

fn local_plugin_interface_to_info(interface: PluginManifestInterface) -> PluginInterface {
    PluginInterface {
        display_name: interface.display_name,
        short_description: interface.short_description,
        long_description: interface.long_description,
        developer_name: interface.developer_name,
        category: interface.category,
        capabilities: interface.capabilities,
        website_url: interface.website_url,
        privacy_policy_url: interface.privacy_policy_url,
        terms_of_service_url: interface.terms_of_service_url,
        default_prompt: interface.default_prompt,
        brand_color: interface.brand_color,
        composer_icon: interface.composer_icon,
        composer_icon_url: None,
        logo: interface.logo,
        logo_url: None,
        screenshots: interface.screenshots,
        screenshot_urls: Vec::new(),
    }
}

fn marketplace_plugin_source_to_info(source: MarketplacePluginSource) -> PluginSource {
    match source {
        MarketplacePluginSource::Local { path } => PluginSource::Local { path },
        MarketplacePluginSource::Git {
            url,
            path,
            ref_name,
            sha,
        } => PluginSource::Git {
            url,
            path,
            ref_name,
            sha,
        },
    }
}

impl PluginRequestProcessor {
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        outgoing: Arc<OutgoingMessageSender>,
        analytics_events_client: AnalyticsEventsClient,
        config_manager: ConfigManager,
        workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            outgoing,
            analytics_events_client,
            config_manager,
            workspace_settings_cache,
        }
    }

    pub(crate) async fn plugin_list(
impl CodexMessageProcessor {
    pub(super) async fn plugin_list(
        &self,
        request_id: ConnectionRequestId,
        params: PluginListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_list_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_read(
        &self,
        params: PluginReadParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_read_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_skill_read(
        &self,
        params: PluginSkillReadParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_skill_read_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_share_save(
        &self,
        params: PluginShareSaveParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_share_save_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_share_list(
        &self,
        params: PluginShareListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_share_list_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_share_delete(
        &self,
        params: PluginShareDeleteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_share_delete_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_install(
        &self,
        params: PluginInstallParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_install_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn plugin_uninstall(
        &self,
        params: PluginUninstallParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.plugin_uninstall_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) fn effective_plugins_changed_callback(
        &self,
        config: Config,
    ) -> Arc<dyn Fn() + Send + Sync> {
        let thread_manager = Arc::clone(&self.thread_manager);
        Arc::new(move || {
            Self::spawn_effective_plugins_changed_task(Arc::clone(&thread_manager), config.clone());
        })
    }

    fn on_effective_plugins_changed(&self, config: Config) {
        Self::spawn_effective_plugins_changed_task(Arc::clone(&self.thread_manager), config);
    }

    fn spawn_effective_plugins_changed_task(thread_manager: Arc<ThreadManager>, config: Config) {
        tokio::spawn(async move {
            thread_manager.plugins_manager().clear_cache();
            thread_manager.skills_manager().clear_cache();
            if thread_manager.list_thread_ids().await.is_empty() {
                return;
            }
            if let Err(err) =
                McpRequestProcessor::queue_mcp_server_refresh_for_config(&thread_manager, &config)
                    .await
            {
                warn!("failed to queue MCP refresh after effective plugins changed: {err:?}");
            }
        });
    }

    fn clear_plugin_related_caches(&self) {
        self.thread_manager.plugins_manager().clear_cache();
        self.thread_manager.skills_manager().clear_cache();
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }

    async fn workspace_codex_plugins_enabled(
        &self,
        config: &Config,
        auth: Option<&CodexAuth>,
    ) -> bool {
        match workspace_settings::codex_plugins_enabled_for_workspace(
            config,
            auth,
            Some(&self.workspace_settings_cache),
        )
        .await
        {
            Ok(enabled) => enabled,
            Err(err) => {
                warn!(
                    "failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
                );
                true
            }
        }
    ) {
        let result = self.plugin_list_response(params).await;
        self.outgoing.send_result(request_id, result).await;
    }

    async fn plugin_list_response(
@@ -405,6 +171,15 @@ impl PluginRequestProcessor {
        })
    }

    pub(super) async fn plugin_read(
        &self,
        request_id: ConnectionRequestId,
        params: PluginReadParams,
    ) {
        let result = self.plugin_read_response(params).await;
        self.outgoing.send_result(request_id, result).await;
    }

    async fn plugin_read_response(
        &self,
        params: PluginReadParams,
@@ -442,9 +217,12 @@ impl PluginRequestProcessor {
            .await
            .map_err(|err| Self::marketplace_error(err, "read plugin details"))?;
        let environment_manager = self.thread_manager.environment_manager();
        let app_summaries =
            load_plugin_app_summaries(&config, &outcome.plugin.apps, &environment_manager)
                .await;
        let app_summaries = plugin_app_helpers::load_plugin_app_summaries(
            &config,
            &outcome.plugin.apps,
            &environment_manager,
        )
        .await;
        let visible_skills = outcome
            .plugin
            .skills
@@ -483,15 +261,15 @@ impl PluginRequestProcessor {
        if !config.features.enabled(Feature::Plugins)
            || !config.features.enabled(Feature::RemotePlugin)
        {
            return Err(invalid_request(format!(
                "remote plugin read is not enabled for marketplace {remote_marketplace_name}"
            )));
            return Err(invalid_request("remote plugin read is not enabled"));
        }
        let auth = self.auth_manager.auth().await;
        let remote_plugin_service_config = RemotePluginServiceConfig {
            chatgpt_base_url: config.chatgpt_base_url.clone(),
        };
        validate_remote_plugin_id(&plugin_name)?;
        if plugin_name.is_empty() || !is_valid_remote_plugin_id(&plugin_name) {
            return Err(invalid_request("invalid remote plugin id"));
        }
        let remote_detail = codex_core_plugins::remote::fetch_remote_plugin_detail(
            &remote_plugin_service_config,
            auth.as_ref(),
@@ -509,8 +287,12 @@ impl PluginRequestProcessor {
            .map(codex_plugin::AppConnectorId)
            .collect::<Vec<_>>();
        let environment_manager = self.thread_manager.environment_manager();
        let app_summaries =
            load_plugin_app_summaries(&config, &plugin_apps, &environment_manager).await;
        let app_summaries = plugin_app_helpers::load_plugin_app_summaries(
            &config,
            &plugin_apps,
            &environment_manager,
        )
        .await;
        remote_plugin_detail_to_info(remote_detail, app_summaries)
    }
    };
@@ -518,50 +300,13 @@ impl PluginRequestProcessor {
        Ok(PluginReadResponse { plugin })
    }

    async fn plugin_skill_read_response(
    pub(super) async fn plugin_share_save(
        &self,
        params: PluginSkillReadParams,
    ) -> Result<PluginSkillReadResponse, JSONRPCErrorError> {
        let PluginSkillReadParams {
            remote_marketplace_name,
            remote_plugin_id,
            skill_name,
        } = params;

        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        if !config.features.enabled(Feature::Plugins)
            || !config.features.enabled(Feature::RemotePlugin)
        {
            return Err(invalid_request(format!(
                "remote plugin skill read is not enabled for marketplace {remote_marketplace_name}"
            )));
        }
        validate_remote_plugin_id(&remote_plugin_id)?;
        if skill_name.is_empty() {
            return Err(invalid_request(
                "invalid remote plugin skill name: cannot be empty",
            ));
        }

        let auth = self.auth_manager.auth().await;
        let remote_plugin_service_config = RemotePluginServiceConfig {
            chatgpt_base_url: config.chatgpt_base_url.clone(),
        };
        let remote_skill_detail = codex_core_plugins::remote::fetch_remote_plugin_skill_detail(
            &remote_plugin_service_config,
            auth.as_ref(),
|
||||
&remote_marketplace_name,
|
||||
&remote_plugin_id,
|
||||
&skill_name,
|
||||
)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
remote_plugin_catalog_error_to_jsonrpc(err, "read remote plugin skill details")
|
||||
})?;
|
||||
|
||||
Ok(PluginSkillReadResponse {
|
||||
contents: remote_skill_detail.contents,
|
||||
})
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginShareSaveParams,
|
||||
) {
|
||||
let result = self.plugin_share_save_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_save_response(
|
||||
@@ -585,23 +330,29 @@ impl PluginRequestProcessor {
|
||||
let result = codex_core_plugins::remote::save_remote_plugin_share(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
&plugin_path,
|
||||
plugin_path.as_path(),
|
||||
remote_plugin_id.as_deref(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| remote_plugin_catalog_error_to_jsonrpc(err, "save remote plugin share"))?;
|
||||
let remote_plugin_id = result.remote_plugin_id;
|
||||
self.clear_plugin_related_caches();
|
||||
Ok(PluginShareSaveResponse {
|
||||
remote_plugin_id,
|
||||
remote_plugin_id: result.remote_plugin_id,
|
||||
share_url: result.share_url.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_share_list(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
_params: PluginShareListParams,
|
||||
) {
|
||||
let result = self.plugin_share_list_response().await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_list_response(
|
||||
&self,
|
||||
_params: PluginShareListParams,
|
||||
) -> Result<PluginShareListResponse, JSONRPCErrorError> {
|
||||
let (config, auth) = self.load_plugin_share_config_and_auth().await?;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -610,28 +361,24 @@ impl PluginRequestProcessor {
|
||||
let data = codex_core_plugins::remote::list_remote_plugin_shares(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| remote_plugin_catalog_error_to_jsonrpc(err, "list remote plugin shares"))?
|
||||
.into_iter()
|
||||
.map(|summary| {
|
||||
let RemoteCatalogPluginShareSummary {
|
||||
summary,
|
||||
share_url,
|
||||
local_plugin_path,
|
||||
} = summary;
|
||||
let plugin = remote_plugin_summary_to_info(summary);
|
||||
PluginShareListItem {
|
||||
plugin,
|
||||
share_url: share_url.unwrap_or_default(),
|
||||
local_plugin_path,
|
||||
}
|
||||
})
|
||||
.map(remote_plugin_summary_to_info)
|
||||
.collect();
|
||||
Ok(PluginShareListResponse { data })
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_share_delete(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginShareDeleteParams,
|
||||
) {
|
||||
let result = self.plugin_share_delete_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_delete_response(
|
||||
&self,
|
||||
params: PluginShareDeleteParams,
|
||||
@@ -648,7 +395,6 @@ impl PluginRequestProcessor {
|
||||
codex_core_plugins::remote::delete_remote_plugin_share(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
&remote_plugin_id,
|
||||
)
|
||||
.await
|
||||
@@ -670,6 +416,15 @@ impl PluginRequestProcessor {
|
||||
Ok((config, auth))
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_install(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginInstallParams,
|
||||
) {
|
||||
let result = self.plugin_install_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_install_response(
|
||||
&self,
|
||||
params: PluginInstallParams,
|
||||
@@ -759,11 +514,13 @@ impl PluginRequestProcessor {
|
||||
if !config.features.enabled(Feature::Plugins)
|
||||
|| !config.features.enabled(Feature::RemotePlugin)
|
||||
{
|
||||
return Err(invalid_request(format!(
|
||||
"remote plugin install is not enabled for marketplace {remote_marketplace_name}"
|
||||
)));
|
||||
return Err(invalid_request("remote plugin install is not enabled"));
|
||||
}
|
||||
if remote_plugin_id.is_empty() || !is_valid_remote_plugin_id(&remote_plugin_id) {
|
||||
return Err(invalid_request(
|
||||
"invalid remote plugin id: only ASCII letters, digits, `_`, `-`, and `~` are allowed",
|
||||
));
|
||||
}
|
||||
validate_remote_plugin_id(&remote_plugin_id)?;
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -923,7 +680,7 @@ impl PluginRequestProcessor {
|
||||
);
|
||||
}
|
||||
|
||||
plugin_apps_needing_auth(
|
||||
plugin_app_helpers::plugin_apps_needing_auth(
|
||||
&all_connectors,
|
||||
&accessible_connectors,
|
||||
plugin_apps,
|
||||
@@ -931,82 +688,13 @@ impl PluginRequestProcessor {
|
||||
)
|
||||
}
|
||||
|
||||
async fn start_plugin_mcp_oauth_logins(
|
||||
pub(super) async fn plugin_uninstall(
|
||||
&self,
|
||||
config: &Config,
|
||||
plugin_mcp_servers: HashMap<String, McpServerConfig>,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginUninstallParams,
|
||||
) {
|
||||
for (name, server) in plugin_mcp_servers {
|
||||
let oauth_config = match oauth_login_support(&server.transport).await {
|
||||
McpOAuthLoginSupport::Supported(config) => config,
|
||||
McpOAuthLoginSupport::Unsupported => continue,
|
||||
McpOAuthLoginSupport::Unknown(err) => {
|
||||
warn!(
|
||||
"MCP server may or may not require login for plugin install {name}: {err}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resolved_scopes = resolve_oauth_scopes(
|
||||
/*explicit_scopes*/ None,
|
||||
server.scopes.clone(),
|
||||
oauth_config.discovered_scopes.clone(),
|
||||
);
|
||||
|
||||
let store_mode = config.mcp_oauth_credentials_store_mode;
|
||||
let callback_port = config.mcp_oauth_callback_port;
|
||||
let callback_url = config.mcp_oauth_callback_url.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let notification_name = name.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let first_attempt = perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers.clone(),
|
||||
oauth_config.env_http_headers.clone(),
|
||||
&resolved_scopes.scopes,
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let final_result = match first_attempt {
|
||||
Err(err) if should_retry_without_scopes(&resolved_scopes, &err) => {
|
||||
perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers,
|
||||
oauth_config.env_http_headers,
|
||||
&[],
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
result => result,
|
||||
};
|
||||
|
||||
let (success, error) = match final_result {
|
||||
Ok(()) => (true, None),
|
||||
Err(err) => (false, Some(err.to_string())),
|
||||
};
|
||||
|
||||
let notification = ServerNotification::McpServerOauthLoginCompleted(
|
||||
McpServerOauthLoginCompletedNotification {
|
||||
name: notification_name,
|
||||
success,
|
||||
error,
|
||||
},
|
||||
);
|
||||
outgoing.send_server_notification(notification).await;
|
||||
});
|
||||
}
|
||||
let result = self.plugin_uninstall_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_uninstall_response(
|
||||
@@ -1015,11 +703,13 @@ impl PluginRequestProcessor {
|
||||
) -> Result<PluginUninstallResponse, JSONRPCErrorError> {
|
||||
let PluginUninstallParams { plugin_id } = params;
|
||||
if codex_plugin::PluginId::parse(&plugin_id).is_err()
|
||||
&& !is_valid_remote_plugin_id(&plugin_id)
|
||||
&& (plugin_id.is_empty() || !is_valid_remote_plugin_id(&plugin_id))
|
||||
{
|
||||
return Err(invalid_request("invalid remote plugin id"));
|
||||
return Err(invalid_request(
|
||||
"invalid plugin id: expected a local plugin id or remote plugin id",
|
||||
));
|
||||
}
|
||||
if is_valid_remote_plugin_id(&plugin_id) {
|
||||
if !plugin_id.is_empty() && is_valid_remote_plugin_id(&plugin_id) {
|
||||
return self.remote_plugin_uninstall_response(plugin_id).await;
|
||||
}
|
||||
let plugins_manager = self.thread_manager.plugins_manager();
|
||||
@@ -1110,7 +800,9 @@ impl PluginRequestProcessor {
|
||||
{
|
||||
return Err(invalid_request("remote plugin uninstall is not enabled"));
|
||||
}
|
||||
validate_remote_plugin_id(&plugin_id)?;
|
||||
if plugin_id.is_empty() || !is_valid_remote_plugin_id(&plugin_id) {
|
||||
return Err(invalid_request("invalid remote plugin id"));
|
||||
}
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -1146,106 +838,10 @@ impl PluginRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
async fn load_plugin_app_summaries(
|
||||
config: &Config,
|
||||
plugin_apps: &[codex_plugin::AppConnectorId],
|
||||
environment_manager: &EnvironmentManager,
|
||||
) -> Vec<AppSummary> {
|
||||
if plugin_apps.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let connectors =
|
||||
match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
|
||||
Ok(connectors) => connectors,
|
||||
Err(err) => {
|
||||
warn!("failed to load app metadata for plugin/read: {err:#}");
|
||||
connectors::list_cached_all_connectors(config)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
let plugin_connectors = connectors::connectors_for_plugin_apps(connectors, plugin_apps);
|
||||
|
||||
let accessible_connectors =
|
||||
match connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
|
||||
config,
|
||||
/*force_refetch*/ false,
|
||||
environment_manager,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(status) if status.codex_apps_ready => status.connectors,
|
||||
Ok(_) => {
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("failed to load app auth state for plugin/read: {err:#}");
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
};
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
plugin_connectors
|
||||
.into_iter()
|
||||
.map(|connector| {
|
||||
let needs_auth = !accessible_ids.contains(connector.id.as_str());
|
||||
AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn plugin_apps_needing_auth(
|
||||
all_connectors: &[AppInfo],
|
||||
accessible_connectors: &[AppInfo],
|
||||
plugin_apps: &[codex_plugin::AppConnectorId],
|
||||
codex_apps_ready: bool,
|
||||
) -> Vec<AppSummary> {
|
||||
if !codex_apps_ready {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
let plugin_app_ids = plugin_apps
|
||||
.iter()
|
||||
.map(|connector_id| connector_id.0.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
all_connectors
|
||||
.iter()
|
||||
.filter(|connector| {
|
||||
plugin_app_ids.contains(connector.id.as_str())
|
||||
&& !accessible_ids.contains(connector.id.as_str())
|
||||
})
|
||||
.cloned()
|
||||
.map(|connector| AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth: true,
|
||||
})
|
||||
.collect()
|
||||
fn is_valid_remote_plugin_id(plugin_name: &str) -> bool {
|
||||
plugin_name
|
||||
.chars()
|
||||
.all(|ch| ch.is_ascii_alphanumeric() || ch == '-' || ch == '_' || ch == '~')
|
||||
}
|
||||
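
One detail of the validator above is easy to miss: `chars().all(...)` is vacuously true for the empty string, which is why the call sites in this diff pair the check with a separate `is_empty()` guard. An illustrative sketch (not part of the diff):

// Sketch only: exercising is_valid_remote_plugin_id on sample ids.
fn demo_remote_plugin_id_validation() {
    assert!(is_valid_remote_plugin_id("my-plugin_1~beta")); // allowed charset
    assert!(!is_valid_remote_plugin_id("bad/id")); // '/' is rejected
    assert!(!is_valid_remote_plugin_id("no spaces")); // ' ' is rejected
    // Vacuously true on "", hence the separate is_empty() guards above.
    assert!(is_valid_remote_plugin_id(""));
}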

fn remote_marketplace_to_info(marketplace: RemoteMarketplace) -> PluginMarketplaceEntry {
@@ -1307,35 +903,42 @@ fn remote_plugin_catalog_error_to_jsonrpc(
    err: RemotePluginCatalogError,
    context: &str,
) -> JSONRPCErrorError {
    let code = match &err {
    match err {
        RemotePluginCatalogError::AuthRequired | RemotePluginCatalogError::UnsupportedAuthMode => {
            INVALID_REQUEST_ERROR_CODE
            JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("{context}: {err}"),
                data: None,
            }
        }
        RemotePluginCatalogError::UnexpectedStatus { status, .. } if status.as_u16() == 404 => {
            INVALID_REQUEST_ERROR_CODE
            JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("{context}: {err}"),
                data: None,
            }
        }
        RemotePluginCatalogError::InvalidPluginPath { .. }
        | RemotePluginCatalogError::ArchiveTooLarge { .. }
        | RemotePluginCatalogError::UnknownMarketplace { .. } => INVALID_REQUEST_ERROR_CODE,
        | RemotePluginCatalogError::ArchiveTooLarge { .. } => JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("{context}: {err}"),
            data: None,
        },
        RemotePluginCatalogError::AuthToken(_)
        | RemotePluginCatalogError::Request { .. }
        | RemotePluginCatalogError::UnexpectedStatus { .. }
        | RemotePluginCatalogError::Decode { .. }
        | RemotePluginCatalogError::InvalidBaseUrl(_)
        | RemotePluginCatalogError::InvalidBaseUrlPath
        | RemotePluginCatalogError::UnexpectedPluginId { .. }
        | RemotePluginCatalogError::UnexpectedSkillName { .. }
        | RemotePluginCatalogError::UnexpectedEnabledState { .. }
        | RemotePluginCatalogError::Archive { .. }
        | RemotePluginCatalogError::ArchiveJoin(_)
        | RemotePluginCatalogError::MissingUploadEtag
        | RemotePluginCatalogError::UnexpectedResponse(_)
        | RemotePluginCatalogError::CacheRemove(_) => INTERNAL_ERROR_CODE,
    };
    JSONRPCErrorError {
        code,
        message: format!("{context}: {err}"),
        data: None,
        | RemotePluginCatalogError::CacheRemove(_) => JSONRPCErrorError {
            code: INTERNAL_ERROR_CODE,
            message: format!("{context}: {err}"),
            data: None,
        },
    }
}
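
The rewrite above folds the message and data construction into each match arm instead of computing a code first and building the struct afterwards. A minimal model of the same classification, with hypothetical error variants (the numeric codes shown are the standard JSON-RPC values; whether this crate's `INVALID_REQUEST_ERROR_CODE` and `INTERNAL_ERROR_CODE` use exactly those numbers is an assumption):

// Sketch only: caller mistakes map to invalid-request, everything else to internal.
enum CatalogErrorKind {
    AuthRequired, // caller must authenticate first
    NotFound,     // e.g. an HTTP 404 from the catalog
    Transport,    // network or decode failures
}

fn classify(err: &CatalogErrorKind) -> i64 {
    const INVALID_REQUEST: i64 = -32600; // standard JSON-RPC "invalid request"
    const INTERNAL_ERROR: i64 = -32603; // standard JSON-RPC "internal error"
    match err {
        CatalogErrorKind::AuthRequired | CatalogErrorKind::NotFound => INVALID_REQUEST,
        CatalogErrorKind::Transport => INTERNAL_ERROR,
    }
}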
@@ -1,122 +1,68 @@
use super::*;
use codex_protocol::protocol::validate_thread_goal_objective;

#[derive(Clone)]
pub(crate) struct ThreadGoalRequestProcessor {
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    config: Arc<Config>,
    thread_state_manager: ThreadStateManager,
    state_db: Option<StateDbHandle>,
}

impl ThreadGoalRequestProcessor {
    pub(crate) fn new(
        thread_manager: Arc<ThreadManager>,
        outgoing: Arc<OutgoingMessageSender>,
        config: Arc<Config>,
        thread_state_manager: ThreadStateManager,
        state_db: Option<StateDbHandle>,
    ) -> Self {
        Self {
            thread_manager,
            outgoing,
            config,
            thread_state_manager,
            state_db,
        }
    }

    pub(crate) async fn thread_goal_set(
impl CodexMessageProcessor {
    pub(super) async fn thread_goal_set(
        &self,
        request_id: ConnectionRequestId,
        params: ThreadGoalSetParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.thread_goal_set_inner(request_id, params)
            .await
            .map(|()| None)
    }

    pub(crate) async fn thread_goal_get(
        &self,
        params: ThreadGoalGetParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.thread_goal_get_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn thread_goal_clear(
        &self,
        request_id: ConnectionRequestId,
        params: ThreadGoalClearParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.thread_goal_clear_inner(request_id, params)
            .await
            .map(|()| None)
    }

    pub(crate) async fn emit_resume_goal_snapshot_and_continue(
        &self,
        thread_id: ThreadId,
        thread: &CodexThread,
    ) {
        if !self.config.features.enabled(Feature::Goals) {
            self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
                .await;
            return;
        }
        self.emit_thread_goal_snapshot(thread_id).await;
        // App-server owns resume response and snapshot ordering, so wait until
        // those are sent before letting core start goal continuation.
        if let Err(err) = thread.continue_active_goal_if_idle().await {
            tracing::warn!("failed to continue active goal after resume: {err}");
        }
    }

    pub(crate) async fn pending_resume_goal_state(
        &self,
        thread: &CodexThread,
    ) -> (bool, Option<StateDbHandle>) {
        let emit_thread_goal_update = self.config.features.enabled(Feature::Goals);
        let thread_goal_state_db = if emit_thread_goal_update {
            if let Some(state_db) = thread.state_db() {
                Some(state_db)
            } else {
                self.state_db.clone()
        let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
            Ok(thread_id) => thread_id,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        };
        let state_db = match self.state_db_for_materialized_thread(thread_id).await {
            Ok(state_db) => state_db,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        } else {
            None
        };
        (emit_thread_goal_update, thread_goal_state_db)
    }

    async fn thread_goal_set_inner(
        &self,
        request_id: ConnectionRequestId,
        params: ThreadGoalSetParams,
    ) -> Result<(), JSONRPCErrorError> {
        if !self.config.features.enabled(Feature::Goals) {
            return Err(invalid_request("goals feature is disabled"));
        }

        let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
        let state_db = self.state_db_for_materialized_thread(thread_id).await?;
        let running_thread = self.thread_manager.get_thread(thread_id).await.ok();
        let rollout_path = match running_thread.as_ref() {
            Some(thread) => thread.rollout_path().ok_or_else(|| {
                invalid_request(format!(
                    "ephemeral thread does not support goals: {thread_id}"
                ))
            })?,
            None => find_thread_path_by_id_str(
                &self.config.codex_home,
                &thread_id.to_string(),
                self.state_db.as_deref(),
            )
            .await
            .map_err(|err| {
                internal_error(format!("failed to locate thread id {thread_id}: {err}"))
            })?
            .ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?,
            Some(thread) => match thread.rollout_path() {
                Some(path) => path,
                None => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("ephemeral thread does not support goals: {thread_id}"),
                    )
                    .await;
                    return;
                }
            },
            None => {
                match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
                    .await
                {
                    Ok(Some(path)) => path,
                    Ok(None) => {
                        self.send_invalid_request_error(
                            request_id,
                            format!("thread not found: {thread_id}"),
                        )
                        .await;
                        return;
                    }
                    Err(err) => {
                        self.send_internal_error(
                            request_id,
                            format!("failed to locate thread id {thread_id}: {err}"),
                        )
                        .await;
                        return;
                    }
                }
            }
        };
        reconcile_rollout(
            Some(&state_db),
@@ -138,67 +84,63 @@ impl ThreadGoalRequestProcessor {
        let objective = params.objective.as_deref().map(str::trim);

        if let Some(objective) = objective {
            validate_thread_goal_objective(objective).map_err(invalid_request)?;
        }
        if objective.is_some() || params.token_budget.is_some() {
            validate_goal_budget(params.token_budget.flatten()).map_err(invalid_request)?;
            if let Err(message) = validate_thread_goal_objective(objective) {
                self.send_invalid_request_error(request_id, message).await;
                return;
            }
            if let Err(message) = validate_goal_budget(params.token_budget.flatten()) {
                self.send_invalid_request_error(request_id, message).await;
                return;
            }
        } else if let Some(token_budget) = params.token_budget
            && let Err(message) = validate_goal_budget(token_budget)
        {
            self.send_invalid_request_error(request_id, message).await;
            return;
        }

        if let Some(thread) = running_thread.as_ref() {
            thread.prepare_external_goal_mutation().await;
        }

        let (goal, previous_status) = (if let Some(objective) = objective {
            let existing_goal = state_db
                .get_thread_goal(thread_id)
                .await
                .map_err(|err| invalid_request(err.to_string()))?;
            if let Some(goal) = existing_goal.as_ref().filter(|goal| {
                goal.objective == objective
                    && goal.status != codex_state::ThreadGoalStatus::Complete
            }) {
                let previous_status = ExternalGoalPreviousStatus::Existing(goal.status);
                state_db
                    .update_thread_goal(
                        thread_id,
                        codex_state::ThreadGoalUpdate {
                            status,
                            token_budget: params.token_budget,
                            expected_goal_id: Some(goal.goal_id.clone()),
                        },
                    )
                    .await
                    .and_then(|goal| {
                        goal.ok_or_else(|| {
                            anyhow::anyhow!(
                                "cannot update goal for thread {thread_id}: no goal exists"
        let goal = if let Some(objective) = objective {
            match state_db.get_thread_goal(thread_id).await {
                Ok(goal) => {
                    if let Some(goal) = goal.as_ref().filter(|goal| {
                        goal.objective == objective
                            && goal.status != codex_state::ThreadGoalStatus::Complete
                    }) {
                        state_db
                            .update_thread_goal(
                                thread_id,
                                codex_state::ThreadGoalUpdate {
                                    status,
                                    token_budget: params.token_budget,
                                    expected_goal_id: Some(goal.goal_id.clone()),
                                },
                            )
                        })
                    })
                    .map(|goal| (goal, previous_status))
            } else {
                let previous_status = ExternalGoalPreviousStatus::NewGoal;
                state_db
                    .replace_thread_goal(
                        thread_id,
                        objective,
                        status.unwrap_or(codex_state::ThreadGoalStatus::Active),
                        params.token_budget.flatten(),
                    )
                    .await
                    .map(|goal| (goal, previous_status))
                            .await
                            .and_then(|goal| {
                                goal.ok_or_else(|| {
                                    anyhow::anyhow!(
                                        "cannot update goal for thread {thread_id}: no goal exists"
                                    )
                                })
                            })
                    } else {
                        state_db
                            .replace_thread_goal(
                                thread_id,
                                objective,
                                status.unwrap_or(codex_state::ThreadGoalStatus::Active),
                                params.token_budget.flatten(),
                            )
                            .await
                    }
                }
                Err(err) => Err(err),
            }
        } else {
            let existing_goal = state_db
                .get_thread_goal(thread_id)
                .await
                .map_err(|err| invalid_request(err.to_string()))?;
            let Some(existing_goal) = existing_goal else {
                return Err(invalid_request(format!(
                    "cannot update goal for thread {thread_id}: no goal exists"
                )));
            };
            let previous_status = ExternalGoalPreviousStatus::Existing(existing_goal.status);
            state_db
                .update_thread_goal(
                    thread_id,
@@ -214,13 +156,17 @@ impl ThreadGoalRequestProcessor {
                        anyhow::anyhow!("cannot update goal for thread {thread_id}: no goal exists")
                    })
                })
                .map(|goal| (goal, previous_status))
        })
        .map_err(|err| invalid_request(err.to_string()))?;
        let external_goal_set = ExternalGoalSet {
            goal: goal.clone(),
            previous_status,
        };

        let goal = match goal {
            Ok(goal) => goal,
            Err(err) => {
                self.send_invalid_request_error(request_id, err.to_string())
                    .await;
                return;
            }
        };
        let goal_status = goal.status;
        let goal = api_thread_goal_from_state(goal);
        self.outgoing
            .send_response(
@@ -231,57 +177,109 @@ impl ThreadGoalRequestProcessor {
        self.emit_thread_goal_updated_ordered(thread_id, goal, listener_command_tx)
            .await;
        if let Some(thread) = running_thread.as_ref() {
            thread.apply_external_goal_set(external_goal_set).await;
            thread.apply_external_goal_set(goal_status).await;
        }
        Ok(())
    }

    async fn thread_goal_get_inner(
    pub(super) async fn thread_goal_get(
        &self,
        request_id: ConnectionRequestId,
        params: ThreadGoalGetParams,
    ) -> Result<ThreadGoalGetResponse, JSONRPCErrorError> {
    ) {
        if !self.config.features.enabled(Feature::Goals) {
            return Err(invalid_request("goals feature is disabled"));
            self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
                .await;
            return;
        }

        let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
        let state_db = self.state_db_for_materialized_thread(thread_id).await?;
        let goal = state_db
            .get_thread_goal(thread_id)
            .await
            .map_err(|err| internal_error(format!("failed to read thread goal: {err}")))?
            .map(api_thread_goal_from_state);
        Ok(ThreadGoalGetResponse { goal })
        let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
            Ok(thread_id) => thread_id,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        };
        let state_db = match self.state_db_for_materialized_thread(thread_id).await {
            Ok(state_db) => state_db,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        };
        let goal = match state_db.get_thread_goal(thread_id).await {
            Ok(goal) => goal.map(api_thread_goal_from_state),
            Err(err) => {
                self.send_internal_error(request_id, format!("failed to read thread goal: {err}"))
                    .await;
                return;
            }
        };
        self.outgoing
            .send_response(request_id, ThreadGoalGetResponse { goal })
            .await;
    }

    async fn thread_goal_clear_inner(
    pub(super) async fn thread_goal_clear(
        &self,
        request_id: ConnectionRequestId,
        params: ThreadGoalClearParams,
    ) -> Result<(), JSONRPCErrorError> {
    ) {
        if !self.config.features.enabled(Feature::Goals) {
            return Err(invalid_request("goals feature is disabled"));
            self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
                .await;
            return;
        }

        let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
        let state_db = self.state_db_for_materialized_thread(thread_id).await?;
        let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
            Ok(thread_id) => thread_id,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        };
        let state_db = match self.state_db_for_materialized_thread(thread_id).await {
            Ok(state_db) => state_db,
            Err(error) => {
                self.outgoing.send_error(request_id, error).await;
                return;
            }
        };
        let running_thread = self.thread_manager.get_thread(thread_id).await.ok();
        let rollout_path = match running_thread.as_ref() {
            Some(thread) => thread.rollout_path().ok_or_else(|| {
                invalid_request(format!(
                    "ephemeral thread does not support goals: {thread_id}"
                ))
            })?,
            None => find_thread_path_by_id_str(
                &self.config.codex_home,
                &thread_id.to_string(),
                self.state_db.as_deref(),
            )
            .await
            .map_err(|err| {
                internal_error(format!("failed to locate thread id {thread_id}: {err}"))
            })?
            .ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?,
            Some(thread) => match thread.rollout_path() {
                Some(path) => path,
                None => {
                    self.send_invalid_request_error(
                        request_id,
                        format!("ephemeral thread does not support goals: {thread_id}"),
                    )
                    .await;
                    return;
                }
            },
            None => {
                match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
                    .await
                {
                    Ok(Some(path)) => path,
                    Ok(None) => {
                        self.send_invalid_request_error(
                            request_id,
                            format!("thread not found: {thread_id}"),
                        )
                        .await;
                        return;
                    }
                    Err(err) => {
                        self.send_internal_error(
                            request_id,
                            format!("failed to locate thread id {thread_id}: {err}"),
                        )
                        .await;
                        return;
                    }
                }
            }
        };
        reconcile_rollout(
            Some(&state_db),
@@ -303,10 +301,14 @@ impl ThreadGoalRequestProcessor {
            let thread_state = thread_state.lock().await;
            thread_state.listener_command_tx()
        };
        let cleared = state_db
            .delete_thread_goal(thread_id)
            .await
            .map_err(|err| internal_error(format!("failed to clear thread goal: {err}")))?;
        let cleared = match state_db.delete_thread_goal(thread_id).await {
            Ok(cleared) => cleared,
            Err(err) => {
                self.send_internal_error(request_id, format!("failed to clear thread goal: {err}"))
                    .await;
                return;
            }
        };

        if cleared && let Some(thread) = running_thread.as_ref() {
            thread.apply_external_goal_clear().await;
@@ -319,7 +321,6 @@ impl ThreadGoalRequestProcessor {
        self.emit_thread_goal_cleared_ordered(thread_id, listener_command_tx)
            .await;
        }
        Ok(())
    }

    async fn state_db_for_materialized_thread(
@@ -336,24 +337,26 @@ impl ThreadGoalRequestProcessor {
            return Ok(state_db);
        }
        } else {
            find_thread_path_by_id_str(
                &self.config.codex_home,
                &thread_id.to_string(),
                self.state_db.as_deref(),
            )
            .await
            .map_err(|err| {
                internal_error(format!("failed to locate thread id {thread_id}: {err}"))
            })?
            .ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?;
            match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string()).await
            {
                Ok(Some(_)) => {}
                Ok(None) => {
                    return Err(invalid_request(format!("thread not found: {thread_id}")));
                }
                Err(err) => {
                    return Err(internal_error(format!(
                        "failed to locate thread id {thread_id}: {err}"
                    )));
                }
            }
        }

        self.state_db
            .clone()
        open_state_db_for_direct_thread_lookup(&self.config)
            .await
            .ok_or_else(|| internal_error("sqlite state db unavailable for thread goals"))
    }

    async fn emit_thread_goal_snapshot(&self, thread_id: ThreadId) {
    pub(super) async fn emit_thread_goal_snapshot(&self, thread_id: ThreadId) {
        let state_db = match self.state_db_for_materialized_thread(thread_id).await {
            Ok(state_db) => state_db,
            Err(err) => {
@@ -474,8 +477,3 @@ pub(super) fn api_thread_goal_from_state(goal: codex_state::ThreadGoal) -> Threa
        updated_at: goal.updated_at.timestamp(),
    }
}

fn parse_thread_id_for_request(thread_id: &str) -> Result<ThreadId, JSONRPCErrorError> {
    ThreadId::from_string(thread_id)
        .map_err(|err| invalid_request(format!("invalid thread id: {err}")))
}
@@ -9,6 +9,7 @@
//! the time the `TokenCount` was persisted so the notification still targets the
//! corresponding rebuilt turn.

use std::path::Path;
use std::sync::Arc;

use codex_app_server_protocol::ServerNotification;
@@ -23,6 +24,7 @@ use codex_protocol::ThreadId;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::RolloutItem;

use crate::codex_message_processor::read_rollout_items_from_rollout;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingMessageSender;

@@ -30,7 +32,7 @@ use crate::outgoing_message::OutgoingMessageSender;
///
/// This is lifecycle replay rather than a model event: the rollout already contains
/// the original `TokenCount`, and emitting through `send_event` here would duplicate
/// persisted usage records. Keeping replay connection-scoped also avoids
/// persisted usage records. Keeping this helper connection-scoped also avoids
/// surprising other subscribers with a historical usage update while they may be
/// rendering live turn events.
pub(super) async fn send_thread_token_usage_update_to_connection(
@@ -57,6 +59,19 @@ pub(super) async fn send_thread_token_usage_update_to_connection(
        .await;
}

pub(super) async fn latest_token_usage_turn_id_for_thread_path(thread: &Thread) -> Option<String> {
    let rollout_path = thread.path.as_deref()?;
    latest_token_usage_turn_id_from_rollout_path(rollout_path, thread.turns.as_slice()).await
}

pub(super) async fn latest_token_usage_turn_id_from_rollout_path(
    rollout_path: &Path,
    turns: &[Turn],
) -> Option<String> {
    let rollout_items = read_rollout_items_from_rollout(rollout_path).await.ok()?;
    latest_token_usage_turn_id_from_rollout_items(&rollout_items, turns)
}

/// Identifies the turn that was active when a `TokenCount` record appeared.
///
/// The id is preferred when it still appears in the rebuilt thread. The position is a
@@ -112,62 +127,3 @@ fn latest_token_usage_turn_id(thread: &Thread) -> String {
        .map(|turn| turn.id.clone())
        .unwrap_or_default()
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_app_server_protocol::build_turns_from_rollout_items;
    use codex_protocol::protocol::AgentMessageEvent;
    use codex_protocol::protocol::TokenCountEvent;
    use codex_protocol::protocol::UserMessageEvent;
    use pretty_assertions::assert_eq;

    #[test]
    fn replay_attribution_uses_already_loaded_history() {
        let rollout_items = token_usage_history();
        let turns = build_turns_from_rollout_items(&rollout_items);

        assert_eq!(
            latest_token_usage_turn_id_from_rollout_items(&rollout_items, turns.as_slice()),
            Some(turns[0].id.clone())
        );
    }

    #[test]
    fn replay_attribution_falls_back_to_rebuilt_turn_position() {
        let rollout_items = token_usage_history();
        let mut turns = build_turns_from_rollout_items(&rollout_items);
        turns[0].id = "rebuilt-turn-id".to_string();

        assert_eq!(
            latest_token_usage_turn_id_from_rollout_items(&rollout_items, turns.as_slice()),
            Some("rebuilt-turn-id".to_string())
        );
    }

    fn token_usage_history() -> Vec<RolloutItem> {
        vec![
            RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent {
                message: "first turn".to_string(),
                images: None,
                local_images: Vec::new(),
                text_elements: Vec::new(),
            })),
            RolloutItem::EventMsg(EventMsg::AgentMessage(AgentMessageEvent {
                message: "first answer".to_string(),
                phase: None,
                memory_citation: None,
            })),
            RolloutItem::EventMsg(EventMsg::TokenCount(TokenCountEvent {
                info: None,
                rate_limits: None,
            })),
            RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent {
                message: "second turn".to_string(),
                images: None,
                local_images: Vec::new(),
                text_elements: Vec::new(),
            })),
        ]
    }
}
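
The tests above pin down the attribution rule the doc comment describes: prefer the recorded turn id, fall back to position when a rebuild changed the ids. Reduced to plain data, the idea looks roughly like this (names are hypothetical; sketch only, not part of the diff):

// Sketch only: prefer the recorded turn id if the rebuilt thread still has it,
// otherwise fall back to the turn at the same position.
fn attribute_token_count(
    recorded_id: &str,
    recorded_position: usize,
    rebuilt_turn_ids: &[String],
) -> Option<String> {
    if rebuilt_turn_ids.iter().any(|id| id == recorded_id) {
        return Some(recorded_id.to_string());
    }
    rebuilt_turn_ids.get(recorded_position).cloned()
}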
@@ -477,7 +477,7 @@ async fn run_command(params: RunCommandParams) {
    });
    let stderr_handle = spawn_process_output(SpawnProcessOutputParams {
        connection_id: request_id.connection_id,
        process_id: process_id.clone(),
        process_id,
        output_rx: stderr_rx,
        stdio_timeout_rx,
        outgoing: Arc::clone(&outgoing),
codex-rs/app-server/src/config_api.rs (new file, 874 lines)
@@ -0,0 +1,874 @@
use crate::config_manager::ConfigManager;
use crate::config_manager_service::ConfigManagerError;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use async_trait::async_trait;
use codex_analytics::AnalyticsEventsClient;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigRequirements;
use codex_app_server_protocol::ConfigRequirementsReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::ConfiguredHookHandler;
use codex_app_server_protocol::ConfiguredHookMatcherGroup;
use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams;
use codex_app_server_protocol::ExperimentalFeatureEnablementSetResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ManagedHooksRequirements;
use codex_app_server_protocol::NetworkDomainPermission;
use codex_app_server_protocol::NetworkRequirements;
use codex_app_server_protocol::NetworkUnixSocketPermission;
use codex_app_server_protocol::SandboxMode;
use codex_config::ConfigRequirementsToml;
use codex_config::HookEventsToml;
use codex_config::HookHandlerConfig as CoreHookHandlerConfig;
use codex_config::ManagedHooksRequirementsToml;
use codex_config::MatcherGroup as CoreMatcherGroup;
use codex_config::ResidencyRequirement as CoreResidencyRequirement;
use codex_config::SandboxModeRequirement as CoreSandboxModeRequirement;
use codex_core::ThreadManager;
use codex_core::config::Config;
use codex_core_plugins::loader::installed_plugin_telemetry_metadata;
use codex_core_plugins::toggles::collect_plugin_enabled_candidates;
use codex_features::canonical_feature_for_key;
use codex_features::feature_for_key;
use codex_plugin::PluginId;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::protocol::Op;
use serde_json::json;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::warn;

const SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT: &[&str] = &[
    "apps",
    "memories",
    "plugins",
    "remote_control",
    "tool_search",
    "tool_suggest",
    "tool_call_mcp_elicitation",
];

#[async_trait]
pub(crate) trait UserConfigReloader: Send + Sync {
    async fn reload_user_config(&self);
}

#[async_trait]
impl UserConfigReloader for ThreadManager {
    async fn reload_user_config(&self) {
        let thread_ids = self.list_thread_ids().await;
        for thread_id in thread_ids {
            let Ok(thread) = self.get_thread(thread_id).await else {
                continue;
            };
            if let Err(err) = thread.submit(Op::ReloadUserConfig).await {
                warn!("failed to request user config reload: {err}");
            }
        }
    }
}

#[derive(Clone)]
pub(crate) struct ConfigApi {
    config_manager: ConfigManager,
    user_config_reloader: Arc<dyn UserConfigReloader>,
    analytics_events_client: AnalyticsEventsClient,
}

impl ConfigApi {
    pub(crate) fn new(
        config_manager: ConfigManager,
        user_config_reloader: Arc<dyn UserConfigReloader>,
        analytics_events_client: AnalyticsEventsClient,
    ) -> Self {
        Self {
            config_manager,
            user_config_reloader,
            analytics_events_client,
        }
    }

    pub(crate) async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| {
                internal_error(format!(
                    "failed to resolve feature override precedence: {err}"
                ))
            })
    }

    pub(crate) async fn read(
        &self,
        params: ConfigReadParams,
    ) -> Result<ConfigReadResponse, JSONRPCErrorError> {
        let fallback_cwd = params.cwd.as_ref().map(PathBuf::from);
        let mut response = self.config_manager.read(params).await.map_err(map_error)?;
        let config = self.load_latest_config(fallback_cwd).await?;
        for feature_key in SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT {
            let Some(feature) = feature_for_key(feature_key) else {
                continue;
            };
            let features = response
                .config
                .additional
                .entry("features".to_string())
                .or_insert_with(|| json!({}));
            if !features.is_object() {
                *features = json!({});
            }
            if let Some(features) = features.as_object_mut() {
                features.insert(
                    (*feature_key).to_string(),
                    json!(config.features.enabled(feature)),
                );
            }
        }
        Ok(response)
    }

    pub(crate) async fn config_requirements_read(
        &self,
    ) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
        let requirements = self
            .config_manager
            .read_requirements()
            .await
            .map_err(map_error)?
            .map(map_requirements_toml_to_api);

        Ok(ConfigRequirementsReadResponse { requirements })
    }

    pub(crate) async fn write_value(
        &self,
        params: ConfigValueWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let pending_changes =
            collect_plugin_enabled_candidates([(&params.key_path, &params.value)].into_iter());
        let response = self
            .config_manager
            .write_value(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        Ok(response)
    }

    pub(crate) async fn batch_write(
        &self,
        params: ConfigBatchWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let reload_user_config = params.reload_user_config;
        let pending_changes = collect_plugin_enabled_candidates(
            params
                .edits
                .iter()
                .map(|edit| (&edit.key_path, &edit.value)),
        );
        let response = self
            .config_manager
            .batch_write(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        if reload_user_config {
            self.user_config_reloader.reload_user_config().await;
        }
        Ok(response)
    }

    pub(crate) async fn set_experimental_feature_enablement(
        &self,
        params: ExperimentalFeatureEnablementSetParams,
    ) -> Result<ExperimentalFeatureEnablementSetResponse, JSONRPCErrorError> {
        let ExperimentalFeatureEnablementSetParams { enablement } = params;
        for key in enablement.keys() {
            if canonical_feature_for_key(key).is_some() {
                if SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.contains(&key.as_str()) {
                    continue;
                }

                return Err(invalid_request(format!(
                    "unsupported feature enablement `{key}`: currently supported features are {}",
                    SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.join(", ")
                )));
            }

            let message = if let Some(feature) = feature_for_key(key) {
                format!(
                    "invalid feature enablement `{key}`: use canonical feature key `{}`",
                    feature.key()
                )
            } else {
                format!("invalid feature enablement `{key}`")
            };
            return Err(invalid_request(message));
        }

        if enablement.is_empty() {
            return Ok(ExperimentalFeatureEnablementSetResponse { enablement });
        }

        self.config_manager
            .extend_runtime_feature_enablement(
                enablement
                    .iter()
                    .map(|(name, enabled)| (name.clone(), *enabled)),
            )
            .map_err(|_| internal_error("failed to update feature enablement"))?;

        self.load_latest_config(/*fallback_cwd*/ None).await?;
        self.user_config_reloader.reload_user_config().await;

        Ok(ExperimentalFeatureEnablementSetResponse { enablement })
    }

    async fn emit_plugin_toggle_events(
        &self,
        pending_changes: std::collections::BTreeMap<String, bool>,
    ) {
        for (plugin_id, enabled) in pending_changes {
            let Ok(plugin_id) = PluginId::parse(&plugin_id) else {
                continue;
            };
            let metadata =
                installed_plugin_telemetry_metadata(self.config_manager.codex_home(), &plugin_id)
                    .await;
            if enabled {
                self.analytics_events_client.track_plugin_enabled(metadata);
            } else {
                self.analytics_events_client.track_plugin_disabled(metadata);
            }
        }
    }
}

fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
    ConfigRequirements {
        allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
            policies
                .into_iter()
                .map(codex_app_server_protocol::AskForApproval::from)
                .collect()
        }),
        allowed_approvals_reviewers: requirements.allowed_approvals_reviewers.map(|reviewers| {
            reviewers
                .into_iter()
                .map(codex_app_server_protocol::ApprovalsReviewer::from)
                .collect()
        }),
        allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
            modes
                .into_iter()
                .filter_map(map_sandbox_mode_requirement_to_api)
                .collect()
        }),
        allowed_web_search_modes: requirements.allowed_web_search_modes.map(|modes| {
            let mut normalized = modes
                .into_iter()
                .map(Into::into)
                .collect::<Vec<WebSearchMode>>();
            if !normalized.contains(&WebSearchMode::Disabled) {
                normalized.push(WebSearchMode::Disabled);
            }
            normalized
        }),
        feature_requirements: requirements
            .feature_requirements
            .map(|requirements| requirements.entries),
        hooks: requirements.hooks.map(map_hooks_requirements_to_api),
        enforce_residency: requirements
            .enforce_residency
            .map(map_residency_requirement_to_api),
        network: requirements.network.map(map_network_requirements_to_api),
    }
}

fn map_hooks_requirements_to_api(hooks: ManagedHooksRequirementsToml) -> ManagedHooksRequirements {
    let ManagedHooksRequirementsToml {
        managed_dir,
        windows_managed_dir,
        hooks,
    } = hooks;
    let HookEventsToml {
        pre_tool_use,
        permission_request,
        post_tool_use,
        session_start,
        user_prompt_submit,
        stop,
    } = hooks;

    ManagedHooksRequirements {
        managed_dir,
        windows_managed_dir,
        pre_tool_use: map_hook_matcher_groups_to_api(pre_tool_use),
        permission_request: map_hook_matcher_groups_to_api(permission_request),
        post_tool_use: map_hook_matcher_groups_to_api(post_tool_use),
        session_start: map_hook_matcher_groups_to_api(session_start),
        user_prompt_submit: map_hook_matcher_groups_to_api(user_prompt_submit),
        stop: map_hook_matcher_groups_to_api(stop),
    }
}

fn map_hook_matcher_groups_to_api(
    groups: Vec<CoreMatcherGroup>,
) -> Vec<ConfiguredHookMatcherGroup> {
    groups
        .into_iter()
        .map(map_hook_matcher_group_to_api)
        .collect()
}

fn map_hook_matcher_group_to_api(group: CoreMatcherGroup) -> ConfiguredHookMatcherGroup {
    ConfiguredHookMatcherGroup {
        matcher: group.matcher,
        hooks: group
            .hooks
            .into_iter()
            .map(map_hook_handler_to_api)
            .collect(),
    }
}

fn map_hook_handler_to_api(handler: CoreHookHandlerConfig) -> ConfiguredHookHandler {
    match handler {
        CoreHookHandlerConfig::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        } => ConfiguredHookHandler::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        },
        CoreHookHandlerConfig::Prompt {} => ConfiguredHookHandler::Prompt {},
        CoreHookHandlerConfig::Agent {} => ConfiguredHookHandler::Agent {},
    }
}

fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
    match mode {
        CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
        CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
        CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
        CoreSandboxModeRequirement::ExternalSandbox => None,
    }
}
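
One behavior worth noting: because the mapper above returns `None` for `ExternalSandbox`, the `filter_map` in `map_requirements_toml_to_api` silently drops that variant from the API-facing list, which the test at the end of this file exercises. An illustrative sketch of the effect (assumes `SandboxMode` derives `PartialEq` and `Debug` for the assert; sketch only, not part of the diff):

// Sketch only: ExternalSandbox is dropped rather than mapped.
fn demo_sandbox_mode_filtering() {
    let modes = vec![
        CoreSandboxModeRequirement::ReadOnly,
        CoreSandboxModeRequirement::ExternalSandbox,
    ];
    let api: Vec<SandboxMode> = modes
        .into_iter()
        .filter_map(map_sandbox_mode_requirement_to_api)
        .collect();
    assert_eq!(api, vec![SandboxMode::ReadOnly]);
}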
|
||||
fn map_residency_requirement_to_api(
|
||||
residency: CoreResidencyRequirement,
|
||||
) -> codex_app_server_protocol::ResidencyRequirement {
|
||||
match residency {
|
||||
CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_requirements_to_api(
|
||||
network: codex_config::NetworkRequirementsToml,
|
||||
) -> NetworkRequirements {
|
||||
let allowed_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains);
|
||||
let denied_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::denied_domains);
|
||||
let allow_unix_sockets = network
|
||||
.unix_sockets
|
||||
.as_ref()
|
||||
.map(codex_config::NetworkUnixSocketPermissionsToml::allow_unix_sockets)
|
||||
.filter(|entries| !entries.is_empty());
|
||||
|
||||
    NetworkRequirements {
        enabled: network.enabled,
        http_port: network.http_port,
        socks_port: network.socks_port,
        allow_upstream_proxy: network.allow_upstream_proxy,
        dangerously_allow_non_loopback_proxy: network.dangerously_allow_non_loopback_proxy,
        dangerously_allow_all_unix_sockets: network.dangerously_allow_all_unix_sockets,
        domains: network.domains.map(|domains| {
            domains
                .entries
                .into_iter()
                .map(|(pattern, permission)| {
                    (pattern, map_network_domain_permission_to_api(permission))
                })
                .collect()
        }),
        managed_allowed_domains_only: network.managed_allowed_domains_only,
        allowed_domains,
        denied_domains,
        unix_sockets: network.unix_sockets.map(|unix_sockets| {
            unix_sockets
                .entries
                .into_iter()
                .map(|(path, permission)| {
                    (path, map_network_unix_socket_permission_to_api(permission))
                })
                .collect()
        }),
        allow_unix_sockets,
        allow_local_binding: network.allow_local_binding,
    }
}

fn map_network_domain_permission_to_api(
    permission: codex_config::NetworkDomainPermissionToml,
) -> NetworkDomainPermission {
    match permission {
        codex_config::NetworkDomainPermissionToml::Allow => NetworkDomainPermission::Allow,
        codex_config::NetworkDomainPermissionToml::Deny => NetworkDomainPermission::Deny,
    }
}

fn map_network_unix_socket_permission_to_api(
    permission: codex_config::NetworkUnixSocketPermissionToml,
) -> NetworkUnixSocketPermission {
    match permission {
        codex_config::NetworkUnixSocketPermissionToml::Allow => NetworkUnixSocketPermission::Allow,
        codex_config::NetworkUnixSocketPermissionToml::None => NetworkUnixSocketPermission::None,
    }
}

fn map_error(err: ConfigManagerError) -> JSONRPCErrorError {
    if let Some(code) = err.write_error_code() {
        return config_write_error(code, err.to_string());
    }

    internal_error(err.to_string())
}

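/// Shapes a rejected config write as an `INVALID_REQUEST` JSON-RPC error whose
/// `data` payload carries a machine-readable `config_write_error_code`.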
fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: INVALID_REQUEST_ERROR_CODE,
        message: message.into(),
        data: Some(json!({
            "config_write_error_code": code,
        })),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config_manager::apply_runtime_feature_enablement;
    use codex_analytics::AnalyticsEventsClient;
    use codex_arg0::Arg0DispatchPaths;
    use codex_config::CloudRequirementsLoader;
    use codex_config::LoaderOverrides;
    use codex_config::NetworkDomainPermissionToml as CoreNetworkDomainPermissionToml;
    use codex_config::NetworkDomainPermissionsToml as CoreNetworkDomainPermissionsToml;
    use codex_config::NetworkRequirementsToml as CoreNetworkRequirementsToml;
    use codex_config::NetworkUnixSocketPermissionToml as CoreNetworkUnixSocketPermissionToml;
    use codex_config::NetworkUnixSocketPermissionsToml as CoreNetworkUnixSocketPermissionsToml;
    use codex_features::Feature;
    use codex_login::AuthManager;
    use codex_login::CodexAuth;
    use codex_protocol::config_types::ApprovalsReviewer as CoreApprovalsReviewer;
    use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::collections::BTreeMap;
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;
    use tempfile::TempDir;
    use toml::Value as TomlValue;

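    // Test double that counts `reload_user_config` invocations, so tests can
    // assert that a batch write triggers exactly one reload.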
    #[derive(Default)]
    struct RecordingUserConfigReloader {
        call_count: AtomicUsize,
    }

    #[async_trait]
    impl UserConfigReloader for RecordingUserConfigReloader {
        async fn reload_user_config(&self) {
            self.call_count.fetch_add(1, Ordering::Relaxed);
        }
    }

    #[test]
    fn map_requirements_toml_to_api_converts_core_enums() {
        let requirements = ConfigRequirementsToml {
            allowed_approval_policies: Some(vec![
                CoreAskForApproval::Never,
                CoreAskForApproval::OnRequest,
            ]),
            allowed_approvals_reviewers: Some(vec![
                CoreApprovalsReviewer::User,
                CoreApprovalsReviewer::AutoReview,
            ]),
            allowed_sandbox_modes: Some(vec![
                CoreSandboxModeRequirement::ReadOnly,
                CoreSandboxModeRequirement::ExternalSandbox,
            ]),
            remote_sandbox_config: None,
            allowed_web_search_modes: Some(vec![codex_config::WebSearchModeRequirement::Cached]),
            guardian_policy_config: None,
            feature_requirements: Some(codex_config::FeatureRequirementsToml {
                entries: std::collections::BTreeMap::from([
                    ("apps".to_string(), false),
                    ("personality".to_string(), true),
                ]),
            }),
            hooks: Some(ManagedHooksRequirementsToml {
                managed_dir: Some(PathBuf::from("/enterprise/hooks")),
                windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
                hooks: HookEventsToml {
                    pre_tool_use: vec![CoreMatcherGroup {
                        matcher: Some("^Bash$".to_string()),
                        hooks: vec![CoreHookHandlerConfig::Command {
                            command: "python3 /enterprise/hooks/pre.py".to_string(),
                            timeout_sec: Some(10),
                            r#async: false,
                            status_message: Some("checking".to_string()),
                        }],
                    }],
                    ..Default::default()
                },
            }),
            mcp_servers: None,
            plugins: None,
            apps: None,
            rules: None,
            enforce_residency: Some(CoreResidencyRequirement::Us),
            network: Some(CoreNetworkRequirementsToml {
                enabled: Some(true),
                http_port: Some(8080),
                socks_port: Some(1080),
                allow_upstream_proxy: Some(false),
                dangerously_allow_non_loopback_proxy: Some(false),
                dangerously_allow_all_unix_sockets: Some(true),
                domains: Some(CoreNetworkDomainPermissionsToml {
                    entries: std::collections::BTreeMap::from([
                        (
                            "api.openai.com".to_string(),
                            CoreNetworkDomainPermissionToml::Allow,
                        ),
                        (
                            "example.com".to_string(),
                            CoreNetworkDomainPermissionToml::Deny,
                        ),
                    ]),
                }),
                managed_allowed_domains_only: Some(false),
                unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
                    entries: std::collections::BTreeMap::from([(
                        "/tmp/proxy.sock".to_string(),
                        CoreNetworkUnixSocketPermissionToml::Allow,
                    )]),
                }),
                allow_local_binding: Some(true),
            }),
            permissions: None,
        };

        let mapped = map_requirements_toml_to_api(requirements);

        assert_eq!(
            mapped.allowed_approval_policies,
            Some(vec![
                codex_app_server_protocol::AskForApproval::Never,
                codex_app_server_protocol::AskForApproval::OnRequest,
            ])
        );
        assert_eq!(
            mapped.allowed_approvals_reviewers,
            Some(vec![
                codex_app_server_protocol::ApprovalsReviewer::User,
                codex_app_server_protocol::ApprovalsReviewer::AutoReview,
            ])
        );
        assert_eq!(
            mapped.allowed_sandbox_modes,
            Some(vec![SandboxMode::ReadOnly]),
        );
        assert_eq!(
            mapped.allowed_web_search_modes,
            Some(vec![WebSearchMode::Cached, WebSearchMode::Disabled]),
        );
        assert_eq!(
            mapped.feature_requirements,
            Some(std::collections::BTreeMap::from([
                ("apps".to_string(), false),
                ("personality".to_string(), true),
            ])),
        );
        assert_eq!(
            mapped.hooks,
            Some(ManagedHooksRequirements {
                managed_dir: Some(PathBuf::from("/enterprise/hooks")),
                windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
                pre_tool_use: vec![ConfiguredHookMatcherGroup {
                    matcher: Some("^Bash$".to_string()),
                    hooks: vec![ConfiguredHookHandler::Command {
                        command: "python3 /enterprise/hooks/pre.py".to_string(),
                        timeout_sec: Some(10),
                        r#async: false,
                        status_message: Some("checking".to_string()),
                    }],
                }],
                permission_request: Vec::new(),
                post_tool_use: Vec::new(),
                session_start: Vec::new(),
                user_prompt_submit: Vec::new(),
                stop: Vec::new(),
            }),
        );
        assert_eq!(
            mapped.enforce_residency,
            Some(codex_app_server_protocol::ResidencyRequirement::Us),
        );
        assert_eq!(
            mapped.network,
            Some(NetworkRequirements {
                enabled: Some(true),
                http_port: Some(8080),
                socks_port: Some(1080),
                allow_upstream_proxy: Some(false),
                dangerously_allow_non_loopback_proxy: Some(false),
                dangerously_allow_all_unix_sockets: Some(true),
                domains: Some(std::collections::BTreeMap::from([
                    ("api.openai.com".to_string(), NetworkDomainPermission::Allow),
                    ("example.com".to_string(), NetworkDomainPermission::Deny),
                ])),
                managed_allowed_domains_only: Some(false),
                allowed_domains: Some(vec!["api.openai.com".to_string()]),
                denied_domains: Some(vec!["example.com".to_string()]),
                unix_sockets: Some(std::collections::BTreeMap::from([(
                    "/tmp/proxy.sock".to_string(),
                    NetworkUnixSocketPermission::Allow,
                )])),
                allow_unix_sockets: Some(vec!["/tmp/proxy.sock".to_string()]),
                allow_local_binding: Some(true),
            }),
        );
    }

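    // Unix-socket entries explicitly marked `None` must stay visible in the
    // structured `unix_sockets` map but must not leak into the legacy
    // `allow_unix_sockets` list.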
    #[test]
    fn map_requirements_toml_to_api_omits_unix_socket_none_entries_from_legacy_network_fields() {
        let requirements = ConfigRequirementsToml {
            allowed_approval_policies: None,
            allowed_approvals_reviewers: None,
            allowed_sandbox_modes: None,
            remote_sandbox_config: None,
            allowed_web_search_modes: None,
            guardian_policy_config: None,
            feature_requirements: None,
            hooks: None,
            mcp_servers: None,
            plugins: None,
            apps: None,
            rules: None,
            enforce_residency: None,
            network: Some(CoreNetworkRequirementsToml {
                enabled: None,
                http_port: None,
                socks_port: None,
                allow_upstream_proxy: None,
                dangerously_allow_non_loopback_proxy: None,
                dangerously_allow_all_unix_sockets: None,
                domains: None,
                managed_allowed_domains_only: None,
                unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
                    entries: std::collections::BTreeMap::from([(
                        "/tmp/ignored.sock".to_string(),
                        CoreNetworkUnixSocketPermissionToml::None,
                    )]),
                }),
                allow_local_binding: None,
            }),
            permissions: None,
        };

        let mapped = map_requirements_toml_to_api(requirements);

        assert_eq!(
            mapped.network,
            Some(NetworkRequirements {
                enabled: None,
                http_port: None,
                socks_port: None,
                allow_upstream_proxy: None,
                dangerously_allow_non_loopback_proxy: None,
                dangerously_allow_all_unix_sockets: None,
                domains: None,
                managed_allowed_domains_only: None,
                allowed_domains: None,
                denied_domains: None,
                unix_sockets: Some(std::collections::BTreeMap::from([(
                    "/tmp/ignored.sock".to_string(),
                    NetworkUnixSocketPermission::None,
                )])),
                allow_unix_sockets: None,
                allow_local_binding: None,
            }),
        );
    }

    #[test]
    fn map_requirements_toml_to_api_normalizes_allowed_web_search_modes() {
        let requirements = ConfigRequirementsToml {
            allowed_approval_policies: None,
            allowed_approvals_reviewers: None,
            allowed_sandbox_modes: None,
            remote_sandbox_config: None,
            allowed_web_search_modes: Some(Vec::new()),
            guardian_policy_config: None,
            feature_requirements: None,
            hooks: None,
            mcp_servers: None,
            plugins: None,
            apps: None,
            rules: None,
            enforce_residency: None,
            network: None,
            permissions: None,
        };

        let mapped = map_requirements_toml_to_api(requirements);

        assert_eq!(
            mapped.allowed_web_search_modes,
            Some(vec![WebSearchMode::Disabled])
        );
    }

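    // Precedence checks: CLI `-c` overrides outrank config.toml and runtime
    // enablement, while cloud-pinned feature requirements outrank both.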
    #[tokio::test]
    async fn apply_runtime_feature_enablement_keeps_cli_overrides_above_config_and_runtime() {
        let codex_home = TempDir::new().expect("create temp dir");
        std::fs::write(
            codex_home.path().join("config.toml"),
            "[features]\napps = false\n",
        )
        .expect("write config");

        let mut config = codex_core::config::ConfigBuilder::default()
            .codex_home(codex_home.path().to_path_buf())
            .fallback_cwd(Some(codex_home.path().to_path_buf()))
            .cli_overrides(vec![(
                "features.apps".to_string(),
                TomlValue::Boolean(true),
            )])
            .build()
            .await
            .expect("load config");

        apply_runtime_feature_enablement(
            &mut config,
            &BTreeMap::from([("apps".to_string(), false)]),
        );

        assert!(config.features.enabled(Feature::Apps));
    }

    #[tokio::test]
    async fn apply_runtime_feature_enablement_keeps_cloud_pins_above_cli_and_runtime() {
        let codex_home = TempDir::new().expect("create temp dir");

        let mut config = codex_core::config::ConfigBuilder::default()
            .codex_home(codex_home.path().to_path_buf())
            .cli_overrides(vec![(
                "features.apps".to_string(),
                TomlValue::Boolean(true),
            )])
            .cloud_requirements(CloudRequirementsLoader::new(async {
                Ok(Some(ConfigRequirementsToml {
                    feature_requirements: Some(codex_config::FeatureRequirementsToml {
                        entries: BTreeMap::from([("apps".to_string(), false)]),
                    }),
                    ..Default::default()
                }))
            }))
            .build()
            .await
            .expect("load config");

        apply_runtime_feature_enablement(
            &mut config,
            &BTreeMap::from([("apps".to_string(), true)]),
        );

        assert!(!config.features.enabled(Feature::Apps));
    }

    #[tokio::test]
    async fn batch_write_reloads_user_config_when_requested() {
        let codex_home = TempDir::new().expect("create temp dir");
        let user_config_path = codex_home.path().join("config.toml");
        std::fs::write(&user_config_path, "").expect("write config");
        let reloader = Arc::new(RecordingUserConfigReloader::default());
        let analytics_config = Arc::new(
            codex_core::config::ConfigBuilder::default()
                .build()
                .await
                .expect("load analytics config"),
        );
        let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test"));
        let config_api = ConfigApi::new(
            ConfigManager::new(
                codex_home.path().to_path_buf(),
                Vec::new(),
                LoaderOverrides::default(),
                CloudRequirementsLoader::default(),
                Arg0DispatchPaths::default(),
                Arc::new(codex_config::NoopThreadConfigLoader),
            ),
            reloader.clone(),
            AnalyticsEventsClient::new(
                auth_manager,
                analytics_config
                    .chatgpt_base_url
                    .trim_end_matches('/')
                    .to_string(),
                analytics_config.analytics_enabled,
            ),
        );

        let response = config_api
            .batch_write(ConfigBatchWriteParams {
                edits: vec![codex_app_server_protocol::ConfigEdit {
                    key_path: "model".to_string(),
                    value: json!("gpt-5"),
                    merge_strategy: codex_app_server_protocol::MergeStrategy::Replace,
                }],
                file_path: Some(user_config_path.display().to_string()),
                expected_version: None,
                reload_user_config: true,
            })
            .await
            .expect("batch write should succeed");

        assert_eq!(
            response,
            ConfigWriteResponse {
                status: codex_app_server_protocol::WriteStatus::Ok,
                version: response.version.clone(),
                file_path: codex_utils_absolute_path::AbsolutePathBuf::try_from(
                    user_config_path.clone()
                )
                .expect("absolute config path"),
                overridden_metadata: None,
            }
        );
        assert_eq!(
            std::fs::read_to_string(user_config_path).unwrap(),
            "model = \"gpt-5\"\n"
        );
        assert_eq!(reloader.call_count.load(Ordering::Relaxed), 1);
    }
}

@@ -1,15 +1,8 @@
use std::fmt;
use std::future::Future;
use std::sync::Arc;

use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;
use async_trait::async_trait;
use base64::Engine;
use base64::engine::general_purpose::STANDARD;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::DeviceKeyAlgorithm;
use codex_app_server_protocol::DeviceKeyCreateParams;
use codex_app_server_protocol::DeviceKeyCreateResponse;
@@ -35,160 +28,112 @@ use codex_device_key::RemoteControlClientEnrollmentAudience;
use codex_device_key::RemoteControlClientEnrollmentSignPayload;
use codex_state::DeviceKeyBindingRecord;
use codex_state::StateRuntime;
use std::fmt;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::OnceCell;

#[derive(Clone)]
pub(crate) struct DeviceKeyRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
pub(crate) struct DeviceKeyApi {
    store: DeviceKeyStore,
}

impl DeviceKeyRequestProcessor {
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        state_db: Option<Arc<StateRuntime>>,
    ) -> Self {
impl DeviceKeyApi {
    pub(crate) fn new(sqlite_home: PathBuf, default_provider: String) -> Self {
        Self {
            outgoing,
            store: DeviceKeyStore::new(Arc::new(StateDeviceKeyBindingStore::new(state_db))),
            store: DeviceKeyStore::new(Arc::new(StateDeviceKeyBindingStore::new(
                sqlite_home,
                default_provider,
            ))),
        }
    }

    pub(crate) fn create(
    pub(crate) async fn create(
        &self,
        request_id: ConnectionRequestId,
        params: DeviceKeyCreateParams,
        device_key_requests_allowed: bool,
    ) {
        self.spawn_request(
            request_id,
            "device/key/create",
            device_key_requests_allowed,
            move |store| async move { create_device_key(store, params).await },
        );
    ) -> Result<DeviceKeyCreateResponse, JSONRPCErrorError> {
        let info = self
            .store
            .create(DeviceKeyCreateRequest {
                protection_policy: protection_policy_from_params(params.protection_policy),
                binding: DeviceKeyBinding {
                    account_user_id: params.account_user_id,
                    client_id: params.client_id,
                },
            })
            .await
            .map_err(map_device_key_error)?;
        Ok(create_response_from_info(info))
    }

    pub(crate) fn public(
    pub(crate) async fn public(
        &self,
        request_id: ConnectionRequestId,
        params: DeviceKeyPublicParams,
        device_key_requests_allowed: bool,
    ) {
        self.spawn_request(
            request_id,
            "device/key/public",
            device_key_requests_allowed,
            move |store| async move { public_device_key(store, params).await },
        );
    ) -> Result<DeviceKeyPublicResponse, JSONRPCErrorError> {
        let info = self
            .store
            .get_public(DeviceKeyGetPublicRequest {
                key_id: params.key_id,
            })
            .await
            .map_err(map_device_key_error)?;
        Ok(public_response_from_info(info))
    }

    pub(crate) fn sign(
    pub(crate) async fn sign(
        &self,
        request_id: ConnectionRequestId,
        params: DeviceKeySignParams,
        device_key_requests_allowed: bool,
    ) {
        self.spawn_request(
            request_id,
            "device/key/sign",
            device_key_requests_allowed,
            move |store| async move { sign_device_key(store, params).await },
        );
    ) -> Result<DeviceKeySignResponse, JSONRPCErrorError> {
        let signature = self
            .store
            .sign(DeviceKeySignRequest {
                key_id: params.key_id,
                payload: payload_from_params(params.payload),
            })
            .await
            .map_err(map_device_key_error)?;
        Ok(DeviceKeySignResponse {
            signature_der_base64: STANDARD.encode(signature.signature_der),
            signed_payload_base64: STANDARD.encode(signature.signed_payload),
            algorithm: algorithm_from_store(signature.algorithm),
        })
    }

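    // Runs a device-key handler on a background task and routes the outcome
    // through the shared sender; requests are rejected up front when these
    // methods are not available over remote transports.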
    fn spawn_request<R, F, Fut>(
        &self,
        request_id: ConnectionRequestId,
        method: &'static str,
        device_key_requests_allowed: bool,
        run_request: F,
    ) where
        R: Into<ClientResponsePayload> + Send + 'static,
        F: FnOnce(DeviceKeyStore) -> Fut + Send + 'static,
        Fut: Future<Output = Result<R, JSONRPCErrorError>> + Send + 'static,
    {
        let store = self.store.clone();
        let outgoing = Arc::clone(&self.outgoing);
        tokio::spawn(async move {
            let result = if !device_key_requests_allowed {
                Err(invalid_request(format!(
                    "{method} is not available over remote transports"
                )))
            } else {
                run_request(store).await
            };
            outgoing.send_result(request_id, result).await;
        });
    }
}

async fn create_device_key(
    store: DeviceKeyStore,
    params: DeviceKeyCreateParams,
) -> Result<DeviceKeyCreateResponse, JSONRPCErrorError> {
    let info = store
        .create(DeviceKeyCreateRequest {
            protection_policy: protection_policy_from_params(params.protection_policy),
            binding: DeviceKeyBinding {
                account_user_id: params.account_user_id,
                client_id: params.client_id,
            },
        })
        .await
        .map_err(map_device_key_error)?;
    Ok(create_response_from_info(info))
}

async fn public_device_key(
    store: DeviceKeyStore,
    params: DeviceKeyPublicParams,
) -> Result<DeviceKeyPublicResponse, JSONRPCErrorError> {
    let info = store
        .get_public(DeviceKeyGetPublicRequest {
            key_id: params.key_id,
        })
        .await
        .map_err(map_device_key_error)?;
    Ok(public_response_from_info(info))
}

async fn sign_device_key(
    store: DeviceKeyStore,
    params: DeviceKeySignParams,
) -> Result<DeviceKeySignResponse, JSONRPCErrorError> {
    let signature = store
        .sign(DeviceKeySignRequest {
            key_id: params.key_id,
            payload: payload_from_params(params.payload),
        })
        .await
        .map_err(map_device_key_error)?;
    Ok(DeviceKeySignResponse {
        signature_der_base64: STANDARD.encode(signature.signature_der),
        signed_payload_base64: STANDARD.encode(signature.signed_payload),
        algorithm: algorithm_from_store(signature.algorithm),
    })
}

struct StateDeviceKeyBindingStore {
    state_db: Option<Arc<StateRuntime>>,
    sqlite_home: PathBuf,
    default_provider: String,
    state_db: OnceCell<Arc<StateRuntime>>,
}

impl StateDeviceKeyBindingStore {
    fn new(state_db: Option<Arc<StateRuntime>>) -> Self {
        Self { state_db }
    fn new(sqlite_home: PathBuf, default_provider: String) -> Self {
        Self {
            sqlite_home,
            default_provider,
            state_db: OnceCell::new(),
        }
    }

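    // Lazily initializes the SQLite-backed state runtime on first use and caches
    // it in the `OnceCell`, so constructing the store stays cheap and infallible.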
    async fn state_db(&self) -> Result<Arc<StateRuntime>, DeviceKeyError> {
        let sqlite_home = self.sqlite_home.clone();
        let default_provider = self.default_provider.clone();
        self.state_db
            .clone()
            .ok_or_else(|| DeviceKeyError::Platform("sqlite state db unavailable".to_string()))
            .get_or_try_init(|| async move {
                StateRuntime::init(sqlite_home, default_provider)
                    .await
                    .map_err(|err| DeviceKeyError::Platform(err.to_string()))
            })
            .await
            .cloned()
    }
}

impl fmt::Debug for StateDeviceKeyBindingStore {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("StateDeviceKeyBindingStore")
            .field("has_state_db", &self.state_db.is_some())
            .field("sqlite_home", &self.sqlite_home)
            .field("default_provider", &self.default_provider)
            .finish_non_exhaustive()
    }
}

@@ -1,22 +1,15 @@
use std::sync::Arc;

use crate::config::external_agent_config::ExternalAgentConfigDetectOptions;
use crate::config::external_agent_config::ExternalAgentConfigMigrationItem as CoreMigrationItem;
use crate::config::external_agent_config::ExternalAgentConfigMigrationItemType as CoreMigrationItemType;
use crate::config::external_agent_config::ExternalAgentConfigService;
use crate::config::external_agent_config::NamedMigration as CoreNamedMigration;
use crate::config::external_agent_config::PendingPluginImport;
use crate::config_manager::ConfigManager;
use crate::error_code::internal_error;
use crate::error_code::invalid_params;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::CommandMigration;
use codex_app_server_protocol::ExternalAgentConfigDetectParams;
use codex_app_server_protocol::ExternalAgentConfigDetectResponse;
use codex_app_server_protocol::ExternalAgentConfigImportCompletedNotification;
use codex_app_server_protocol::ExternalAgentConfigImportParams;
use codex_app_server_protocol::ExternalAgentConfigImportResponse;
use codex_app_server_protocol::ExternalAgentConfigMigrationItem;
use codex_app_server_protocol::ExternalAgentConfigMigrationItemType;
use codex_app_server_protocol::HookMigration;
@@ -24,55 +17,30 @@ use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::McpServerMigration;
use codex_app_server_protocol::MigrationDetails;
use codex_app_server_protocol::PluginsMigration;
use codex_app_server_protocol::ServerNotification;
use codex_arg0::Arg0DispatchPaths;
use codex_core::StartThreadOptions;
use codex_core::ThreadManager;
use codex_core::config::ConfigOverrides;
use codex_app_server_protocol::SubagentMigration;
use codex_external_agent_sessions::ExternalAgentSessionMigration as CoreSessionMigration;
use codex_external_agent_sessions::ImportedExternalAgentSession;
use codex_external_agent_sessions::PendingSessionImport;
use codex_external_agent_sessions::prepare_validated_session_imports;
use codex_external_agent_sessions::record_imported_session;
use codex_protocol::ThreadId;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::Op;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Semaphore;

use super::ConfigRequestProcessor;

#[derive(Clone)]
pub(crate) struct ExternalAgentConfigRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
pub(crate) struct ExternalAgentConfigApi {
    codex_home: PathBuf,
    migration_service: ExternalAgentConfigService,
    session_import_permits: Arc<Semaphore>,
    thread_manager: Arc<ThreadManager>,
    config_manager: ConfigManager,
    config_processor: ConfigRequestProcessor,
    arg0_paths: Arg0DispatchPaths,
}

impl ExternalAgentConfigRequestProcessor {
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        thread_manager: Arc<ThreadManager>,
        config_manager: ConfigManager,
        config_processor: ConfigRequestProcessor,
        arg0_paths: Arg0DispatchPaths,
        codex_home: PathBuf,
    ) -> Self {
impl ExternalAgentConfigApi {
    pub(crate) fn new(codex_home: PathBuf) -> Self {
        Self {
            outgoing,
            migration_service: ExternalAgentConfigService::new(codex_home.clone()),
            codex_home,
            session_import_permits: Arc::new(Semaphore::new(1)),
            thread_manager,
            config_manager,
            config_processor,
            arg0_paths,
        }
    }

@@ -155,7 +123,7 @@ impl ExternalAgentConfigRequestProcessor {
            subagents: details
                .subagents
                .into_iter()
                .map(|subagent| codex_app_server_protocol::SubagentMigration {
                .map(|subagent| SubagentMigration {
                    name: subagent.name,
                })
                .collect(),
@@ -170,164 +138,7 @@ impl ExternalAgentConfigRequestProcessor {
        })
    }

    pub(crate) async fn import(
        &self,
        request_id: ConnectionRequestId,
        params: ExternalAgentConfigImportParams,
    ) -> Result<(), JSONRPCErrorError> {
        let needs_runtime_refresh = migration_items_need_runtime_refresh(&params.migration_items);
        let has_migration_items = !params.migration_items.is_empty();
        let has_plugin_imports = params.migration_items.iter().any(|item| {
            matches!(
                item.item_type,
                ExternalAgentConfigMigrationItemType::Plugins
            )
        });
        let pending_session_imports = self.validate_pending_session_imports(&params)?;
        let pending_plugin_imports = self.import_external_agent_config(params).await?;
        if needs_runtime_refresh {
            self.config_processor.handle_config_mutation().await;
        }
        self.outgoing
            .send_response(request_id, ExternalAgentConfigImportResponse {})
            .await;

        if !has_migration_items {
            return Ok(());
        }

        let has_background_imports =
            !pending_plugin_imports.is_empty() || !pending_session_imports.is_empty();
        if !has_background_imports {
            self.outgoing
                .send_server_notification(ServerNotification::ExternalAgentConfigImportCompleted(
                    ExternalAgentConfigImportCompletedNotification {},
                ))
                .await;
            return Ok(());
        }

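        // Background imports: session imports are serialized through the
        // one-permit semaphore so concurrent import requests cannot interleave.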
        let session_import_permits = Arc::clone(&self.session_import_permits);
        let session_processor = self.clone();
        let plugin_processor = self.clone();
        let outgoing = Arc::clone(&self.outgoing);
        let thread_manager = Arc::clone(&self.thread_manager);
        tokio::spawn(async move {
            let session_imports = async move {
                if !pending_session_imports.is_empty() {
                    let Ok(_session_import_permit) = session_import_permits.acquire_owned().await
                    else {
                        return;
                    };
                    let pending_session_imports = session_processor
                        .prepare_validated_session_imports(pending_session_imports);
                    for pending_session_import in pending_session_imports {
                        match session_processor
                            .import_external_agent_session(pending_session_import.session)
                            .await
                        {
                            Ok(imported_thread_id) => {
                                session_processor.record_imported_session(
                                    &pending_session_import.source_path,
                                    imported_thread_id,
                                );
                            }
                            Err(error) => {
                                tracing::warn!(
                                    error = %error.message,
                                    path = %pending_session_import.source_path.display(),
                                    "external agent session import failed"
                                );
                            }
                        }
                    }
                }
            };
            let plugin_imports = async move {
                for pending_plugin_import in pending_plugin_imports {
                    match plugin_processor
                        .complete_pending_plugin_import(pending_plugin_import)
                        .await
                    {
                        Ok(()) => {}
                        Err(error) => {
                            tracing::warn!(
                                error = %error.message,
                                "external agent config plugin import failed"
                            );
                        }
                    }
                }
            };
            tokio::join!(session_imports, plugin_imports);
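            // Plugin imports can bring new plugins and skills onto disk; clear
            // both caches so subsequent lookups observe the imported artifacts.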
            if has_plugin_imports {
                thread_manager.plugins_manager().clear_cache();
                thread_manager.skills_manager().clear_cache();
            }
            outgoing
                .send_server_notification(ServerNotification::ExternalAgentConfigImportCompleted(
                    ExternalAgentConfigImportCompletedNotification {},
                ))
                .await;
        });

        Ok(())
    }

    async fn import_external_agent_session(
        &self,
        session: ImportedExternalAgentSession,
    ) -> Result<ThreadId, JSONRPCErrorError> {
        let ImportedExternalAgentSession {
            cwd,
            title,
            rollout_items,
        } = session;
        let config = self
            .config_manager
            .load_with_overrides(
                /*request_overrides*/ None,
                ConfigOverrides {
                    cwd: Some(PathBuf::from(cwd.to_string_lossy().into_owned())),
                    codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
                    main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
                    ..Default::default()
                },
            )
            .await
            .map_err(|err| {
                internal_error(format!("failed to load imported session config: {err}"))
            })?;
        let environments = self
            .thread_manager
            .default_environment_selections(&config.cwd);
        let imported_thread = self
            .thread_manager
            .start_thread_with_options(StartThreadOptions {
                config,
                initial_history: InitialHistory::Forked(rollout_items),
                session_source: None,
                dynamic_tools: Vec::new(),
                persist_extended_history: false,
                metrics_service_name: None,
                parent_trace: None,
                environments,
            })
            .await
            .map_err(|err| internal_error(format!("failed to import session: {err}")))?;
        if let Some(title) = title
            && let Some(name) = codex_core::util::normalize_thread_name(&title)
        {
            imported_thread
                .thread
                .submit(Op::SetThreadName { name })
                .await
                .map_err(|err| internal_error(format!("failed to name imported session: {err}")))?;
        }
        Ok(imported_thread.thread_id)
    }

    fn validate_pending_session_imports(
    pub(crate) fn validate_pending_session_imports(
        &self,
        params: &ExternalAgentConfigImportParams,
    ) -> Result<Vec<CoreSessionMigration>, JSONRPCErrorError> {
@@ -365,14 +176,22 @@ impl ExternalAgentConfigRequestProcessor {
        Ok(selected_sessions)
    }

    fn prepare_validated_session_imports(
    pub(crate) fn prepare_validated_session_imports(
        &self,
        sessions: Vec<CoreSessionMigration>,
    ) -> Vec<PendingSessionImport> {
        prepare_validated_session_imports(&self.codex_home, sessions)
    }

    fn record_imported_session(&self, source_path: &std::path::Path, imported_thread_id: ThreadId) {
    pub(crate) fn session_import_permits(&self) -> Arc<Semaphore> {
        Arc::clone(&self.session_import_permits)
    }

    pub(crate) fn record_imported_session(
        &self,
        source_path: &std::path::Path,
        imported_thread_id: ThreadId,
    ) {
        if let Err(err) = record_imported_session(&self.codex_home, source_path, imported_thread_id)
        {
            tracing::warn!(
@@ -383,7 +202,7 @@ impl ExternalAgentConfigRequestProcessor {
        }
    }

    async fn import_external_agent_config(
    pub(crate) async fn import(
        &self,
        params: ExternalAgentConfigImportParams,
    ) -> Result<Vec<PendingPluginImport>, JSONRPCErrorError> {
@@ -478,7 +297,7 @@ impl ExternalAgentConfigRequestProcessor {
        .map_err(|err| internal_error(err.to_string()))
    }

    async fn complete_pending_plugin_import(
    pub(crate) async fn complete_pending_plugin_import(
        &self,
        pending_plugin_import: PendingPluginImport,
    ) -> Result<(), JSONRPCErrorError> {
@@ -493,27 +312,9 @@ impl ExternalAgentConfigRequestProcessor {
    }
}

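// Item types that rewrite config.toml or material layered into it; imports that
// touch these require the in-memory runtime config to be refreshed afterwards.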
fn migration_items_need_runtime_refresh(items: &[ExternalAgentConfigMigrationItem]) -> bool {
    items.iter().any(|item| {
        matches!(
            item.item_type,
            ExternalAgentConfigMigrationItemType::Config
                | ExternalAgentConfigMigrationItemType::Skills
                | ExternalAgentConfigMigrationItemType::McpServerConfig
                | ExternalAgentConfigMigrationItemType::Hooks
                | ExternalAgentConfigMigrationItemType::Commands
                | ExternalAgentConfigMigrationItemType::Plugins
        )
    })
}

fn session_not_detected_error(path: &std::path::Path) -> JSONRPCErrorError {
    invalid_params(format!(
        "external agent session was not detected for import: {}",
        path.display()
    ))
}

#[cfg(test)]
#[path = "external_agent_config_processor_tests.rs"]
mod external_agent_config_processor_tests;

@@ -1,7 +1,5 @@
use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use crate::fs_watch::FsWatchManager;
use crate::outgoing_message::ConnectionId;
use base64::Engine;
use base64::engine::general_purpose::STANDARD;
use codex_app_server_protocol::FsCopyParams;
@@ -17,10 +15,6 @@ use codex_app_server_protocol::FsReadFileParams;
use codex_app_server_protocol::FsReadFileResponse;
use codex_app_server_protocol::FsRemoveParams;
use codex_app_server_protocol::FsRemoveResponse;
use codex_app_server_protocol::FsUnwatchParams;
use codex_app_server_protocol::FsUnwatchResponse;
use codex_app_server_protocol::FsWatchParams;
use codex_app_server_protocol::FsWatchResponse;
use codex_app_server_protocol::FsWriteFileParams;
use codex_app_server_protocol::FsWriteFileResponse;
use codex_app_server_protocol::JSONRPCErrorError;
@@ -32,24 +26,13 @@ use std::io;
use std::sync::Arc;

#[derive(Clone)]
pub(crate) struct FsRequestProcessor {
pub(crate) struct FsApi {
    file_system: Arc<dyn ExecutorFileSystem>,
    fs_watch_manager: FsWatchManager,
}

impl FsRequestProcessor {
    pub(crate) fn new(
        file_system: Arc<dyn ExecutorFileSystem>,
        fs_watch_manager: FsWatchManager,
    ) -> Self {
        Self {
            file_system,
            fs_watch_manager,
        }
    }

    pub(crate) async fn connection_closed(&self, connection_id: ConnectionId) {
        self.fs_watch_manager.connection_closed(connection_id).await;
impl FsApi {
    pub(crate) fn new(file_system: Arc<dyn ExecutorFileSystem>) -> Self {
        Self { file_system }
    }

    pub(crate) async fn read_file(
@@ -173,25 +156,9 @@ impl FsRequestProcessor {
            .map_err(map_fs_error)?;
        Ok(FsCopyResponse {})
    }

    pub(crate) async fn watch(
        &self,
        connection_id: ConnectionId,
        params: FsWatchParams,
    ) -> Result<FsWatchResponse, JSONRPCErrorError> {
        self.fs_watch_manager.watch(connection_id, params).await
    }

    pub(crate) async fn unwatch(
        &self,
        connection_id: ConnectionId,
        params: FsUnwatchParams,
    ) -> Result<FsUnwatchResponse, JSONRPCErrorError> {
        self.fs_watch_manager.unwatch(connection_id, params).await
    }
}

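// `InvalidInput` from the executor filesystem indicates a malformed request
// (for example a bad path), which surfaces as `invalid_request`; every other
// I/O failure is reported as an internal error.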
fn map_fs_error(err: io::Error) -> JSONRPCErrorError {
pub(crate) fn map_fs_error(err: io::Error) -> JSONRPCErrorError {
    if err.kind() == io::ErrorKind::InvalidInput {
        invalid_request(err.to_string())
    } else {

@@ -86,7 +86,6 @@ use codex_exec_server::EnvironmentManager;
use codex_feedback::CodexFeedback;
use codex_login::AuthManager;
use codex_protocol::protocol::SessionSource;
pub use codex_rollout::StateDbHandle;
pub use codex_state::log_db::LogDbLayer;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
@@ -127,8 +126,6 @@ pub struct InProcessStartArgs {
    pub feedback: CodexFeedback,
    /// SQLite tracing layer used to flush recently emitted logs before feedback upload.
    pub log_db: Option<LogDbLayer>,
    /// Process-wide SQLite state handle shared with embedded app-server consumers.
    pub state_db: Option<StateDbHandle>,
    /// Environment manager used by core execution and filesystem operations.
    pub environment_manager: Arc<EnvironmentManager>,
    /// Startup warnings emitted after initialize succeeds.
@@ -254,8 +251,6 @@ pub struct InProcessClientHandle {
    client: InProcessClientSender,
    event_rx: mpsc::Receiver<InProcessServerEvent>,
    runtime_handle: tokio::task::JoinHandle<()>,
    #[cfg(test)]
    _test_codex_home: Option<tempfile::TempDir>,
}

impl InProcessClientHandle {
@@ -423,7 +418,6 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle {
        environment_manager: args.environment_manager,
        feedback: args.feedback,
        log_db: args.log_db,
        state_db: args.state_db,
        config_warnings: args.config_warnings,
        session_source: args.session_source,
        auth_manager,
@@ -723,8 +717,6 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle {
        client: InProcessClientSender { client_tx },
        event_rx,
        runtime_handle,
        #[cfg(test)]
        _test_codex_home: None,
    }
}

@@ -746,22 +738,13 @@ mod tests {
    use codex_app_server_protocol::TurnStatus;
    use codex_core::config::ConfigBuilder;
    use pretty_assertions::assert_eq;
    use std::path::Path;
    use tempfile::TempDir;

    async fn build_test_config(codex_home: &Path) -> Config {
        match ConfigBuilder::default()
            .codex_home(codex_home.to_path_buf())
            .build()
            .await
        {
    async fn build_test_config() -> Config {
        match ConfigBuilder::default().build().await {
            Ok(config) => config,
            Err(_) => Config::load_default_with_cli_overrides_for_codex_home(
                codex_home.to_path_buf(),
                Vec::new(),
            )
            .await
            .expect("default config should load"),
            Err(_) => Config::load_default_with_cli_overrides(Vec::new())
                .await
                .expect("default config should load"),
        }
    }

@@ -769,21 +752,15 @@ mod tests {
        session_source: SessionSource,
        channel_capacity: usize,
    ) -> InProcessClientHandle {
        let codex_home = TempDir::new().expect("temp dir");
        let config = Arc::new(build_test_config(codex_home.path()).await);
        let state_db = codex_rollout::state_db::try_init(config.as_ref())
            .await
            .expect("state db should initialize for in-process test");
        let args = InProcessStartArgs {
            arg0_paths: Arg0DispatchPaths::default(),
            config,
            config: Arc::new(build_test_config().await),
            cli_overrides: Vec::new(),
            loader_overrides: LoaderOverrides::default(),
            cloud_requirements: CloudRequirementsLoader::default(),
            thread_config_loader: Arc::new(codex_config::NoopThreadConfigLoader),
            feedback: CodexFeedback::new(),
            log_db: None,
            state_db: Some(state_db),
            environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
            config_warnings: Vec::new(),
            session_source,
@@ -798,9 +775,7 @@ mod tests {
            },
            channel_capacity,
        };
        let mut client = start(args).await.expect("in-process runtime should start");
        client._test_codex_home = Some(codex_home);
        client
        start(args).await.expect("in-process runtime should start")
    }

    async fn start_test_client(session_source: SessionSource) -> InProcessClientHandle {
@@ -829,7 +804,7 @@ mod tests {
    }

    #[tokio::test]
    async fn in_process_allows_device_key_requests_to_reach_device_key_processor() {
    async fn in_process_allows_device_key_requests_to_reach_device_key_api() {
        let client = start_test_client(SessionSource::Cli).await;
        const MALFORMED_KEY_ID_MESSAGE: &str = concat!(
            "invalid device key payload: keyId must be dk_hse_, dk_tpm_, or dk_osn_ ",

@@ -54,7 +54,6 @@ use codex_exec_server::EnvironmentManager;
use codex_exec_server::ExecServerRuntimePaths;
use codex_feedback::CodexFeedback;
use codex_protocol::protocol::SessionSource;
use codex_rollout::state_db as rollout_state_db;
use codex_state::log_db;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
@@ -74,21 +73,25 @@ use tracing_subscriber::util::SubscriberInitExt;
mod analytics_utils;
mod app_server_tracing;
mod bespoke_event_handling;
mod codex_message_processor;
mod command_exec;
mod config;
mod config_api;
mod config_manager;
mod config_manager_service;
mod connection_rpc_gate;
mod device_key_api;
mod dynamic_tools;
mod error_code;
mod external_agent_config_api;
mod filters;
mod fs_api;
mod fs_watch;
mod fuzzy_file_search;
pub mod in_process;
mod message_processor;
mod models;
mod outgoing_message;
mod request_processors;
mod request_serialization;
mod server_request_error;
mod thread_state;
@@ -454,6 +457,23 @@ pub async fn run_main_with_transport_options(
    .await
    {
        Ok(config) => {
            let effective_toml = config.config_layer_stack.effective_config();
            match effective_toml.try_into() {
                Ok(config_toml) => {
                    if let Err(err) = codex_core::personality_migration::maybe_migrate_personality(
                        &config.codex_home,
                        &config_toml,
                    )
                    .await
                    {
                        warn!(error = %err, "Failed to run personality migration");
                    }
                }
                Err(err) => {
                    warn!(error = %err, "Failed to deserialize config for personality migration");
                }
            }

            let discovered_thread_config_loader = configured_thread_config_loader(&config);
            config_manager
                .replace_thread_config_loader(Arc::clone(&discovered_thread_config_loader));
@@ -467,70 +487,23 @@ pub async fn run_main_with_transport_options(
        }
    };
    let mut config_warnings = Vec::new();
    let (mut config, should_run_personality_migration) = match config_manager
    let config = match config_manager
        .load_latest_config(/*fallback_cwd*/ None)
        .await
    {
        Ok(config) => (config, true),
        Ok(config) => config,
        Err(err) => {
            let message = config_warning_from_error("Invalid configuration; using defaults.", &err);
            config_warnings.push(message);
            (
                config_manager.load_default_config().await.map_err(|e| {
                    std::io::Error::new(
                        ErrorKind::InvalidData,
                        format!("error loading default config after config error: {e}"),
                    )
                })?,
                false,
            )
            config_manager.load_default_config().await.map_err(|e| {
                std::io::Error::new(
                    ErrorKind::InvalidData,
                    format!("error loading default config after config error: {e}"),
                )
            })?
        }
    };

    let state_db_result = rollout_state_db::try_init(&config).await;
    let state_db_init_error = state_db_result.as_ref().err().map(ToString::to_string);
    let state_db = state_db_result.ok();

    if should_run_personality_migration {
        let effective_toml = config.config_layer_stack.effective_config();
        match effective_toml.try_into() {
            Ok(config_toml) => {
                match codex_core::personality_migration::maybe_migrate_personality(
                    &config.codex_home,
                    &config_toml,
                    state_db.clone(),
                )
                .await
                {
                    Ok(codex_core::personality_migration::PersonalityMigrationStatus::Applied) => {
                        config = config_manager
                            .load_latest_config(/*fallback_cwd*/ None)
                            .await
                            .map_err(|err| {
                                std::io::Error::new(
                                    ErrorKind::InvalidData,
                                    format!(
                                        "error reloading config after personality migration: {err}"
                                    ),
                                )
                            })?;
                    }
                    Ok(
                        codex_core::personality_migration::PersonalityMigrationStatus::SkippedMarker
                        | codex_core::personality_migration::PersonalityMigrationStatus::SkippedExplicitPersonality
                        | codex_core::personality_migration::PersonalityMigrationStatus::SkippedNoSessions,
                    ) => {}
                    Err(err) => {
                        warn!(error = %err, "Failed to run personality migration");
                    }
                }
            }
            Err(err) => {
                warn!(error = %err, "Failed to deserialize config for personality migration");
            }
        }
    }

    if let Ok(Some(err)) = check_execpolicy_for_warnings(&config.config_layer_stack).await {
        let (path, range) = exec_policy_warning_location(&err);
        let message = ConfigWarningNotification {
@@ -598,6 +571,13 @@ pub async fn run_main_with_transport_options(

    let feedback_layer = feedback.logger_layer();
    let feedback_metadata_layer = feedback.metadata_layer();
    let state_db_result = codex_state::StateRuntime::init(
        config.sqlite_home.clone(),
        config.model_provider_id.clone(),
    )
    .await;
    let state_db_init_error = state_db_result.as_ref().err().map(ToString::to_string);
    let state_db = state_db_result.ok();
    let log_db = state_db.clone().map(log_db::start);
    let log_db_layer = log_db
        .clone()
@@ -769,7 +749,6 @@ pub async fn run_main_with_transport_options(
        environment_manager,
        feedback: feedback.clone(),
        log_db,
        state_db: state_db.clone(),
        config_warnings,
        session_source,
        auth_manager,

File diff suppressed because it is too large

@@ -290,7 +290,6 @@ async fn build_test_processor(
        environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
        feedback: CodexFeedback::new(),
        log_db: None,
        state_db: None,
        config_warnings: Vec::new(),
        session_source: SessionSource::VSCode,
        auth_manager,

@@ -1,7 +1,6 @@
use std::sync::Arc;

use codex_app_server_protocol::Model;
use codex_app_server_protocol::ModelServiceTier;
use codex_app_server_protocol::ModelUpgradeInfo;
use codex_app_server_protocol::ReasoningEffortOption;
use codex_core::ThreadManager;
@@ -44,15 +43,6 @@ fn model_from_preset(preset: ModelPreset) -> Model {
        input_modalities: preset.input_modalities,
        supports_personality: preset.supports_personality,
        additional_speed_tiers: preset.additional_speed_tiers,
        service_tiers: preset
            .service_tiers
            .into_iter()
            .map(|service_tier| ModelServiceTier {
                id: service_tier.id,
                name: service_tier.name,
                description: service_tier.description,
            })
            .collect(),
        is_default: preset.is_default,
    }
}

@@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::AtomicI64;
use std::sync::atomic::Ordering;
@@ -14,6 +15,7 @@ use codex_app_server_protocol::ServerRequestPayload;
use codex_otel::span_w3c_trace_context;
use codex_protocol::ThreadId;
use codex_protocol::protocol::W3cTraceContext;
use serde::Serialize;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
@@ -24,17 +26,22 @@ use tracing::warn;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::internal_error;
use crate::server_request_error::TURN_TRANSITION_PENDING_REQUEST_ERROR_REASON;
pub(crate) use codex_app_server_transport::ConnectionId;
pub(crate) use codex_app_server_transport::OutgoingError;
pub(crate) use codex_app_server_transport::OutgoingMessage;
pub(crate) use codex_app_server_transport::OutgoingResponse;
pub(crate) use codex_app_server_transport::QueuedOutgoingMessage;

#[cfg(test)]
use codex_protocol::account::PlanType;

pub(crate) type ClientRequestResult = std::result::Result<Result, JSONRPCErrorError>;

/// Stable identifier for a transport connection.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ConnectionId(pub(crate) u64);

impl fmt::Display for ConnectionId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// Stable identifier for a client request scoped to a transport connection.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ConnectionRequestId {
@@ -89,6 +96,21 @@ pub(crate) enum OutgoingEnvelope {
    },
}

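/// Pairs an outgoing message with an optional completion channel; when
/// `write_complete_tx` is set, it is intended to be fired once the message has
/// actually been written, letting callers await the physical write.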
#[derive(Debug)]
pub(crate) struct QueuedOutgoingMessage {
    pub(crate) message: OutgoingMessage,
    pub(crate) write_complete_tx: Option<oneshot::Sender<()>>,
}

impl QueuedOutgoingMessage {
    pub(crate) fn new(message: OutgoingMessage) -> Self {
        Self {
            message,
            write_complete_tx: None,
        }
    }
}

/// Sends messages to the client and manages request callbacks.
pub(crate) struct OutgoingMessageSender {
    next_server_request_id: AtomicI64,
@@ -643,6 +665,30 @@ impl OutgoingMessageSender {
    }
}

/// Outgoing message from the server to the client.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub(crate) enum OutgoingMessage {
    Request(ServerRequest),
    /// AppServerNotification is specific to the case where this is run as an
    /// "app server" as opposed to an MCP server.
    AppServerNotification(ServerNotification),
    Response(OutgoingResponse),
    Error(OutgoingError),
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub(crate) struct OutgoingResponse {
    pub id: RequestId,
    pub result: Result,
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub(crate) struct OutgoingError {
    pub error: JSONRPCErrorError,
    pub id: RequestId,
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

@@ -1,500 +0,0 @@
use crate::bespoke_event_handling::apply_bespoke_event_handling;
use crate::bespoke_event_handling::maybe_emit_hook_prompt_item_completed;
use crate::command_exec::CommandExecManager;
use crate::command_exec::StartCommandExecParams;
use crate::config_manager::ConfigManager;
use crate::error_code::INPUT_TOO_LARGE_ERROR_CODE;
use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_PARAMS_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::error_code::invalid_params;
use crate::models::supported_models;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;
use crate::outgoing_message::RequestContext;
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
use crate::thread_status::ThreadWatchManager;
use crate::thread_status::resolve_thread_status;
use chrono::DateTime;
use chrono::Duration as ChronoDuration;
use chrono::SecondsFormat;
use chrono::Utc;
use codex_analytics::AnalyticsEventsClient;
use codex_analytics::AnalyticsJsonRpcError;
use codex_analytics::InputError;
use codex_analytics::TurnSteerRequestError;
use codex_app_server_protocol::Account;
use codex_app_server_protocol::AccountLoginCompletedNotification;
use codex_app_server_protocol::AccountUpdatedNotification;
use codex_app_server_protocol::AddCreditsNudgeCreditType;
use codex_app_server_protocol::AddCreditsNudgeEmailStatus;
use codex_app_server_protocol::AppInfo;
use codex_app_server_protocol::AppListUpdatedNotification;
use codex_app_server_protocol::AppSummary;
use codex_app_server_protocol::AppsListParams;
use codex_app_server_protocol::AppsListResponse;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::AuthMode;
use codex_app_server_protocol::CancelLoginAccountParams;
use codex_app_server_protocol::CancelLoginAccountResponse;
use codex_app_server_protocol::CancelLoginAccountStatus;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::CodexErrorInfo;
use codex_app_server_protocol::CollaborationModeListParams;
use codex_app_server_protocol::CollaborationModeListResponse;
use codex_app_server_protocol::CommandExecParams;
use codex_app_server_protocol::CommandExecResizeParams;
use codex_app_server_protocol::CommandExecTerminateParams;
use codex_app_server_protocol::CommandExecWriteParams;
use codex_app_server_protocol::ConfigWarningNotification;
use codex_app_server_protocol::ConversationGitInfo;
use codex_app_server_protocol::ConversationSummary;
use codex_app_server_protocol::DynamicToolSpec as ApiDynamicToolSpec;
use codex_app_server_protocol::ExperimentalFeature as ApiExperimentalFeature;
use codex_app_server_protocol::ExperimentalFeatureListParams;
use codex_app_server_protocol::ExperimentalFeatureListResponse;
use codex_app_server_protocol::ExperimentalFeatureStage as ApiExperimentalFeatureStage;
use codex_app_server_protocol::FeedbackUploadParams;
use codex_app_server_protocol::FeedbackUploadResponse;
use codex_app_server_protocol::GetAccountParams;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::GetAccountResponse;
use codex_app_server_protocol::GetAuthStatusParams;
use codex_app_server_protocol::GetAuthStatusResponse;
use codex_app_server_protocol::GetConversationSummaryParams;
use codex_app_server_protocol::GetConversationSummaryResponse;
use codex_app_server_protocol::GitDiffToRemoteParams;
use codex_app_server_protocol::GitDiffToRemoteResponse;
use codex_app_server_protocol::GitInfo as ApiGitInfo;
use codex_app_server_protocol::HookMetadata;
use codex_app_server_protocol::HooksListParams;
use codex_app_server_protocol::HooksListResponse;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ListMcpServerStatusParams;
use codex_app_server_protocol::ListMcpServerStatusResponse;
use codex_app_server_protocol::LoginAccountParams;
use codex_app_server_protocol::LoginAccountResponse;
use codex_app_server_protocol::LoginApiKeyParams;
use codex_app_server_protocol::LogoutAccountResponse;
use codex_app_server_protocol::MarketplaceAddParams;
use codex_app_server_protocol::MarketplaceAddResponse;
use codex_app_server_protocol::MarketplaceInterface;
use codex_app_server_protocol::MarketplaceRemoveParams;
use codex_app_server_protocol::MarketplaceRemoveResponse;
use codex_app_server_protocol::MarketplaceUpgradeErrorInfo;
use codex_app_server_protocol::MarketplaceUpgradeParams;
use codex_app_server_protocol::MarketplaceUpgradeResponse;
use codex_app_server_protocol::McpResourceReadParams;
use codex_app_server_protocol::McpResourceReadResponse;
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
use codex_app_server_protocol::McpServerOauthLoginParams;
use codex_app_server_protocol::McpServerOauthLoginResponse;
use codex_app_server_protocol::McpServerRefreshResponse;
use codex_app_server_protocol::McpServerStatus;
use codex_app_server_protocol::McpServerStatusDetail;
use codex_app_server_protocol::McpServerToolCallParams;
use codex_app_server_protocol::McpServerToolCallResponse;
use codex_app_server_protocol::MemoryResetResponse;
use codex_app_server_protocol::MockExperimentalMethodParams;
use codex_app_server_protocol::MockExperimentalMethodResponse;
use codex_app_server_protocol::ModelListParams;
use codex_app_server_protocol::ModelListResponse;
use codex_app_server_protocol::PermissionProfileModificationParams;
use codex_app_server_protocol::PermissionProfileSelectionParams;
use codex_app_server_protocol::PluginDetail;
use codex_app_server_protocol::PluginInstallParams;
use codex_app_server_protocol::PluginInstallResponse;
use codex_app_server_protocol::PluginInterface;
use codex_app_server_protocol::PluginListParams;
use codex_app_server_protocol::PluginListResponse;
use codex_app_server_protocol::PluginMarketplaceEntry;
use codex_app_server_protocol::PluginReadParams;
use codex_app_server_protocol::PluginReadResponse;
use codex_app_server_protocol::PluginShareDeleteParams;
use codex_app_server_protocol::PluginShareDeleteResponse;
use codex_app_server_protocol::PluginShareListItem;
use codex_app_server_protocol::PluginShareListParams;
use codex_app_server_protocol::PluginShareListResponse;
use codex_app_server_protocol::PluginShareSaveParams;
use codex_app_server_protocol::PluginShareSaveResponse;
use codex_app_server_protocol::PluginSkillReadParams;
use codex_app_server_protocol::PluginSkillReadResponse;
use codex_app_server_protocol::PluginSource;
|
||||
use codex_app_server_protocol::PluginSummary;
|
||||
use codex_app_server_protocol::PluginUninstallParams;
|
||||
use codex_app_server_protocol::PluginUninstallResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ReviewDelivery as ApiReviewDelivery;
|
||||
use codex_app_server_protocol::ReviewStartParams;
|
||||
use codex_app_server_protocol::ReviewStartResponse;
|
||||
use codex_app_server_protocol::ReviewTarget as ApiReviewTarget;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailParams;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailResponse;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestResolvedNotification;
|
||||
use codex_app_server_protocol::SkillSummary;
|
||||
use codex_app_server_protocol::SkillsConfigWriteParams;
|
||||
use codex_app_server_protocol::SkillsConfigWriteResponse;
|
||||
use codex_app_server_protocol::SkillsListParams;
|
||||
use codex_app_server_protocol::SkillsListResponse;
|
||||
use codex_app_server_protocol::SortDirection;
|
||||
use codex_app_server_protocol::Thread;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionParams;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionResponse;
|
||||
use codex_app_server_protocol::ThreadArchiveParams;
|
||||
use codex_app_server_protocol::ThreadArchiveResponse;
|
||||
use codex_app_server_protocol::ThreadArchivedNotification;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanParams;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanResponse;
|
||||
use codex_app_server_protocol::ThreadClosedNotification;
|
||||
use codex_app_server_protocol::ThreadCompactStartParams;
|
||||
use codex_app_server_protocol::ThreadCompactStartResponse;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadForkParams;
|
||||
use codex_app_server_protocol::ThreadForkResponse;
|
||||
use codex_app_server_protocol::ThreadGoal;
|
||||
use codex_app_server_protocol::ThreadGoalClearParams;
|
||||
use codex_app_server_protocol::ThreadGoalClearResponse;
|
||||
use codex_app_server_protocol::ThreadGoalClearedNotification;
|
||||
use codex_app_server_protocol::ThreadGoalGetParams;
|
||||
use codex_app_server_protocol::ThreadGoalGetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalSetParams;
|
||||
use codex_app_server_protocol::ThreadGoalSetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalStatus;
|
||||
use codex_app_server_protocol::ThreadGoalUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadHistoryBuilder;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadInjectItemsParams;
|
||||
use codex_app_server_protocol::ThreadInjectItemsResponse;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadListCwdFilter;
|
||||
use codex_app_server_protocol::ThreadListParams;
|
||||
use codex_app_server_protocol::ThreadListResponse;
|
||||
use codex_app_server_protocol::ThreadLoadedListParams;
|
||||
use codex_app_server_protocol::ThreadLoadedListResponse;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetParams;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetResponse;
|
||||
use codex_app_server_protocol::ThreadMetadataGitInfoUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateResponse;
|
||||
use codex_app_server_protocol::ThreadNameUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadReadParams;
|
||||
use codex_app_server_protocol::ThreadReadResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeListVoicesResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartTransport;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopResponse;
|
||||
use codex_app_server_protocol::ThreadResumeParams;
|
||||
use codex_app_server_protocol::ThreadResumeResponse;
|
||||
use codex_app_server_protocol::ThreadRollbackParams;
|
||||
use codex_app_server_protocol::ThreadSetNameParams;
|
||||
use codex_app_server_protocol::ThreadSetNameResponse;
|
||||
use codex_app_server_protocol::ThreadShellCommandParams;
|
||||
use codex_app_server_protocol::ThreadShellCommandResponse;
|
||||
use codex_app_server_protocol::ThreadSortKey;
|
||||
use codex_app_server_protocol::ThreadSourceKind;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::ThreadStartedNotification;
|
||||
use codex_app_server_protocol::ThreadStatus;
|
||||
use codex_app_server_protocol::ThreadTurnsListParams;
|
||||
use codex_app_server_protocol::ThreadTurnsListResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchiveParams;
|
||||
use codex_app_server_protocol::ThreadUnarchiveResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchivedNotification;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeParams;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeResponse;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeStatus;
|
||||
use codex_app_server_protocol::Turn;
|
||||
use codex_app_server_protocol::TurnEnvironmentParams;
|
||||
use codex_app_server_protocol::TurnError;
|
||||
use codex_app_server_protocol::TurnInterruptParams;
|
||||
use codex_app_server_protocol::TurnInterruptResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::TurnStartResponse;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::TurnSteerParams;
|
||||
use codex_app_server_protocol::TurnSteerResponse;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupCompletedNotification;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupMode;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartParams;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartResponse;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_backend_client::AddCreditsNudgeCreditType as BackendAddCreditsNudgeCreditType;
|
||||
use codex_backend_client::Client as BackendClient;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_chatgpt::workspace_settings;
|
||||
use codex_config::CloudRequirementsLoadError;
|
||||
use codex_config::CloudRequirementsLoadErrorCode;
|
||||
use codex_config::ConfigLayerStack;
|
||||
use codex_config::loader::project_trust_key;
|
||||
use codex_config::types::McpServerTransportConfig;
|
||||
use codex_core::CodexThread;
|
||||
use codex_core::CodexThreadTurnContextOverrides;
|
||||
use codex_core::ExternalGoalPreviousStatus;
|
||||
use codex_core::ExternalGoalSet;
|
||||
use codex_core::ForkSnapshot;
|
||||
use codex_core::NewThread;
|
||||
#[cfg(test)]
|
||||
use codex_core::SessionMeta;
|
||||
use codex_core::StartThreadOptions;
|
||||
use codex_core::SteerInputError;
|
||||
use codex_core::ThreadConfigSnapshot;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
use codex_core::config::NetworkProxyAuditMetadata;
|
||||
use codex_core::config::edit::ConfigEdit;
|
||||
use codex_core::config::edit::ConfigEditsBuilder;
|
||||
use codex_core::exec::ExecCapturePolicy;
|
||||
use codex_core::exec::ExecExpiration;
|
||||
use codex_core::exec::ExecParams;
|
||||
use codex_core::exec_env::create_env;
|
||||
use codex_core::find_thread_name_by_id;
|
||||
use codex_core::find_thread_path_by_id_str;
|
||||
use codex_core::path_utils;
|
||||
#[cfg(test)]
|
||||
use codex_core::read_head_for_summary;
|
||||
use codex_core::sandboxing::SandboxPermissions;
|
||||
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupMode as CoreWindowsSandboxSetupMode;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupRequest;
|
||||
use codex_core_plugins::OPENAI_CURATED_MARKETPLACE_NAME;
|
||||
use codex_core_plugins::PluginInstallError as CorePluginInstallError;
|
||||
use codex_core_plugins::PluginInstallRequest;
|
||||
use codex_core_plugins::PluginLoadOutcome;
|
||||
use codex_core_plugins::PluginReadRequest;
|
||||
use codex_core_plugins::PluginUninstallError as CorePluginUninstallError;
|
||||
use codex_core_plugins::loader::load_plugin_apps;
|
||||
use codex_core_plugins::loader::load_plugin_mcp_servers;
|
||||
use codex_core_plugins::loader::plugin_telemetry_metadata_from_root;
|
||||
use codex_core_plugins::manifest::PluginManifestInterface;
|
||||
use codex_core_plugins::marketplace::MarketplaceError;
|
||||
use codex_core_plugins::marketplace::MarketplacePluginSource;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddError;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddRequest;
|
||||
use codex_core_plugins::marketplace_add::add_marketplace as add_marketplace_to_codex_home;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveError;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveRequest as CoreMarketplaceRemoveRequest;
|
||||
use codex_core_plugins::marketplace_remove::remove_marketplace;
|
||||
use codex_core_plugins::remote::RemoteMarketplace;
|
||||
use codex_core_plugins::remote::RemotePluginCatalogError;
|
||||
use codex_core_plugins::remote::RemotePluginDetail as RemoteCatalogPluginDetail;
|
||||
use codex_core_plugins::remote::RemotePluginServiceConfig;
|
||||
use codex_core_plugins::remote::RemotePluginShareSummary as RemoteCatalogPluginShareSummary;
|
||||
use codex_core_plugins::remote::RemotePluginSummary as RemoteCatalogPluginSummary;
|
||||
use codex_exec_server::EnvironmentManager;
|
||||
use codex_exec_server::LOCAL_FS;
|
||||
use codex_features::FEATURES;
|
||||
use codex_features::Feature;
|
||||
use codex_features::Stage;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_feedback::FeedbackAttachmentPath;
|
||||
use codex_feedback::FeedbackUploadOptions;
|
||||
use codex_git_utils::git_diff_to_remote;
|
||||
use codex_git_utils::resolve_root_git_project_for_trust;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CLIENT_ID;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_login::ServerOptions as LoginServerOptions;
|
||||
use codex_login::ShutdownHandle;
|
||||
use codex_login::auth::login_with_chatgpt_auth_tokens;
|
||||
use codex_login::complete_device_code_login;
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_login::request_device_code;
|
||||
use codex_login::run_login_server;
|
||||
use codex_mcp::McpRuntimeEnvironment;
|
||||
use codex_mcp::McpServerStatusSnapshot;
|
||||
use codex_mcp::McpSnapshotDetail;
|
||||
use codex_mcp::collect_mcp_server_status_snapshot_with_detail;
|
||||
use codex_mcp::discover_supported_scopes;
|
||||
use codex_mcp::effective_mcp_servers;
|
||||
use codex_mcp::read_mcp_resource as read_mcp_resource_without_thread;
|
||||
use codex_mcp::resolve_oauth_scopes;
|
||||
use codex_memories_write::clear_memory_roots_contents;
|
||||
use codex_model_provider::ProviderAccountError;
|
||||
use codex_model_provider::create_model_provider;
|
||||
use codex_models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::config_types::CollaborationMode;
|
||||
use codex_protocol::config_types::ForcedLoginMethod;
|
||||
use codex_protocol::config_types::Personality;
|
||||
use codex_protocol::config_types::TrustLevel;
|
||||
use codex_protocol::config_types::WindowsSandboxLevel;
|
||||
use codex_protocol::dynamic_tools::DynamicToolSpec as CoreDynamicToolSpec;
|
||||
use codex_protocol::error::CodexErr;
|
||||
use codex_protocol::error::Result as CodexResult;
|
||||
#[cfg(test)]
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::permissions::FileSystemSandboxPolicy;
|
||||
use codex_protocol::protocol::AgentStatus;
|
||||
use codex_protocol::protocol::ConversationAudioParams;
|
||||
use codex_protocol::protocol::ConversationStartParams;
|
||||
use codex_protocol::protocol::ConversationStartTransport;
|
||||
use codex_protocol::protocol::ConversationTextParams;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
#[cfg(test)]
|
||||
use codex_protocol::protocol::GitInfo as CoreGitInfo;
|
||||
use codex_protocol::protocol::InitialHistory;
|
||||
use codex_protocol::protocol::McpAuthStatus as CoreMcpAuthStatus;
|
||||
use codex_protocol::protocol::McpServerRefreshConfig;
|
||||
use codex_protocol::protocol::Op;
|
||||
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
||||
use codex_protocol::protocol::RealtimeVoicesList;
|
||||
use codex_protocol::protocol::ResumedHistory;
|
||||
use codex_protocol::protocol::ReviewDelivery as CoreReviewDelivery;
|
||||
use codex_protocol::protocol::ReviewRequest;
|
||||
use codex_protocol::protocol::ReviewTarget as CoreReviewTarget;
|
||||
use codex_protocol::protocol::RolloutItem;
|
||||
use codex_protocol::protocol::SessionConfiguredEvent;
|
||||
#[cfg(test)]
|
||||
use codex_protocol::protocol::SessionMetaLine;
|
||||
use codex_protocol::protocol::TurnEnvironmentSelection;
|
||||
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
|
||||
use codex_protocol::protocol::W3cTraceContext;
|
||||
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
|
||||
use codex_protocol::user_input::UserInput as CoreInputItem;
|
||||
use codex_rmcp_client::perform_oauth_login_return_url;
|
||||
use codex_rollout::EventPersistenceMode;
|
||||
use codex_rollout::is_persisted_rollout_item;
|
||||
use codex_rollout::state_db::StateDbHandle;
|
||||
use codex_rollout::state_db::reconcile_rollout;
|
||||
use codex_state::ThreadMetadata;
|
||||
use codex_state::log_db::LogDbLayer;
|
||||
use codex_thread_store::ArchiveThreadParams as StoreArchiveThreadParams;
|
||||
use codex_thread_store::GitInfoPatch as StoreGitInfoPatch;
|
||||
use codex_thread_store::ListThreadsParams as StoreListThreadsParams;
|
||||
use codex_thread_store::LocalThreadStore;
|
||||
use codex_thread_store::ReadThreadByRolloutPathParams as StoreReadThreadByRolloutPathParams;
|
||||
use codex_thread_store::ReadThreadParams as StoreReadThreadParams;
|
||||
use codex_thread_store::SortDirection as StoreSortDirection;
|
||||
use codex_thread_store::StoredThread;
|
||||
use codex_thread_store::ThreadMetadataPatch as StoreThreadMetadataPatch;
|
||||
use codex_thread_store::ThreadSortKey as StoreThreadSortKey;
|
||||
use codex_thread_store::ThreadStore;
|
||||
use codex_thread_store::ThreadStoreError;
|
||||
use codex_thread_store::UpdateThreadMetadataParams as StoreUpdateThreadMetadataParams;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_pty::DEFAULT_OUTPUT_BYTES_CAP;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Error as IoError;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::result::Result;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::sync::SemaphorePermit;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::watch;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tokio_util::task::TaskTracker;
|
||||
use toml::Value as TomlValue;
|
||||
use tracing::Instrument;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
use tracing::warn;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[cfg(test)]
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
|
||||
mod account_processor;
|
||||
mod apps_processor;
|
||||
mod catalog_processor;
|
||||
mod command_exec_processor;
|
||||
mod config_processor;
|
||||
mod device_key_processor;
|
||||
mod external_agent_config_processor;
|
||||
mod feedback_processor;
|
||||
mod fs_processor;
|
||||
mod git_processor;
|
||||
mod initialize_processor;
|
||||
mod marketplace_processor;
|
||||
mod mcp_processor;
|
||||
mod plugins;
|
||||
mod process_exec_processor;
|
||||
mod search;
|
||||
mod thread_processor;
|
||||
mod token_usage_replay;
|
||||
mod turn_processor;
|
||||
mod windows_sandbox_processor;
|
||||
|
||||
pub(crate) use account_processor::AccountRequestProcessor;
|
||||
pub(crate) use apps_processor::AppsRequestProcessor;
|
||||
pub(crate) use catalog_processor::CatalogRequestProcessor;
|
||||
pub(crate) use command_exec_processor::CommandExecRequestProcessor;
|
||||
pub(crate) use config_processor::ConfigRequestProcessor;
|
||||
pub(crate) use device_key_processor::DeviceKeyRequestProcessor;
|
||||
pub(crate) use external_agent_config_processor::ExternalAgentConfigRequestProcessor;
|
||||
pub(crate) use feedback_processor::FeedbackRequestProcessor;
|
||||
pub(crate) use fs_processor::FsRequestProcessor;
|
||||
pub(crate) use git_processor::GitRequestProcessor;
|
||||
pub(crate) use initialize_processor::InitializeRequestProcessor;
|
||||
pub(crate) use marketplace_processor::MarketplaceRequestProcessor;
|
||||
pub(crate) use mcp_processor::McpRequestProcessor;
|
||||
pub(crate) use plugins::PluginRequestProcessor;
|
||||
pub(crate) use process_exec_processor::ProcessExecRequestProcessor;
|
||||
pub(crate) use search::SearchRequestProcessor;
|
||||
pub(crate) use thread_goal_processor::ThreadGoalRequestProcessor;
|
||||
pub(crate) use thread_processor::ThreadRequestProcessor;
|
||||
pub(crate) use turn_processor::TurnRequestProcessor;
|
||||
pub(crate) use windows_sandbox_processor::WindowsSandboxRequestProcessor;
|
||||
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::filters::compute_source_filters;
|
||||
use crate::filters::source_kind_matches;
|
||||
use crate::thread_state::ThreadListenerCommand;
|
||||
use crate::thread_state::ThreadState;
|
||||
use crate::thread_state::ThreadStateManager;
|
||||
use token_usage_replay::latest_token_usage_turn_id_from_rollout_items;
|
||||
use token_usage_replay::send_thread_token_usage_update_to_connection;
|
||||
|
||||
mod config_errors;
|
||||
mod request_errors;
|
||||
mod thread_goal_processor;
|
||||
mod thread_lifecycle;
|
||||
mod thread_summary;
|
||||
|
||||
use self::config_errors::*;
|
||||
use self::request_errors::*;
|
||||
use self::thread_goal_processor::api_thread_goal_from_state;
|
||||
use self::thread_lifecycle::*;
|
||||
use self::thread_summary::*;
|
||||
|
||||
pub(crate) use self::thread_lifecycle::populate_thread_turns_from_history;
|
||||
pub(crate) use self::thread_processor::thread_from_stored_thread;
|
||||
#[cfg(test)]
|
||||
pub(crate) use self::thread_summary::read_summary_from_rollout;
|
||||
pub(crate) use self::thread_summary::summary_to_thread;
|
||||
|
||||
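/// Rebuilds the API-facing `Turn` list from persisted rollout items, keeping
/// only items that are persisted under `EventPersistenceMode::Limited`.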
pub(crate) fn build_api_turns_from_rollout_items(items: &[RolloutItem]) -> Vec<Turn> {
    let mut builder = ThreadHistoryBuilder::new();
    for item in items {
        if is_persisted_rollout_item(item, EventPersistenceMode::Limited) {
            builder.handle_rollout_item(item);
        }
    }
    builder.finish()
}
File diff suppressed because it is too large
@@ -1,350 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct AppsRequestProcessor {
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    config_manager: ConfigManager,
    workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
}

impl AppsRequestProcessor {
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        outgoing: Arc<OutgoingMessageSender>,
        config_manager: ConfigManager,
        workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            outgoing,
            config_manager,
            workspace_settings_cache,
        }
    }

    pub(crate) async fn apps_list(
        &self,
        request_id: &ConnectionRequestId,
        params: AppsListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.apps_list_inner(request_id, params)
            .await
            .map(|response| response.map(Into::into))
    }

    async fn apps_list_inner(
        &self,
        request_id: &ConnectionRequestId,
        params: AppsListParams,
    ) -> Result<Option<AppsListResponse>, JSONRPCErrorError> {
        let mut config = self.load_latest_config(/*fallback_cwd*/ None).await?;

        if let Some(thread_id) = params.thread_id.as_deref() {
            let (_, thread) = self.load_thread(thread_id).await?;

            let _ = config
                .features
                .set_enabled(Feature::Apps, thread.enabled(Feature::Apps));
        }

        let auth = self.auth_manager.auth().await;
        if !config
            .features
            .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::uses_codex_backend))
        {
            return Ok(Some(AppsListResponse {
                data: Vec::new(),
                next_cursor: None,
            }));
        }

        if !self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await
        {
            return Ok(Some(AppsListResponse {
                data: Vec::new(),
                next_cursor: None,
            }));
        }

        let request = request_id.clone();
        let outgoing = Arc::clone(&self.outgoing);
        let environment_manager = self.thread_manager.environment_manager();
        tokio::spawn(async move {
            Self::apps_list_task(outgoing, request, params, config, environment_manager).await;
        });
        Ok(None)
    }

    async fn apps_list_task(
        outgoing: Arc<OutgoingMessageSender>,
        request_id: ConnectionRequestId,
        params: AppsListParams,
        config: Config,
        environment_manager: Arc<EnvironmentManager>,
    ) {
        let result = Self::apps_list_response(&outgoing, params, config, environment_manager).await;
        outgoing.send_result(request_id, result).await;
    }

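    /// Answers `apps/list` by merging two sources: connectors accessible via
    /// MCP tools and the full directory. Cached snapshots are served first,
    /// fresh fetches run in the background, and interim merges are pushed as
    /// `AppListUpdated` notifications until both sources finish loading.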
    async fn apps_list_response(
        outgoing: &Arc<OutgoingMessageSender>,
        params: AppsListParams,
        config: Config,
        environment_manager: Arc<EnvironmentManager>,
    ) -> Result<AppsListResponse, JSONRPCErrorError> {
        let AppsListParams {
            cursor,
            limit,
            thread_id: _,
            force_refetch,
        } = params;
        let start = match cursor {
            Some(cursor) => match cursor.parse::<usize>() {
                Ok(idx) => idx,
                Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
            },
            None => 0,
        };

        let (mut accessible_connectors, mut all_connectors) = tokio::join!(
            connectors::list_cached_accessible_connectors_from_mcp_tools(&config),
            connectors::list_cached_all_connectors(&config)
        );
        let cached_all_connectors = all_connectors.clone();

        let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();

        let accessible_config = config.clone();
        let accessible_tx = tx.clone();
        tokio::spawn(async move {
            let result =
                connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
                    &accessible_config,
                    force_refetch,
                    &environment_manager,
                )
                .await
                .map(|status| status.connectors)
                .map_err(|err| format!("failed to load accessible apps: {err}"));
            let _ = accessible_tx.send(AppListLoadResult::Accessible(result));
        });

        let all_config = config.clone();
        tokio::spawn(async move {
            let result = connectors::list_all_connectors_with_options(&all_config, force_refetch)
                .await
                .map_err(|err| format!("failed to list apps: {err}"));
            let _ = tx.send(AppListLoadResult::Directory(result));
        });

        let app_list_deadline = tokio::time::Instant::now() + APP_LIST_LOAD_TIMEOUT;
        let mut accessible_loaded = false;
        let mut all_loaded = false;
        let mut last_notified_apps = None;

        if accessible_connectors.is_some() || all_connectors.is_some() {
            let merged = connectors::with_app_enabled_state(
                merge_loaded_apps(all_connectors.as_deref(), accessible_connectors.as_deref()),
                &config,
            );
            if should_send_app_list_updated_notification(
                merged.as_slice(),
                accessible_loaded,
                all_loaded,
            ) {
                send_app_list_updated_notification(outgoing, merged.clone()).await;
                last_notified_apps = Some(merged);
            }
        }

        loop {
            let result = match tokio::time::timeout_at(app_list_deadline, rx.recv()).await {
                Ok(Some(result)) => result,
                Ok(None) => {
                    return Err(internal_error("failed to load app lists"));
                }
                Err(_) => {
                    let timeout_seconds = APP_LIST_LOAD_TIMEOUT.as_secs();
                    return Err(internal_error(format!(
                        "timed out waiting for app lists after {timeout_seconds} seconds"
                    )));
                }
            };

            match result {
                AppListLoadResult::Accessible(Ok(connectors)) => {
                    accessible_connectors = Some(connectors);
                    accessible_loaded = true;
                }
                AppListLoadResult::Accessible(Err(err)) => {
                    return Err(internal_error(err));
                }
                AppListLoadResult::Directory(Ok(connectors)) => {
                    all_connectors = Some(connectors);
                    all_loaded = true;
                }
                AppListLoadResult::Directory(Err(err)) => {
                    return Err(internal_error(err));
                }
            }

            let showing_interim_force_refetch = force_refetch && !(accessible_loaded && all_loaded);
            let all_connectors_for_update =
                if showing_interim_force_refetch && cached_all_connectors.is_some() {
                    cached_all_connectors.as_deref()
                } else {
                    all_connectors.as_deref()
                };
            let accessible_connectors_for_update =
                if showing_interim_force_refetch && !accessible_loaded {
                    None
                } else {
                    accessible_connectors.as_deref()
                };
            let merged = connectors::with_app_enabled_state(
                merge_loaded_apps(all_connectors_for_update, accessible_connectors_for_update),
                &config,
            );
            if should_send_app_list_updated_notification(
                merged.as_slice(),
                accessible_loaded,
                all_loaded,
            ) && last_notified_apps.as_ref() != Some(&merged)
            {
                send_app_list_updated_notification(outgoing, merged.clone()).await;
                last_notified_apps = Some(merged.clone());
            }

            if accessible_loaded && all_loaded {
                return paginate_apps(merged.as_slice(), start, limit);
            }
        }
    }

    async fn load_thread(
        &self,
        thread_id: &str,
    ) -> Result<(ThreadId, Arc<CodexThread>), JSONRPCErrorError> {
        let thread_id = ThreadId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("invalid thread id: {err}"),
            data: None,
        })?;

        let thread = self
            .thread_manager
            .get_thread(thread_id)
            .await
            .map_err(|_| JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("thread not found: {thread_id}"),
                data: None,
            })?;

        Ok((thread_id, thread))
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }

    async fn workspace_codex_plugins_enabled(
        &self,
        config: &Config,
        auth: Option<&CodexAuth>,
    ) -> bool {
        match workspace_settings::codex_plugins_enabled_for_workspace(
            config,
            auth,
            Some(&self.workspace_settings_cache),
        )
        .await
        {
            Ok(enabled) => enabled,
            Err(err) => {
                warn!(
                    "failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
                );
                true
            }
        }
    }
}

const APP_LIST_LOAD_TIMEOUT: Duration = Duration::from_secs(90);

enum AppListLoadResult {
    Accessible(Result<Vec<AppInfo>, String>),
    Directory(Result<Vec<AppInfo>, String>),
}

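// A `None` slice means that source has not loaded yet; the directory's loaded
// state is forwarded so an empty listing is not conflated with a missing one.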
fn merge_loaded_apps(
    all_connectors: Option<&[AppInfo]>,
    accessible_connectors: Option<&[AppInfo]>,
) -> Vec<AppInfo> {
    let all_connectors_loaded = all_connectors.is_some();
    let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
    let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
    connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
}

fn should_send_app_list_updated_notification(
    connectors: &[AppInfo],
    accessible_loaded: bool,
    all_loaded: bool,
) -> bool {
    connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
}

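// Cursor-based pagination over the merged app list. The cursor is a start
// index serialized as a string; `limit` is clamped to at least 1 so
// `limit: 0` cannot produce a non-advancing page.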
fn paginate_apps(
    connectors: &[AppInfo],
    start: usize,
    limit: Option<u32>,
) -> Result<AppsListResponse, JSONRPCErrorError> {
    let total = connectors.len();
    if start > total {
        return Err(JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("cursor {start} exceeds total apps {total}"),
            data: None,
        });
    }

    let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
    let end = start.saturating_add(effective_limit).min(total);
    let data = connectors[start..end].to_vec();
    let next_cursor = if end < total {
        Some(end.to_string())
    } else {
        None
    };

    Ok(AppsListResponse { data, next_cursor })
}

async fn send_app_list_updated_notification(
    outgoing: &Arc<OutgoingMessageSender>,
    data: Vec<AppInfo>,
) {
    outgoing
        .send_server_notification(ServerNotification::AppListUpdated(
            AppListUpdatedNotification { data },
        ))
        .await;
}
@@ -1,600 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct CatalogRequestProcessor {
    pub(super) auth_manager: Arc<AuthManager>,
    pub(super) thread_manager: Arc<ThreadManager>,
    pub(super) config: Arc<Config>,
    pub(super) config_manager: ConfigManager,
    pub(super) workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
}

fn skills_to_info(
    skills: &[codex_core::skills::SkillMetadata],
    disabled_paths: &HashSet<AbsolutePathBuf>,
) -> Vec<codex_app_server_protocol::SkillMetadata> {
    skills
        .iter()
        .map(|skill| {
            let enabled = !disabled_paths.contains(&skill.path_to_skills_md);
            codex_app_server_protocol::SkillMetadata {
                name: skill.name.clone(),
                description: skill.description.clone(),
                short_description: skill.short_description.clone(),
                interface: skill.interface.clone().map(|interface| {
                    codex_app_server_protocol::SkillInterface {
                        display_name: interface.display_name,
                        short_description: interface.short_description,
                        icon_small: interface.icon_small,
                        icon_large: interface.icon_large,
                        brand_color: interface.brand_color,
                        default_prompt: interface.default_prompt,
                    }
                }),
                dependencies: skill.dependencies.clone().map(|dependencies| {
                    codex_app_server_protocol::SkillDependencies {
                        tools: dependencies
                            .tools
                            .into_iter()
                            .map(|tool| codex_app_server_protocol::SkillToolDependency {
                                r#type: tool.r#type,
                                value: tool.value,
                                description: tool.description,
                                transport: tool.transport,
                                command: tool.command,
                                url: tool.url,
                            })
                            .collect(),
                    }
                }),
                path: skill.path_to_skills_md.clone(),
                scope: skill.scope.into(),
                enabled,
            }
        })
        .collect()
}

fn hooks_to_info(hooks: &[codex_hooks::HookListEntry]) -> Vec<HookMetadata> {
    hooks
        .iter()
        .map(|hook| HookMetadata {
            key: hook.key.clone(),
            event_name: hook.event_name.into(),
            handler_type: hook.handler_type.into(),
            matcher: hook.matcher.clone(),
            command: hook.command.clone(),
            timeout_sec: hook.timeout_sec,
            status_message: hook.status_message.clone(),
            source_path: hook.source_path.clone(),
            source: hook.source.into(),
            plugin_id: hook.plugin_id.clone(),
            display_order: hook.display_order,
            enabled: hook.enabled,
            is_managed: hook.is_managed,
        })
        .collect()
}

fn errors_to_info(
    errors: &[codex_core::skills::SkillError],
) -> Vec<codex_app_server_protocol::SkillErrorInfo> {
    errors
        .iter()
        .map(|err| codex_app_server_protocol::SkillErrorInfo {
            path: err.path.to_path_buf(),
            message: err.message.clone(),
        })
        .collect()
}

impl CatalogRequestProcessor {
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        config: Arc<Config>,
        config_manager: ConfigManager,
        workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            config,
            config_manager,
            workspace_settings_cache,
        }
    }

    pub(crate) async fn skills_list(
        &self,
        params: SkillsListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.skills_list_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn hooks_list(
        &self,
        params: HooksListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.hooks_list_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn skills_config_write(
        &self,
        params: SkillsConfigWriteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.skills_config_write_response_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn model_list(
        &self,
        params: ModelListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        Self::list_models(self.thread_manager.clone(), params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn experimental_feature_list(
        &self,
        params: ExperimentalFeatureListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.experimental_feature_list_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn collaboration_mode_list(
        &self,
        params: CollaborationModeListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        Self::list_collaboration_modes(self.thread_manager.clone(), params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn mock_experimental_method(
        &self,
        params: MockExperimentalMethodParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.mock_experimental_method_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    async fn resolve_cwd_config(
        &self,
        cwd: &Path,
    ) -> Result<(AbsolutePathBuf, ConfigLayerStack), String> {
        let cwd_abs =
            AbsolutePathBuf::relative_to_current_dir(cwd).map_err(|err| err.to_string())?;
        let config_layer_stack = self
            .config_manager
            .load_config_layers_for_cwd(cwd_abs.clone())
            .await
            .map_err(|err| err.to_string())?;

        Ok((cwd_abs, config_layer_stack))
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }

    async fn workspace_codex_plugins_enabled(
        &self,
        config: &Config,
        auth: Option<&CodexAuth>,
    ) -> bool {
        match workspace_settings::codex_plugins_enabled_for_workspace(
            config,
            auth,
            Some(&self.workspace_settings_cache),
        )
        .await
        {
            Ok(enabled) => enabled,
            Err(err) => {
                warn!(
                    "failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
                );
                true
            }
        }
    }

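    /// Lists supported models with the same stringified-index cursor
    /// pagination used by the other list endpoints in this module.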
    async fn list_models(
        thread_manager: Arc<ThreadManager>,
        params: ModelListParams,
    ) -> Result<ModelListResponse, JSONRPCErrorError> {
        let ModelListParams {
            limit,
            cursor,
            include_hidden,
        } = params;
        let models = supported_models(thread_manager, include_hidden.unwrap_or(false)).await;
        let total = models.len();

        if total == 0 {
            return Ok(ModelListResponse {
                data: Vec::new(),
                next_cursor: None,
            });
        }

        let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
        let effective_limit = effective_limit.min(total);
        let start = match cursor {
            Some(cursor) => cursor
                .parse::<usize>()
                .map_err(|_| invalid_request(format!("invalid cursor: {cursor}")))?,
            None => 0,
        };

        if start > total {
            return Err(invalid_request(format!(
                "cursor {start} exceeds total models {total}"
            )));
        }

        let end = start.saturating_add(effective_limit).min(total);
        let items = models[start..end].to_vec();
        let next_cursor = if end < total {
            Some(end.to_string())
        } else {
            None
        };
        Ok(ModelListResponse {
            data: items,
            next_cursor,
        })
    }

    async fn list_collaboration_modes(
        thread_manager: Arc<ThreadManager>,
        params: CollaborationModeListParams,
    ) -> Result<CollaborationModeListResponse, JSONRPCErrorError> {
        let CollaborationModeListParams {} = params;
        let items = thread_manager
            .list_collaboration_modes()
            .into_iter()
            .map(Into::into)
            .collect();
        let response = CollaborationModeListResponse { data: items };
        Ok(response)
    }

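    /// Flattens each feature spec's `Stage` into the API stage enum and
    /// paginates the result. `Apps` and `Plugins` are reported as disabled
    /// when the workspace has Codex plugins turned off.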
    async fn experimental_feature_list_response(
        &self,
        params: ExperimentalFeatureListParams,
    ) -> Result<ExperimentalFeatureListResponse, JSONRPCErrorError> {
        let ExperimentalFeatureListParams { cursor, limit } = params;
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let auth = self.auth_manager.auth().await;
        let workspace_codex_plugins_enabled = self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await;

        let data = FEATURES
            .iter()
            .map(|spec| {
                let (stage, display_name, description, announcement) = match spec.stage {
                    Stage::Experimental {
                        name,
                        menu_description,
                        announcement,
                    } => (
                        ApiExperimentalFeatureStage::Beta,
                        Some(name.to_string()),
                        Some(menu_description.to_string()),
                        Some(announcement.to_string()),
                    ),
                    Stage::UnderDevelopment => (
                        ApiExperimentalFeatureStage::UnderDevelopment,
                        None,
                        None,
                        None,
                    ),
                    Stage::Stable => (ApiExperimentalFeatureStage::Stable, None, None, None),
                    Stage::Deprecated => {
                        (ApiExperimentalFeatureStage::Deprecated, None, None, None)
                    }
                    Stage::Removed => (ApiExperimentalFeatureStage::Removed, None, None, None),
                };

                ApiExperimentalFeature {
                    name: spec.key.to_string(),
                    stage,
                    display_name,
                    description,
                    announcement,
                    enabled: config.features.enabled(spec.id)
                        && (workspace_codex_plugins_enabled
                            || !matches!(spec.id, Feature::Apps | Feature::Plugins)),
                    default_enabled: spec.default_enabled,
                }
            })
            .collect::<Vec<_>>();

        let total = data.len();
        if total == 0 {
            return Ok(ExperimentalFeatureListResponse {
                data: Vec::new(),
                next_cursor: None,
            });
        }

        // Clamp to 1 so limit=0 cannot return a non-advancing page.
        let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
        let effective_limit = effective_limit.min(total);
        let start = match cursor {
            Some(cursor) => match cursor.parse::<usize>() {
                Ok(idx) => idx,
                Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
            },
            None => 0,
        };

        if start > total {
            return Err(invalid_request(format!(
                "cursor {start} exceeds total feature flags {total}"
            )));
        }

        let end = start.saturating_add(effective_limit).min(total);
        let data = data[start..end].to_vec();
        let next_cursor = if end < total {
            Some(end.to_string())
        } else {
            None
        };

        Ok(ExperimentalFeatureListResponse { data, next_cursor })
    }

    async fn mock_experimental_method_inner(
        &self,
        params: MockExperimentalMethodParams,
    ) -> Result<MockExperimentalMethodResponse, JSONRPCErrorError> {
        let MockExperimentalMethodParams { value } = params;
        let response = MockExperimentalMethodResponse { echoed: value };
        Ok(response)
    }

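    /// Resolves skills per requested cwd. Extra user roots must be absolute
    /// paths; entries for a cwd that is not in the request are ignored with
    /// a warning rather than failing the whole call.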
    async fn skills_list_response(
        &self,
        params: SkillsListParams,
    ) -> Result<SkillsListResponse, JSONRPCErrorError> {
        let SkillsListParams {
            cwds,
            force_reload,
            per_cwd_extra_user_roots,
        } = params;
        let cwds = if cwds.is_empty() {
            vec![self.config.cwd.to_path_buf()]
        } else {
            cwds
        };
        let cwd_set: HashSet<PathBuf> = cwds.iter().cloned().collect();

        let mut extra_roots_by_cwd: HashMap<PathBuf, Vec<AbsolutePathBuf>> = HashMap::new();
        for entry in per_cwd_extra_user_roots.unwrap_or_default() {
            if !cwd_set.contains(&entry.cwd) {
                warn!(
                    cwd = %entry.cwd.display(),
                    "ignoring per-cwd extra roots for cwd not present in skills/list cwds"
                );
                continue;
            }

            let mut valid_extra_roots = Vec::new();
            for root in entry.extra_user_roots {
                let root =
                    AbsolutePathBuf::from_absolute_path_checked(root.as_path()).map_err(|_| {
                        invalid_request(format!(
                            "skills/list perCwdExtraUserRoots extraUserRoots paths must be absolute: {}",
                            root.display()
                        ))
                    })?;
                valid_extra_roots.push(root);
            }
            extra_roots_by_cwd
                .entry(entry.cwd)
                .or_default()
                .extend(valid_extra_roots);
        }

        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let auth = self.auth_manager.auth().await;
        let workspace_codex_plugins_enabled = self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await;
        let skills_manager = self.thread_manager.skills_manager();
        let plugins_manager = self.thread_manager.plugins_manager();
        let fs = self
            .thread_manager
            .environment_manager()
            .default_environment()
            .map(|environment| environment.get_filesystem());
        let mut data = Vec::new();
        for cwd in cwds {
            let (cwd_abs, config_layer_stack) = match self.resolve_cwd_config(&cwd).await {
                Ok(resolved) => resolved,
                Err(message) => {
                    let error_path = cwd.clone();
                    data.push(codex_app_server_protocol::SkillsListEntry {
                        cwd,
                        skills: Vec::new(),
                        errors: vec![codex_app_server_protocol::SkillErrorInfo {
                            path: error_path,
                            message,
                        }],
                    });
                    continue;
                }
            };
            let extra_roots = extra_roots_by_cwd
                .get(&cwd)
                .map_or(&[][..], std::vec::Vec::as_slice);
            let effective_skill_roots = if workspace_codex_plugins_enabled {
                let plugins_input = config.plugins_config_input();
                plugins_manager
                    .effective_skill_roots_for_layer_stack(&config_layer_stack, &plugins_input)
                    .await
            } else {
                Vec::new()
            };
            let skills_input = codex_core::skills::SkillsLoadInput::new(
                cwd_abs.clone(),
                effective_skill_roots,
                config_layer_stack,
                config.bundled_skills_enabled(),
            );
            let outcome = skills_manager
                .skills_for_cwd_with_extra_user_roots(
                    &skills_input,
                    force_reload,
                    extra_roots,
                    fs.clone(),
                )
                .await;
            let errors = errors_to_info(&outcome.errors);
            let skills = skills_to_info(&outcome.skills, &outcome.disabled_paths);
            data.push(codex_app_server_protocol::SkillsListEntry {
                cwd,
                skills,
                errors,
            });
        }
        Ok(SkillsListResponse { data })
    }

    /// Handle `hooks/list` by resolving hooks for each requested cwd.
    async fn hooks_list_response(
        &self,
        params: HooksListParams,
    ) -> Result<HooksListResponse, JSONRPCErrorError> {
        let HooksListParams { cwds } = params;
        let cwds = if cwds.is_empty() {
            vec![self.config.cwd.to_path_buf()]
        } else {
            cwds
        };

        let auth = self.auth_manager.auth().await;
        let plugins_manager = self.thread_manager.plugins_manager();
        let mut data = Vec::new();
        for cwd in cwds {
            let config = match self
                .config_manager
                .load_for_cwd(
                    /*request_overrides*/ None,
                    ConfigOverrides::default(),
                    Some(cwd.clone()),
                )
                .await
            {
                Ok(config) => config,
                Err(err) => {
                    let error_path = cwd.clone();
                    data.push(codex_app_server_protocol::HooksListEntry {
                        cwd,
                        hooks: Vec::new(),
                        warnings: Vec::new(),
                        errors: vec![codex_app_server_protocol::HookErrorInfo {
                            path: error_path,
                            message: err.to_string(),
                        }],
                    });
                    continue;
                }
            };
            let workspace_codex_plugins_enabled = self
                .workspace_codex_plugins_enabled(&config, auth.as_ref())
                .await;
            let plugins_enabled =
                config.features.enabled(Feature::Plugins) && workspace_codex_plugins_enabled;
            let plugin_outcome = if plugins_enabled && config.features.enabled(Feature::PluginHooks)
            {
                let plugins_input = config.plugins_config_input();
                plugins_manager
                    .plugins_for_layer_stack(
                        &config.config_layer_stack,
                        &plugins_input,
                        /*plugin_hooks_feature_enabled*/ true,
                    )
                    .await
            } else {
                PluginLoadOutcome::default()
            };
            let hooks = codex_hooks::list_hooks(codex_hooks::HooksConfig {
                feature_enabled: config.features.enabled(Feature::CodexHooks),
                config_layer_stack: Some(config.config_layer_stack),
                plugin_hook_sources: plugin_outcome.effective_plugin_hook_sources(),
                plugin_hook_load_warnings: plugin_outcome.effective_plugin_hook_warnings(),
                ..Default::default()
            });
            data.push(codex_app_server_protocol::HooksListEntry {
                cwd,
                hooks: hooks_to_info(&hooks.hooks),
                warnings: hooks.warnings,
                errors: Vec::new(),
            });
        }
        Ok(HooksListResponse { data })
    }

    async fn skills_config_write_response_inner(
        &self,
        params: SkillsConfigWriteParams,
    ) -> Result<SkillsConfigWriteResponse, JSONRPCErrorError> {
        let SkillsConfigWriteParams {
            path,
            name,
            enabled,
        } = params;
        let edit = match (path, name) {
            (Some(path), None) => ConfigEdit::SetSkillConfig {
                path: path.into_path_buf(),
                enabled,
            },
            (None, Some(name)) if !name.trim().is_empty() => {
                ConfigEdit::SetSkillConfigByName { name, enabled }
            }
            _ => {
                return Err(invalid_params(
                    "skills/config/write requires exactly one of path or name",
                ));
            }
        };
        let edits = vec![edit];
        ConfigEditsBuilder::new(&self.config.codex_home)
            .with_edits(edits)
            .apply()
            .await
            .map(|()| {
                self.thread_manager.plugins_manager().clear_cache();
                self.thread_manager.skills_manager().clear_cache();
                SkillsConfigWriteResponse {
                    effective_enabled: enabled,
                }
            })
            .map_err(|err| internal_error(format!("failed to update skill settings: {err}")))
    }
}
@@ -1,321 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct CommandExecRequestProcessor {
    arg0_paths: Arg0DispatchPaths,
    config: Arc<Config>,
    outgoing: Arc<OutgoingMessageSender>,
    command_exec_manager: CommandExecManager,
}

impl CommandExecRequestProcessor {
    pub(crate) fn new(
        arg0_paths: Arg0DispatchPaths,
        config: Arc<Config>,
        outgoing: Arc<OutgoingMessageSender>,
    ) -> Self {
        Self {
            arg0_paths,
            config,
            outgoing,
            command_exec_manager: CommandExecManager::default(),
        }
    }

    pub(crate) async fn one_off_command_exec(
        &self,
        request_id: &ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.exec_one_off_command(request_id, params)
            .await
            .map(|()| None)
    }

    pub(crate) async fn command_exec_write(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecWriteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .write(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn command_exec_resize(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecResizeParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .resize(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn command_exec_terminate(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecTerminateParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .terminate(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn connection_closed(&self, connection_id: ConnectionId) {
        self.command_exec_manager
            .connection_closed(connection_id)
            .await;
    }

    async fn exec_one_off_command(
        &self,
        request_id: &ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<(), JSONRPCErrorError> {
        self.exec_one_off_command_inner(request_id.clone(), params)
            .await
    }

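    /// Validates `command/exec` parameters (mutually exclusive cap/timeout
    /// flags, tty-only `size`, non-negative `timeoutMs`), then assembles the
    /// environment, network proxy, and sandbox settings for the one-off run.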
    async fn exec_one_off_command_inner(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<(), JSONRPCErrorError> {
        tracing::debug!("ExecOneOffCommand params: {params:?}");

        let request = request_id.clone();

        if params.command.is_empty() {
            return Err(invalid_request("command must not be empty"));
        }

        let CommandExecParams {
            command,
            process_id,
            tty,
            stream_stdin,
            stream_stdout_stderr,
            output_bytes_cap,
            disable_output_cap,
            disable_timeout,
            timeout_ms,
            cwd,
            env: env_overrides,
            size,
            sandbox_policy,
            permission_profile,
        } = params;
        if sandbox_policy.is_some() && permission_profile.is_some() {
            return Err(invalid_request(
                "`permissionProfile` cannot be combined with `sandboxPolicy`",
            ));
        }

        if size.is_some() && !tty {
            return Err(invalid_params("command/exec size requires tty: true"));
        }

        if disable_output_cap && output_bytes_cap.is_some() {
            return Err(invalid_params(
                "command/exec cannot set both outputBytesCap and disableOutputCap",
            ));
        }

        if disable_timeout && timeout_ms.is_some() {
            return Err(invalid_params(
                "command/exec cannot set both timeoutMs and disableTimeout",
            ));
        }

        let cwd = cwd.map_or_else(|| self.config.cwd.clone(), |cwd| self.config.cwd.join(cwd));
        let mut env = create_env(
            &self.config.permissions.shell_environment_policy,
            /*thread_id*/ None,
        );
        if let Some(env_overrides) = env_overrides {
            for (key, value) in env_overrides {
                match value {
                    Some(value) => {
                        env.insert(key, value);
                    }
                    None => {
                        env.remove(&key);
                    }
                }
            }
        }
        let timeout_ms = match timeout_ms {
            Some(timeout_ms) => match u64::try_from(timeout_ms) {
                Ok(timeout_ms) => Some(timeout_ms),
                Err(_) => {
                    return Err(invalid_params(format!(
                        "command/exec timeoutMs must be non-negative, got {timeout_ms}"
                    )));
                }
            },
            None => None,
        };
        let managed_network_requirements_enabled =
            self.config.managed_network_requirements_enabled();
        let started_network_proxy = match self.config.permissions.network.as_ref() {
            Some(spec) => match spec
                .start_proxy(
                    self.config.permissions.permission_profile.get(),
                    /*policy_decider*/ None,
                    /*blocked_request_observer*/ None,
                    managed_network_requirements_enabled,
                    NetworkProxyAuditMetadata::default(),
                )
                .await
            {
                Ok(started) => Some(started),
                Err(err) => {
                    return Err(internal_error(format!(
                        "failed to start managed network proxy: {err}"
                    )));
                }
            },
            None => None,
        };
        let windows_sandbox_level = WindowsSandboxLevel::from_config(&self.config);
        let output_bytes_cap = if disable_output_cap {
            None
        } else {
            Some(output_bytes_cap.unwrap_or(DEFAULT_OUTPUT_BYTES_CAP))
        };
        let expiration = if disable_timeout {
            ExecExpiration::Cancellation(CancellationToken::new())
        } else {
            match timeout_ms {
                Some(timeout_ms) => timeout_ms.into(),
                None => ExecExpiration::DefaultTimeout,
            }
        };
        let capture_policy = if disable_output_cap {
            ExecCapturePolicy::FullBuffer
        } else {
            ExecCapturePolicy::ShellTool
        };
        let sandbox_cwd = if permission_profile.is_some() {
            cwd.clone()
        } else {
            self.config.cwd.clone()
        };
        let exec_params = ExecParams {
            command,
||||
cwd: cwd.clone(),
|
||||
expiration,
|
||||
capture_policy,
|
||||
env,
|
||||
network: started_network_proxy
|
||||
.as_ref()
|
||||
.map(codex_core::config::StartedNetworkProxy::proxy),
|
||||
sandbox_permissions: SandboxPermissions::UseDefault,
|
||||
windows_sandbox_level,
|
||||
windows_sandbox_private_desktop: self
|
||||
.config
|
||||
.permissions
|
||||
.windows_sandbox_private_desktop,
|
||||
justification: None,
|
||||
arg0: None,
|
||||
};
|
||||
|
||||
let effective_permission_profile = if let Some(permission_profile) = permission_profile {
|
||||
let permission_profile =
|
||||
codex_protocol::models::PermissionProfile::from(permission_profile);
|
||||
let (mut file_system_sandbox_policy, network_sandbox_policy) =
|
||||
permission_profile.to_runtime_permissions();
|
||||
let configured_file_system_sandbox_policy =
|
||||
self.config.permissions.file_system_sandbox_policy();
|
||||
Self::preserve_configured_deny_read_restrictions(
|
||||
&mut file_system_sandbox_policy,
|
||||
&configured_file_system_sandbox_policy,
|
||||
);
|
||||
let effective_permission_profile =
|
||||
codex_protocol::models::PermissionProfile::from_runtime_permissions_with_enforcement(
|
||||
permission_profile.enforcement(),
|
||||
&file_system_sandbox_policy,
|
||||
network_sandbox_policy,
|
||||
);
|
||||
self.config
|
||||
.permissions
|
||||
.permission_profile
|
||||
.can_set(&effective_permission_profile)
|
||||
.map_err(|err| invalid_request(format!("invalid permission profile: {err}")))?;
|
||||
effective_permission_profile
|
||||
} else if let Some(policy) = sandbox_policy.map(|policy| policy.to_core()) {
|
||||
self.config
|
||||
.permissions
|
||||
.can_set_legacy_sandbox_policy(&policy, &sandbox_cwd)
|
||||
.map_err(|err| invalid_request(format!("invalid sandbox policy: {err}")))?;
|
||||
let file_system_sandbox_policy =
|
||||
codex_protocol::permissions::FileSystemSandboxPolicy::from_legacy_sandbox_policy_for_cwd(&policy, &sandbox_cwd);
|
||||
let network_sandbox_policy =
|
||||
codex_protocol::permissions::NetworkSandboxPolicy::from(&policy);
|
||||
let permission_profile =
|
||||
codex_protocol::models::PermissionProfile::from_runtime_permissions_with_enforcement(
|
||||
codex_protocol::models::SandboxEnforcement::from_legacy_sandbox_policy(&policy),
|
||||
&file_system_sandbox_policy,
|
||||
network_sandbox_policy,
|
||||
);
|
||||
self.config
|
||||
.permissions
|
||||
.permission_profile
|
||||
.can_set(&permission_profile)
|
||||
.map_err(|err| invalid_request(format!("invalid sandbox policy: {err}")))?;
|
||||
permission_profile
|
||||
} else {
|
||||
self.config.permissions.permission_profile()
|
||||
};
|
||||
|
||||
let codex_linux_sandbox_exe = self.arg0_paths.codex_linux_sandbox_exe.clone();
|
||||
let outgoing = self.outgoing.clone();
|
||||
let request_for_task = request.clone();
|
||||
let started_network_proxy_for_task = started_network_proxy;
|
||||
let use_legacy_landlock = self.config.features.use_legacy_landlock();
|
||||
let size = match size.map(crate::command_exec::terminal_size_from_protocol) {
|
||||
Some(Ok(size)) => Some(size),
|
||||
Some(Err(error)) => return Err(error),
|
||||
None => None,
|
||||
};
|
||||
|
||||
let exec_request = codex_core::exec::build_exec_request(
|
||||
exec_params,
|
||||
&effective_permission_profile,
|
||||
&sandbox_cwd,
|
||||
&codex_linux_sandbox_exe,
|
||||
use_legacy_landlock,
|
||||
)
|
||||
.map_err(|err| internal_error(format!("exec failed: {err}")))?;
|
||||
self.command_exec_manager
|
||||
.start(StartCommandExecParams {
|
||||
outgoing,
|
||||
request_id: request_for_task,
|
||||
process_id,
|
||||
exec_request,
|
||||
started_network_proxy: started_network_proxy_for_task,
|
||||
tty,
|
||||
stream_stdin,
|
||||
stream_stdout_stderr,
|
||||
output_bytes_cap,
|
||||
size,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
fn preserve_configured_deny_read_restrictions(
|
||||
file_system_sandbox_policy: &mut FileSystemSandboxPolicy,
|
||||
configured_file_system_sandbox_policy: &FileSystemSandboxPolicy,
|
||||
) {
|
||||
file_system_sandbox_policy
|
||||
.preserve_deny_read_restrictions_from(configured_file_system_sandbox_policy);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "command_exec_processor_tests.rs"]
|
||||
mod command_exec_processor_tests;
|
||||
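// Illustrative sketch (not part of the diff): the limit/flag invariants
// exec_one_off_command_inner enforces, condensed into one function. The
// struct and the default-cap value are hypothetical; the field names
// mirror CommandExecParams above.
const DEFAULT_OUTPUT_BYTES_CAP: u64 = 64 * 1024; // assumed value

struct ExecLimits {
    disable_output_cap: bool,
    output_bytes_cap: Option<u64>,
    disable_timeout: bool,
    timeout_ms: Option<i64>,
}

fn resolve_limits(limits: &ExecLimits) -> Result<(Option<u64>, Option<u64>), String> {
    // A "disable" flag and an explicit value are mutually exclusive.
    if limits.disable_output_cap && limits.output_bytes_cap.is_some() {
        return Err("cannot set both outputBytesCap and disableOutputCap".to_string());
    }
    if limits.disable_timeout && limits.timeout_ms.is_some() {
        return Err("cannot set both timeoutMs and disableTimeout".to_string());
    }
    // JSON integers arrive signed, so negatives are rejected via try_from.
    let timeout_ms = match limits.timeout_ms {
        Some(ms) => Some(
            u64::try_from(ms).map_err(|_| format!("timeoutMs must be non-negative, got {ms}"))?,
        ),
        None => None,
    };
    // An absent cap falls back to the default unless capping is disabled.
    let output_bytes_cap = if limits.disable_output_cap {
        None
    } else {
        Some(limits.output_bytes_cap.unwrap_or(DEFAULT_OUTPUT_BYTES_CAP))
    };
    Ok((output_bytes_cap, timeout_ms))
}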
@@ -1,38 +0,0 @@
use super::*;
use codex_protocol::permissions::FileSystemAccessMode;
use codex_protocol::permissions::FileSystemPath;
use codex_protocol::permissions::FileSystemSandboxEntry;
use codex_protocol::permissions::FileSystemSandboxPolicy;
use codex_utils_absolute_path::test_support::PathBufExt;
use codex_utils_absolute_path::test_support::test_path_buf;
use pretty_assertions::assert_eq;

#[test]
fn command_profile_preserves_configured_deny_read_restrictions() {
    let readable_entry = FileSystemSandboxEntry {
        path: FileSystemPath::Path {
            path: test_path_buf("/tmp/project").abs(),
        },
        access: FileSystemAccessMode::Read,
    };
    let deny_entry = FileSystemSandboxEntry {
        path: FileSystemPath::GlobPattern {
            pattern: "/tmp/project/**/*.env".to_string(),
        },
        access: FileSystemAccessMode::None,
    };
    let mut file_system_sandbox_policy =
        FileSystemSandboxPolicy::restricted(vec![readable_entry.clone()]);
    let mut configured_file_system_sandbox_policy =
        FileSystemSandboxPolicy::restricted(vec![deny_entry.clone()]);
    configured_file_system_sandbox_policy.glob_scan_max_depth = Some(2);

    CommandExecRequestProcessor::preserve_configured_deny_read_restrictions(
        &mut file_system_sandbox_policy,
        &configured_file_system_sandbox_policy,
    );

    let mut expected = FileSystemSandboxPolicy::restricted(vec![readable_entry, deny_entry]);
    expected.glob_scan_max_depth = Some(2);
    assert_eq!(file_system_sandbox_policy, expected);
}
@@ -1,37 +0,0 @@
use super::*;

fn cloud_requirements_load_error(err: &std::io::Error) -> Option<&CloudRequirementsLoadError> {
    let mut current: Option<&(dyn std::error::Error + 'static)> = err
        .get_ref()
        .map(|source| source as &(dyn std::error::Error + 'static));
    while let Some(source) = current {
        if let Some(cloud_error) = source.downcast_ref::<CloudRequirementsLoadError>() {
            return Some(cloud_error);
        }
        current = source.source();
    }
    None
}

pub(super) fn config_load_error(err: &std::io::Error) -> JSONRPCErrorError {
    let data = cloud_requirements_load_error(err).map(|cloud_error| {
        let mut data = serde_json::json!({
            "reason": "cloudRequirements",
            "errorCode": format!("{:?}", cloud_error.code()),
            "detail": cloud_error.to_string(),
        });
        if let Some(status_code) = cloud_error.status_code() {
            data["statusCode"] = serde_json::json!(status_code);
        }
        if cloud_error.code() == CloudRequirementsLoadErrorCode::Auth {
            data["action"] = serde_json::json!("relogin");
        }
        data
    });

    JSONRPCErrorError {
        code: INVALID_REQUEST_ERROR_CODE,
        message: format!("failed to load configuration: {err}"),
        data,
    }
}
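// Illustrative sketch (not part of the diff): walking an io::Error's
// source chain with downcast_ref, the technique
// cloud_requirements_load_error uses above. `MyError` is a hypothetical
// stand-in for CloudRequirementsLoadError.
use std::error::Error;

#[derive(Debug)]
struct MyError;

impl std::fmt::Display for MyError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "my error")
    }
}

impl Error for MyError {}

fn find_my_error(err: &std::io::Error) -> Option<&MyError> {
    // get_ref() exposes the wrapped error, if any; from there, follow
    // source() links until a downcast succeeds or the chain ends.
    let mut current: Option<&(dyn Error + 'static)> =
        err.get_ref().map(|source| source as &(dyn Error + 'static));
    while let Some(source) = current {
        if let Some(found) = source.downcast_ref::<MyError>() {
            return Some(found);
        }
        current = source.source();
    }
    None
}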
@@ -1,622 +0,0 @@
use std::sync::Arc;

use crate::config_manager::ConfigManager;
use crate::config_manager_service::ConfigManagerError;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::error_code::internal_error;
use crate::error_code::invalid_request;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;
use crate::transport::RemoteControlHandle;
use codex_analytics::AnalyticsEventsClient;
use codex_app_server_protocol::AppListUpdatedNotification;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigRequirements;
use codex_app_server_protocol::ConfigRequirementsReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::ConfiguredHookHandler;
use codex_app_server_protocol::ConfiguredHookMatcherGroup;
use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams;
use codex_app_server_protocol::ExperimentalFeatureEnablementSetResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ManagedHooksRequirements;
use codex_app_server_protocol::ModelProviderCapabilitiesReadResponse;
use codex_app_server_protocol::NetworkDomainPermission;
use codex_app_server_protocol::NetworkRequirements;
use codex_app_server_protocol::NetworkUnixSocketPermission;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ServerNotification;
use codex_chatgpt::connectors;
use codex_config::ConfigRequirementsToml;
use codex_config::HookEventsToml;
use codex_config::HookHandlerConfig as CoreHookHandlerConfig;
use codex_config::ManagedHooksRequirementsToml;
use codex_config::MatcherGroup as CoreMatcherGroup;
use codex_config::ResidencyRequirement as CoreResidencyRequirement;
use codex_config::SandboxModeRequirement as CoreSandboxModeRequirement;
use codex_core::ThreadManager;
use codex_features::Feature;
use codex_features::canonical_feature_for_key;
use codex_features::feature_for_key;
use codex_login::AuthManager;
use codex_model_provider::create_model_provider;
use codex_plugin::PluginId;
use codex_protocol::config_types::WebSearchMode;
use codex_protocol::protocol::Op;
use serde_json::json;
use std::path::PathBuf;

const SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT: &[&str] = &[
    "apps",
    "memories",
    "plugins",
    "remote_control",
    "tool_search",
    "tool_suggest",
    "tool_call_mcp_elicitation",
];

#[derive(Clone)]
pub(crate) struct ConfigRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
    config_manager: ConfigManager,
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    analytics_events_client: AnalyticsEventsClient,
    remote_control_handle: Option<RemoteControlHandle>,
}

impl ConfigRequestProcessor {
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        config_manager: ConfigManager,
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        analytics_events_client: AnalyticsEventsClient,
        remote_control_handle: Option<RemoteControlHandle>,
    ) -> Self {
        Self {
            outgoing,
            config_manager,
            auth_manager,
            thread_manager,
            analytics_events_client,
            remote_control_handle,
        }
    }

    pub(crate) async fn read(
        &self,
        params: ConfigReadParams,
    ) -> Result<ConfigReadResponse, JSONRPCErrorError> {
        let fallback_cwd = params.cwd.as_ref().map(PathBuf::from);
        let mut response = self.config_manager.read(params).await.map_err(map_error)?;
        let config = self.load_latest_config(fallback_cwd).await?;
        for feature_key in SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT {
            let Some(feature) = feature_for_key(feature_key) else {
                continue;
            };
            let features = response
                .config
                .additional
                .entry("features".to_string())
                .or_insert_with(|| json!({}));
            if !features.is_object() {
                *features = json!({});
            }
            if let Some(features) = features.as_object_mut() {
                features.insert(
                    (*feature_key).to_string(),
                    json!(config.features.enabled(feature)),
                );
            }
        }
        Ok(response)
    }

    pub(crate) async fn config_requirements_read(
        &self,
    ) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
        let requirements = self
            .config_manager
            .read_requirements()
            .await
            .map_err(map_error)?
            .map(map_requirements_toml_to_api);

        Ok(ConfigRequirementsReadResponse { requirements })
    }

    pub(crate) async fn value_write(
        &self,
        params: ConfigValueWriteParams,
    ) -> Result<ClientResponsePayload, JSONRPCErrorError> {
        self.handle_config_mutation_result(self.write_value(params).await)
            .await
            .map(ClientResponsePayload::ConfigValueWrite)
    }

    pub(crate) async fn batch_write(
        &self,
        params: ConfigBatchWriteParams,
    ) -> Result<ClientResponsePayload, JSONRPCErrorError> {
        self.handle_config_mutation_result(self.batch_write_inner(params).await)
            .await
            .map(ClientResponsePayload::ConfigBatchWrite)
    }

    pub(crate) async fn experimental_feature_enablement_set(
        &self,
        request_id: ConnectionRequestId,
        params: ExperimentalFeatureEnablementSetParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        let should_refresh_apps_list = params.enablement.get("apps").copied() == Some(true);
        let response = self
            .handle_config_mutation_result(self.set_experimental_feature_enablement(params).await)
            .await?;
        self.outgoing
            .send_response_as(
                request_id,
                ClientResponsePayload::ExperimentalFeatureEnablementSet(response),
            )
            .await;
        if should_refresh_apps_list {
            self.refresh_apps_list_after_experimental_feature_enablement_set()
                .await;
        }
        Ok(None)
    }

    pub(crate) async fn model_provider_capabilities_read(
        &self,
    ) -> Result<ModelProviderCapabilitiesReadResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let provider = create_model_provider(config.model_provider, /*auth_manager*/ None);
        let capabilities = provider.capabilities();
        Ok(ModelProviderCapabilitiesReadResponse {
            namespace_tools: capabilities.namespace_tools,
            image_generation: capabilities.image_generation,
            web_search: capabilities.web_search,
        })
    }

    pub(crate) async fn handle_config_mutation(&self) {
        self.thread_manager.plugins_manager().clear_cache();
        self.thread_manager.skills_manager().clear_cache();
        let Some(remote_control_handle) = &self.remote_control_handle else {
            return;
        };

        match self.load_latest_config(/*fallback_cwd*/ None).await {
            Ok(config) => {
                remote_control_handle.set_enabled(config.features.enabled(Feature::RemoteControl));
            }
            Err(error) => {
                tracing::warn!(
                    "failed to load config for remote control enablement refresh after config mutation: {}",
                    error.message
                );
            }
        }
    }

    async fn handle_config_mutation_result<T>(
        &self,
        result: std::result::Result<T, JSONRPCErrorError>,
    ) -> Result<T, JSONRPCErrorError> {
        let response = result?;
        self.handle_config_mutation().await;
        Ok(response)
    }

    async fn refresh_apps_list_after_experimental_feature_enablement_set(&self) {
        let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
            Ok(config) => config,
            Err(error) => {
                tracing::warn!(
                    "failed to load config for apps list refresh after experimental feature enablement: {}",
                    error.message
                );
                return;
            }
        };
        let auth = self.auth_manager.auth().await;
        if !config.features.apps_enabled_for_auth(
            auth.as_ref()
                .is_some_and(codex_login::CodexAuth::uses_codex_backend),
        ) {
            return;
        }

        let outgoing = Arc::clone(&self.outgoing);
        let environment_manager = self.thread_manager.environment_manager();
        tokio::spawn(async move {
            let (all_connectors_result, accessible_connectors_result) = tokio::join!(
                connectors::list_all_connectors_with_options(&config, /*force_refetch*/ true),
                connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
                    &config,
                    /*force_refetch*/ true,
                    &environment_manager,
                ),
            );
            let all_connectors = match all_connectors_result {
                Ok(connectors) => connectors,
                Err(err) => {
                    tracing::warn!(
                        "failed to force-refresh directory apps after experimental feature enablement: {err:#}"
                    );
                    return;
                }
            };
            let accessible_connectors = match accessible_connectors_result {
                Ok(status) => status.connectors,
                Err(err) => {
                    tracing::warn!(
                        "failed to force-refresh accessible apps after experimental feature enablement: {err:#}"
                    );
                    return;
                }
            };

            let data = connectors::with_app_enabled_state(
                connectors::merge_connectors_with_accessible(
                    all_connectors,
                    accessible_connectors,
                    /*all_connectors_loaded*/ true,
                ),
                &config,
            );
            outgoing
                .send_server_notification(ServerNotification::AppListUpdated(
                    AppListUpdatedNotification { data },
                ))
                .await;
        });
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<codex_core::config::Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| {
                internal_error(format!(
                    "failed to resolve feature override precedence: {err}"
                ))
            })
    }

    async fn write_value(
        &self,
        params: ConfigValueWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let pending_changes = codex_core_plugins::toggles::collect_plugin_enabled_candidates(
            [(&params.key_path, &params.value)].into_iter(),
        );
        let response = self
            .config_manager
            .write_value(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        Ok(response)
    }

    async fn batch_write_inner(
        &self,
        params: ConfigBatchWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let reload_user_config = params.reload_user_config;
        let pending_changes = codex_core_plugins::toggles::collect_plugin_enabled_candidates(
            params
                .edits
                .iter()
                .map(|edit| (&edit.key_path, &edit.value)),
        );
        let response = self
            .config_manager
            .batch_write(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        if reload_user_config {
            self.reload_user_config().await;
        }
        Ok(response)
    }

    async fn set_experimental_feature_enablement(
        &self,
        params: ExperimentalFeatureEnablementSetParams,
    ) -> Result<ExperimentalFeatureEnablementSetResponse, JSONRPCErrorError> {
        let ExperimentalFeatureEnablementSetParams { enablement } = params;
        for key in enablement.keys() {
            if canonical_feature_for_key(key).is_some() {
                if SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.contains(&key.as_str()) {
                    continue;
                }

                return Err(invalid_request(format!(
                    "unsupported feature enablement `{key}`: currently supported features are {}",
                    SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.join(", ")
                )));
            }

            let message = if let Some(feature) = feature_for_key(key) {
                format!(
                    "invalid feature enablement `{key}`: use canonical feature key `{}`",
                    feature.key()
                )
            } else {
                format!("invalid feature enablement `{key}`")
            };
            return Err(invalid_request(message));
        }

        if enablement.is_empty() {
            return Ok(ExperimentalFeatureEnablementSetResponse { enablement });
        }

        self.config_manager
            .extend_runtime_feature_enablement(
                enablement
                    .iter()
                    .map(|(name, enabled)| (name.clone(), *enabled)),
            )
            .map_err(|_| internal_error("failed to update feature enablement"))?;

        self.load_latest_config(/*fallback_cwd*/ None).await?;
        self.reload_user_config().await;

        Ok(ExperimentalFeatureEnablementSetResponse { enablement })
    }

    async fn reload_user_config(&self) {
        let thread_ids = self.thread_manager.list_thread_ids().await;
        for thread_id in thread_ids {
            let Ok(thread) = self.thread_manager.get_thread(thread_id).await else {
                continue;
            };
            if let Err(err) = thread.submit(Op::ReloadUserConfig).await {
                tracing::warn!("failed to request user config reload: {err}");
            }
        }
    }

    async fn emit_plugin_toggle_events(
        &self,
        pending_changes: std::collections::BTreeMap<String, bool>,
    ) {
        for (plugin_id, enabled) in pending_changes {
            let Ok(plugin_id) = PluginId::parse(&plugin_id) else {
                continue;
            };
            let metadata = codex_core_plugins::loader::installed_plugin_telemetry_metadata(
                self.config_manager.codex_home(),
                &plugin_id,
            )
            .await;
            if enabled {
                self.analytics_events_client.track_plugin_enabled(metadata);
            } else {
                self.analytics_events_client.track_plugin_disabled(metadata);
            }
        }
    }
}
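// Illustrative sketch (not part of the diff): the two-stage key check
// set_experimental_feature_enablement performs above. A key must be a
// known feature AND on the supported allow-list; known-but-unsupported
// and unknown keys fail with different messages. The lookup tables here
// are hypothetical stand-ins for codex_features.
const SUPPORTED: &[&str] = &["apps", "memories", "plugins"];
const KNOWN: &[&str] = &["apps", "memories", "plugins", "tool_search"];

fn check_enablement_key(key: &str) -> Result<(), String> {
    if KNOWN.contains(&key) {
        if SUPPORTED.contains(&key) {
            return Ok(());
        }
        // Recognized feature, but not settable through this API yet.
        return Err(format!(
            "unsupported feature enablement `{key}`: currently supported features are {}",
            SUPPORTED.join(", ")
        ));
    }
    Err(format!("invalid feature enablement `{key}`"))
}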

fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
    ConfigRequirements {
        allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
            policies
                .into_iter()
                .map(codex_app_server_protocol::AskForApproval::from)
                .collect()
        }),
        allowed_approvals_reviewers: requirements.allowed_approvals_reviewers.map(|reviewers| {
            reviewers
                .into_iter()
                .map(codex_app_server_protocol::ApprovalsReviewer::from)
                .collect()
        }),
        allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
            modes
                .into_iter()
                .filter_map(map_sandbox_mode_requirement_to_api)
                .collect()
        }),
        allowed_web_search_modes: requirements.allowed_web_search_modes.map(|modes| {
            let mut normalized = modes
                .into_iter()
                .map(Into::into)
                .collect::<Vec<WebSearchMode>>();
            if !normalized.contains(&WebSearchMode::Disabled) {
                normalized.push(WebSearchMode::Disabled);
            }
            normalized
        }),
        feature_requirements: requirements
            .feature_requirements
            .map(|requirements| requirements.entries),
        hooks: requirements.hooks.map(map_hooks_requirements_to_api),
        enforce_residency: requirements
            .enforce_residency
            .map(map_residency_requirement_to_api),
        network: requirements.network.map(map_network_requirements_to_api),
    }
}

fn map_hooks_requirements_to_api(hooks: ManagedHooksRequirementsToml) -> ManagedHooksRequirements {
    let ManagedHooksRequirementsToml {
        managed_dir,
        windows_managed_dir,
        hooks,
    } = hooks;
    let HookEventsToml {
        pre_tool_use,
        permission_request,
        post_tool_use,
        session_start,
        user_prompt_submit,
        stop,
    } = hooks;

    ManagedHooksRequirements {
        managed_dir,
        windows_managed_dir,
        pre_tool_use: map_hook_matcher_groups_to_api(pre_tool_use),
        permission_request: map_hook_matcher_groups_to_api(permission_request),
        post_tool_use: map_hook_matcher_groups_to_api(post_tool_use),
        session_start: map_hook_matcher_groups_to_api(session_start),
        user_prompt_submit: map_hook_matcher_groups_to_api(user_prompt_submit),
        stop: map_hook_matcher_groups_to_api(stop),
    }
}

fn map_hook_matcher_groups_to_api(
    groups: Vec<CoreMatcherGroup>,
) -> Vec<ConfiguredHookMatcherGroup> {
    groups
        .into_iter()
        .map(map_hook_matcher_group_to_api)
        .collect()
}

fn map_hook_matcher_group_to_api(group: CoreMatcherGroup) -> ConfiguredHookMatcherGroup {
    ConfiguredHookMatcherGroup {
        matcher: group.matcher,
        hooks: group
            .hooks
            .into_iter()
            .map(map_hook_handler_to_api)
            .collect(),
    }
}

fn map_hook_handler_to_api(handler: CoreHookHandlerConfig) -> ConfiguredHookHandler {
    match handler {
        CoreHookHandlerConfig::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        } => ConfiguredHookHandler::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        },
        CoreHookHandlerConfig::Prompt {} => ConfiguredHookHandler::Prompt {},
        CoreHookHandlerConfig::Agent {} => ConfiguredHookHandler::Agent {},
    }
}

fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
    match mode {
        CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
        CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
        CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
        CoreSandboxModeRequirement::ExternalSandbox => None,
    }
}

fn map_residency_requirement_to_api(
    residency: CoreResidencyRequirement,
) -> codex_app_server_protocol::ResidencyRequirement {
    match residency {
        CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
    }
}

fn map_network_requirements_to_api(
    network: codex_config::NetworkRequirementsToml,
) -> NetworkRequirements {
    let allowed_domains = network
        .domains
        .as_ref()
        .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains);
    let denied_domains = network
        .domains
        .as_ref()
        .and_then(codex_config::NetworkDomainPermissionsToml::denied_domains);
    let allow_unix_sockets = network
        .unix_sockets
        .as_ref()
        .map(codex_config::NetworkUnixSocketPermissionsToml::allow_unix_sockets)
        .filter(|entries| !entries.is_empty());

    NetworkRequirements {
        enabled: network.enabled,
        http_port: network.http_port,
        socks_port: network.socks_port,
        allow_upstream_proxy: network.allow_upstream_proxy,
        dangerously_allow_non_loopback_proxy: network.dangerously_allow_non_loopback_proxy,
        dangerously_allow_all_unix_sockets: network.dangerously_allow_all_unix_sockets,
        domains: network.domains.map(|domains| {
            domains
                .entries
                .into_iter()
                .map(|(pattern, permission)| {
                    (pattern, map_network_domain_permission_to_api(permission))
                })
                .collect()
        }),
        managed_allowed_domains_only: network.managed_allowed_domains_only,
        allowed_domains,
        denied_domains,
        unix_sockets: network.unix_sockets.map(|unix_sockets| {
            unix_sockets
                .entries
                .into_iter()
                .map(|(path, permission)| {
                    (path, map_network_unix_socket_permission_to_api(permission))
                })
                .collect()
        }),
        allow_unix_sockets,
        allow_local_binding: network.allow_local_binding,
    }
}

fn map_network_domain_permission_to_api(
    permission: codex_config::NetworkDomainPermissionToml,
) -> NetworkDomainPermission {
    match permission {
        codex_config::NetworkDomainPermissionToml::Allow => NetworkDomainPermission::Allow,
        codex_config::NetworkDomainPermissionToml::Deny => NetworkDomainPermission::Deny,
    }
}

fn map_network_unix_socket_permission_to_api(
    permission: codex_config::NetworkUnixSocketPermissionToml,
) -> NetworkUnixSocketPermission {
    match permission {
        codex_config::NetworkUnixSocketPermissionToml::Allow => NetworkUnixSocketPermission::Allow,
        codex_config::NetworkUnixSocketPermissionToml::None => NetworkUnixSocketPermission::None,
    }
}

fn map_error(err: ConfigManagerError) -> JSONRPCErrorError {
    if let Some(code) = err.write_error_code() {
        return config_write_error(code, err.to_string());
    }

    internal_error(err.to_string())
}

fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: INVALID_REQUEST_ERROR_CODE,
        message: message.into(),
        data: Some(json!({
            "config_write_error_code": code,
        })),
    }
}
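// Illustrative sketch (not part of the diff): the JSON-RPC error payload
// shape config_write_error builds above, spelled out with serde_json.
// The numeric code is a placeholder, not the crate's actual
// INVALID_REQUEST_ERROR_CODE value.
use serde_json::json;

fn config_write_error_payload(code: &str, message: &str) -> serde_json::Value {
    json!({
        "code": -32600, // placeholder for INVALID_REQUEST_ERROR_CODE
        "message": message,
        "data": {
            "config_write_error_code": code,
        },
    })
}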
@@ -1,37 +0,0 @@
use super::*;

fn migration_item(
    item_type: ExternalAgentConfigMigrationItemType,
) -> ExternalAgentConfigMigrationItem {
    ExternalAgentConfigMigrationItem {
        item_type,
        description: String::new(),
        cwd: None,
        details: None,
    }
}

#[test]
fn migration_items_that_update_runtime_sources_trigger_refresh() {
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Config,
    )]));
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Skills,
    )]));
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::McpServerConfig,
    )]));
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Hooks,
    )]));
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Commands,
    )]));
    assert!(migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Plugins,
    )]));
    assert!(!migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Sessions,
    )]));
}
@@ -1,245 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct FeedbackRequestProcessor {
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    config: Arc<Config>,
    feedback: CodexFeedback,
    log_db: Option<LogDbLayer>,
    state_db: Option<StateDbHandle>,
}

impl FeedbackRequestProcessor {
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        config: Arc<Config>,
        feedback: CodexFeedback,
        log_db: Option<LogDbLayer>,
        state_db: Option<StateDbHandle>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            config,
            feedback,
            log_db,
            state_db,
        }
    }

    pub(crate) async fn feedback_upload(
        &self,
        params: FeedbackUploadParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.upload_feedback_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    async fn upload_feedback_response(
        &self,
        params: FeedbackUploadParams,
    ) -> Result<FeedbackUploadResponse, JSONRPCErrorError> {
        if !self.config.feedback_enabled {
            return Err(invalid_request(
                "sending feedback is disabled by configuration",
            ));
        }

        let FeedbackUploadParams {
            classification,
            reason,
            thread_id,
            include_logs,
            extra_log_files,
            tags,
        } = params;

        let conversation_id = match thread_id.as_deref() {
            Some(thread_id) => match ThreadId::from_string(thread_id) {
                Ok(conversation_id) => Some(conversation_id),
                Err(err) => return Err(invalid_request(format!("invalid thread id: {err}"))),
            },
            None => None,
        };

        if let Some(chatgpt_user_id) = self
            .auth_manager
            .auth_cached()
            .and_then(|auth| auth.get_chatgpt_user_id())
        {
            tracing::info!(target: "feedback_tags", chatgpt_user_id);
        }
        let snapshot = self.feedback.snapshot(conversation_id);
        let thread_id = snapshot.thread_id.clone();
        let (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx) = if include_logs {
            if let Some(log_db) = self.log_db.as_ref() {
                log_db.flush().await;
            }
            let state_db_ctx = self.state_db.clone();
            let feedback_thread_ids = match conversation_id {
                Some(conversation_id) => match self
                    .thread_manager
                    .list_agent_subtree_thread_ids(conversation_id)
                    .await
                {
                    Ok(thread_ids) => thread_ids,
                    Err(err) => {
                        warn!(
                            "failed to list feedback subtree for thread_id={conversation_id}: {err}"
                        );
                        let mut thread_ids = vec![conversation_id];
                        if let Some(state_db_ctx) = state_db_ctx.as_ref() {
                            for status in [
                                codex_state::DirectionalThreadSpawnEdgeStatus::Open,
                                codex_state::DirectionalThreadSpawnEdgeStatus::Closed,
                            ] {
                                match state_db_ctx
                                    .list_thread_spawn_descendants_with_status(
                                        conversation_id,
                                        status,
                                    )
                                    .await
                                {
                                    Ok(descendant_ids) => thread_ids.extend(descendant_ids),
                                    Err(err) => warn!(
                                        "failed to list persisted feedback subtree for thread_id={conversation_id}: {err}"
                                    ),
                                }
                            }
                        }
                        thread_ids
                    }
                },
                None => Vec::new(),
            };
            let sqlite_feedback_logs = if let Some(state_db_ctx) = state_db_ctx.as_ref()
                && !feedback_thread_ids.is_empty()
            {
                let thread_id_texts = feedback_thread_ids
                    .iter()
                    .map(ToString::to_string)
                    .collect::<Vec<_>>();
                let thread_id_refs = thread_id_texts
                    .iter()
                    .map(String::as_str)
                    .collect::<Vec<_>>();
                match state_db_ctx
                    .query_feedback_logs_for_threads(&thread_id_refs)
                    .await
                {
                    Ok(logs) if logs.is_empty() => None,
                    Ok(logs) => Some(logs),
                    Err(err) => {
                        let thread_ids = thread_id_texts.join(", ");
                        warn!(
                            "failed to query feedback logs from sqlite for thread_ids=[{thread_ids}]: {err}"
                        );
                        None
                    }
                }
            } else {
                None
            };
            (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx)
        } else {
            (Vec::new(), None, None)
        };

        let mut attachment_paths = Vec::new();
        let mut seen_attachment_paths = HashSet::new();
        if include_logs {
            for feedback_thread_id in &feedback_thread_ids {
                let Some(rollout_path) = self
                    .resolve_rollout_path(*feedback_thread_id, state_db_ctx.as_ref())
                    .await
                else {
                    continue;
                };
                if seen_attachment_paths.insert(rollout_path.clone()) {
                    attachment_paths.push(FeedbackAttachmentPath {
                        path: rollout_path,
                        attachment_filename_override: None,
                    });
                }
            }
            if let Some(conversation_id) = conversation_id
                && let Ok(conversation) = self.thread_manager.get_thread(conversation_id).await
                && let Some(guardian_rollout_path) =
                    conversation.guardian_trunk_rollout_path().await
                && seen_attachment_paths.insert(guardian_rollout_path.clone())
            {
                attachment_paths.push(FeedbackAttachmentPath {
                    path: guardian_rollout_path,
                    attachment_filename_override: Some(auto_review_rollout_filename(
                        conversation_id,
                    )),
                });
            }
        }
        if let Some(extra_log_files) = extra_log_files {
            for extra_log_file in extra_log_files {
                if seen_attachment_paths.insert(extra_log_file.clone()) {
                    attachment_paths.push(FeedbackAttachmentPath {
                        path: extra_log_file,
                        attachment_filename_override: None,
                    });
                }
            }
        }

        let session_source = self.thread_manager.session_source();

        let upload_result = tokio::task::spawn_blocking(move || {
            snapshot.upload_feedback(FeedbackUploadOptions {
                classification: &classification,
                reason: reason.as_deref(),
                tags: tags.as_ref(),
                include_logs,
                extra_attachment_paths: &attachment_paths,
                session_source: Some(session_source),
                logs_override: sqlite_feedback_logs,
            })
        })
        .await;

        let upload_result = match upload_result {
            Ok(result) => result,
            Err(join_err) => {
                return Err(internal_error(format!(
                    "failed to upload feedback: {join_err}"
                )));
            }
        };

        upload_result.map_err(|err| internal_error(format!("failed to upload feedback: {err}")))?;
        Ok(FeedbackUploadResponse { thread_id })
    }

    async fn resolve_rollout_path(
        &self,
        conversation_id: ThreadId,
        state_db_ctx: Option<&StateDbHandle>,
    ) -> Option<PathBuf> {
        if let Ok(conversation) = self.thread_manager.get_thread(conversation_id).await
            && let Some(rollout_path) = conversation.rollout_path()
        {
            return Some(rollout_path);
        }

        let state_db_ctx = state_db_ctx?;
        state_db_ctx
            .find_rollout_path_by_id(conversation_id, /*archived_only*/ None)
            .await
            .unwrap_or_else(|err| {
                warn!("failed to resolve rollout path for thread_id={conversation_id}: {err}");
                None
            })
    }
}

fn auto_review_rollout_filename(thread_id: ThreadId) -> String {
    format!("auto-review-rollout-{thread_id}.jsonl")
}
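// Illustrative sketch (not part of the diff): the spawn_blocking pattern
// upload_feedback_response uses above — run a blocking upload off the
// async runtime, then fold the JoinError and the upload's own error into
// one error string. `do_upload` is a hypothetical stand-in; tokio is the
// runtime already used throughout this crate.
fn do_upload() -> Result<(), String> {
    Ok(())
}

async fn upload_off_runtime() -> Result<(), String> {
    match tokio::task::spawn_blocking(do_upload).await {
        // The task ran to completion; propagate its own Result.
        Ok(result) => result.map_err(|err| format!("failed to upload feedback: {err}")),
        // The task panicked or was cancelled before finishing.
        Err(join_err) => Err(format!("failed to upload feedback: {join_err}")),
    }
}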
@@ -1,36 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct GitRequestProcessor;

impl GitRequestProcessor {
    pub(crate) fn new() -> Self {
        Self
    }

    pub(crate) async fn git_diff_to_remote(
        &self,
        params: GitDiffToRemoteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.git_diff_to_origin(params.cwd)
            .await
            .map(|response| Some(response.into()))
    }

    async fn git_diff_to_origin(
        &self,
        cwd: PathBuf,
    ) -> Result<GitDiffToRemoteResponse, JSONRPCErrorError> {
        git_diff_to_remote(&cwd)
            .await
            .map(|value| GitDiffToRemoteResponse {
                sha: value.sha,
                diff: value.diff,
            })
            .ok_or_else(|| {
                invalid_request(format!(
                    "failed to compute git diff to remote for cwd: {cwd:?}"
                ))
            })
    }
}
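// Illustrative sketch (not part of the diff): converting an Option into a
// request error with ok_or_else, as git_diff_to_origin does above when no
// diff can be computed. The function is a hypothetical reduction.
use std::path::Path;

fn diff_or_error(diff: Option<String>, cwd: &Path) -> Result<String, String> {
    diff.ok_or_else(|| format!("failed to compute git diff to remote for cwd: {cwd:?}"))
}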
@@ -1,184 +0,0 @@
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;

use axum::http::HeaderValue;
use codex_analytics::AppServerRpcTransport;
use codex_login::default_client::SetOriginatorError;
use codex_login::default_client::USER_AGENT_SUFFIX;
use codex_login::default_client::get_codex_user_agent;
use codex_login::default_client::set_default_client_residency_requirement;
use codex_login::default_client::set_default_originator;

use super::*;
use crate::message_processor::ConnectionSessionState;
use crate::message_processor::InitializedConnectionSessionState;

#[derive(Clone)]
pub(crate) struct InitializeRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
    analytics_events_client: AnalyticsEventsClient,
    config: Arc<Config>,
    config_warnings: Arc<Vec<ConfigWarningNotification>>,
    rpc_transport: AppServerRpcTransport,
}

impl InitializeRequestProcessor {
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        analytics_events_client: AnalyticsEventsClient,
        config: Arc<Config>,
        config_warnings: Vec<ConfigWarningNotification>,
        rpc_transport: AppServerRpcTransport,
    ) -> Self {
        Self {
            outgoing,
            analytics_events_client,
            config,
            config_warnings: Arc::new(config_warnings),
            rpc_transport,
        }
    }

    pub(crate) async fn initialize(
        &self,
        connection_id: ConnectionId,
        request_id: RequestId,
        params: InitializeParams,
        session: &ConnectionSessionState,
        // `Some(...)` means the caller wants initialize to immediately mark the
        // connection outbound-ready. Websocket JSON-RPC calls pass `None` so
        // lib.rs can deliver connection-scoped initialize notifications first.
        outbound_initialized: Option<&AtomicBool>,
    ) -> Result<bool, JSONRPCErrorError> {
        let connection_request_id = ConnectionRequestId {
            connection_id,
            request_id,
        };
        if session.initialized() {
            return Err(invalid_request("Already initialized"));
        }

        // TODO(maxj): Revisit capability scoping for `experimental_api_enabled`.
        // Current behavior is per-connection. Reviewer feedback notes this can
        // create odd cross-client behavior (for example dynamic tool calls on a
        // shared thread when another connected client did not opt into
        // experimental API). Proposed direction is instance-global first-write-wins
        // with initialize-time mismatch rejection.
        let analytics_initialize_params = params.clone();
        let (experimental_api_enabled, opt_out_notification_methods) = match params.capabilities {
            Some(capabilities) => (
                capabilities.experimental_api,
                capabilities
                    .opt_out_notification_methods
                    .unwrap_or_default(),
            ),
            None => (false, Vec::new()),
        };
        let ClientInfo {
            name,
            title: _title,
            version,
        } = params.client_info;
        // Validate before committing; set_default_originator validates while
        // mutating process-global metadata.
        if HeaderValue::from_str(&name).is_err() {
            return Err(invalid_request(format!(
                "Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value."
            )));
        }
        let originator = name.clone();
        let user_agent_suffix = format!("{name}; {version}");
        let codex_home = self.config.codex_home.clone();
        if session
            .initialize(InitializedConnectionSessionState {
                experimental_api_enabled,
                opted_out_notification_methods: opt_out_notification_methods.into_iter().collect(),
                app_server_client_name: name.clone(),
                client_version: version,
            })
            .is_err()
        {
            return Err(invalid_request("Already initialized"));
        }

        // Only the request that wins session initialization may mutate
        // process-global client metadata.
        if let Err(error) = set_default_originator(originator.clone()) {
            match error {
                SetOriginatorError::InvalidHeaderValue => {
                    tracing::warn!(
                        client_info_name = %name,
                        "validated clientInfo.name was rejected while setting originator"
                    );
                }
                SetOriginatorError::AlreadyInitialized => {
                    // No-op. This is expected to happen if the originator is already set via env var.
                    // TODO(owen): Once we remove support for CODEX_INTERNAL_ORIGINATOR_OVERRIDE,
                    // this will be an unexpected state and we can return a JSON-RPC error indicating
                    // internal server error.
                }
            }
        }
        self.analytics_events_client.track_initialize(
            connection_id.0,
            analytics_initialize_params,
            originator,
            self.rpc_transport,
        );
        set_default_client_residency_requirement(self.config.enforce_residency.value());
        if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
            *suffix = Some(user_agent_suffix);
        }

        let user_agent = get_codex_user_agent();
        let response = InitializeResponse {
            user_agent,
            codex_home,
            platform_family: std::env::consts::FAMILY.to_string(),
            platform_os: std::env::consts::OS.to_string(),
        };

        self.outgoing
            .send_response(connection_request_id, response)
            .await;

        if let Some(outbound_initialized) = outbound_initialized {
            outbound_initialized.store(true, Ordering::Release);
            return Ok(true);
        }

        Ok(false)
    }

    pub(crate) async fn send_initialize_notifications_to_connection(
        &self,
        connection_id: ConnectionId,
    ) {
        for notification in self.config_warnings.iter().cloned() {
            self.outgoing
                .send_server_notification_to_connections(
                    &[connection_id],
                    ServerNotification::ConfigWarning(notification),
                )
                .await;
        }
    }

    pub(crate) async fn send_initialize_notifications(&self) {
        for notification in self.config_warnings.iter().cloned() {
            self.outgoing
                .send_server_notification(ServerNotification::ConfigWarning(notification))
                .await;
        }
    }

    pub(crate) fn track_initialized_request(
        &self,
        connection_id: ConnectionId,
        request_id: RequestId,
        request: &ClientRequest,
    ) {
        self.analytics_events_client
            .track_request(connection_id.0, request_id, request);
    }
}
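// Illustrative sketch (not part of the diff): validating clientInfo.name
// as an HTTP header value before committing any process-global state, the
// ordering initialize() enforces above. Assumes the axum dependency the
// file already imports.
use axum::http::HeaderValue;

fn validate_originator(name: &str) -> Result<(), String> {
    // Reject up front; only a validated name may later be written into
    // global client metadata (originator, user-agent suffix).
    if HeaderValue::from_str(name).is_err() {
        return Err(format!(
            "Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value."
        ));
    }
    Ok(())
}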
@@ -1,141 +0,0 @@
use super::*;

#[derive(Clone)]
pub(crate) struct MarketplaceRequestProcessor {
    config: Arc<Config>,
    config_manager: ConfigManager,
    thread_manager: Arc<ThreadManager>,
}

impl MarketplaceRequestProcessor {
    pub(crate) fn new(
        config: Arc<Config>,
        config_manager: ConfigManager,
        thread_manager: Arc<ThreadManager>,
    ) -> Self {
        Self {
            config,
            config_manager,
            thread_manager,
        }
    }

    pub(crate) async fn marketplace_add(
        &self,
        params: MarketplaceAddParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_add_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn marketplace_remove(
        &self,
        params: MarketplaceRemoveParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_remove_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn marketplace_upgrade(
        &self,
        params: MarketplaceUpgradeParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_upgrade_response_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    async fn marketplace_remove_inner(
        &self,
        params: MarketplaceRemoveParams,
    ) -> Result<MarketplaceRemoveResponse, JSONRPCErrorError> {
        remove_marketplace(
            self.config.codex_home.to_path_buf(),
            CoreMarketplaceRemoveRequest {
                marketplace_name: params.marketplace_name,
            },
        )
        .await
        .map(|outcome| MarketplaceRemoveResponse {
            marketplace_name: outcome.marketplace_name,
            installed_root: outcome.removed_installed_root,
        })
        .map_err(|err| match err {
            MarketplaceRemoveError::InvalidRequest(message) => invalid_request(message),
            MarketplaceRemoveError::Internal(message) => internal_error(message),
        })
    }

    async fn marketplace_upgrade_response_inner(
        &self,
        params: MarketplaceUpgradeParams,
    ) -> Result<MarketplaceUpgradeResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let plugins_manager = self.thread_manager.plugins_manager();
        let MarketplaceUpgradeParams { marketplace_name } = params;
        let plugins_input = config.plugins_config_input();

        let outcome = tokio::task::spawn_blocking(move || {
            plugins_manager.upgrade_configured_marketplaces_for_config(
                &plugins_input,
                marketplace_name.as_deref(),
            )
        })
        .await
        .map_err(|err| internal_error(format!("failed to upgrade marketplaces: {err}")))?
        .map_err(invalid_request)?;

        Ok(MarketplaceUpgradeResponse {
            selected_marketplaces: outcome.selected_marketplaces,
            upgraded_roots: outcome.upgraded_roots,
            errors: outcome
                .errors
                .into_iter()
                .map(|err| MarketplaceUpgradeErrorInfo {
                    marketplace_name: err.marketplace_name,
                    message: err.message,
                })
                .collect(),
        })
    }

    async fn marketplace_add_inner(
        &self,
        params: MarketplaceAddParams,
    ) -> Result<MarketplaceAddResponse, JSONRPCErrorError> {
        add_marketplace_to_codex_home(
            self.config.codex_home.to_path_buf(),
            MarketplaceAddRequest {
                source: params.source,
                ref_name: params.ref_name,
                sparse_paths: params.sparse_paths.unwrap_or_default(),
            },
        )
        .await
        .map(|outcome| MarketplaceAddResponse {
            marketplace_name: outcome.marketplace_name,
            installed_root: outcome.installed_root,
            already_added: outcome.already_added,
        })
        .map_err(|err| match err {
            MarketplaceAddError::InvalidRequest(message) => invalid_request(message),
            MarketplaceAddError::Internal(message) => internal_error(message),
        })
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }
}
@@ -1,513 +0,0 @@
use super::*;

const MCP_TOOL_THREAD_ID_META_KEY: &str = "threadId";

#[derive(Clone)]
pub(crate) struct McpRequestProcessor {
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    config_manager: ConfigManager,
}

impl McpRequestProcessor {
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        outgoing: Arc<OutgoingMessageSender>,
        config_manager: ConfigManager,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            outgoing,
            config_manager,
        }
    }

    pub(crate) async fn mcp_server_oauth_login(
        &self,
        params: McpServerOauthLoginParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.mcp_server_oauth_login_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn mcp_server_refresh(
        &self,
        params: Option<()>,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.mcp_server_refresh_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn mcp_server_status_list(
        &self,
        request_id: &ConnectionRequestId,
        params: ListMcpServerStatusParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.list_mcp_server_status(request_id, params)
            .await
            .map(|()| None)
    }

    pub(crate) async fn mcp_resource_read(
        &self,
        request_id: &ConnectionRequestId,
        params: McpResourceReadParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.read_mcp_resource(request_id, params)
            .await
            .map(|()| None)
    }

    pub(crate) async fn mcp_server_tool_call(
        &self,
        request_id: &ConnectionRequestId,
        params: McpServerToolCallParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.call_mcp_server_tool(request_id, params)
            .await
            .map(|()| None)
    }

    async fn mcp_server_refresh_response(
        &self,
        _params: Option<()>,
    ) -> Result<McpServerRefreshResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        Self::queue_mcp_server_refresh_for_config(&self.thread_manager, &config).await?;
        Ok(McpServerRefreshResponse {})
    }

    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }

    async fn load_thread(
        &self,
        thread_id: &str,
    ) -> Result<(ThreadId, Arc<CodexThread>), JSONRPCErrorError> {
        let thread_id = ThreadId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("invalid thread id: {err}"),
            data: None,
        })?;

        let thread = self
            .thread_manager
            .get_thread(thread_id)
            .await
            .map_err(|_| JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("thread not found: {thread_id}"),
                data: None,
            })?;

        Ok((thread_id, thread))
    }

    pub(super) async fn queue_mcp_server_refresh_for_config(
        thread_manager: &Arc<ThreadManager>,
        config: &Config,
    ) -> Result<(), JSONRPCErrorError> {
        let configured_servers = thread_manager
            .mcp_manager()
            .configured_servers(config)
            .await;
        let mcp_servers = match serde_json::to_value(configured_servers) {
            Ok(value) => value,
            Err(err) => {
                return Err(JSONRPCErrorError {
                    code: INTERNAL_ERROR_CODE,
                    message: format!("failed to serialize MCP servers: {err}"),
                    data: None,
                });
            }
        };

        let mcp_oauth_credentials_store_mode =
            match serde_json::to_value(config.mcp_oauth_credentials_store_mode) {
                Ok(value) => value,
                Err(err) => {
                    return Err(JSONRPCErrorError {
                        code: INTERNAL_ERROR_CODE,
                        message: format!(
                            "failed to serialize MCP OAuth credentials store mode: {err}"
                        ),
                        data: None,
                    });
                }
            };

        let refresh_config = McpServerRefreshConfig {
            mcp_servers,
            mcp_oauth_credentials_store_mode,
        };

        // Refresh requests are queued per thread; each thread rebuilds MCP connections on its next
        // active turn to avoid work for threads that never resume.
        thread_manager.refresh_mcp_servers(refresh_config).await;
        Ok(())
    }

    async fn mcp_server_oauth_login_response(
        &self,
        params: McpServerOauthLoginParams,
    ) -> Result<McpServerOauthLoginResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let McpServerOauthLoginParams {
            name,
            scopes,
            timeout_secs,
        } = params;

        let configured_servers = self
            .thread_manager
            .mcp_manager()
            .configured_servers(&config)
            .await;
        let Some(server) = configured_servers.get(&name) else {
            return Err(invalid_request(format!(
                "No MCP server named '{name}' found."
            )));
        };

        let (url, http_headers, env_http_headers) = match &server.transport {
            McpServerTransportConfig::StreamableHttp {
                url,
                http_headers,
                env_http_headers,
                ..
            } => (url.clone(), http_headers.clone(), env_http_headers.clone()),
            _ => {
                return Err(invalid_request(
                    "OAuth login is only supported for streamable HTTP servers.",
                ));
            }
        };

        let discovered_scopes = if scopes.is_none() && server.scopes.is_none() {
            discover_supported_scopes(&server.transport).await
        } else {
            None
        };
        let resolved_scopes =
            resolve_oauth_scopes(scopes, server.scopes.clone(), discovered_scopes);

        let handle = perform_oauth_login_return_url(
            &name,
            &url,
            config.mcp_oauth_credentials_store_mode,
            http_headers,
            env_http_headers,
            &resolved_scopes.scopes,
            server.oauth_resource.as_deref(),
            timeout_secs,
            config.mcp_oauth_callback_port,
            config.mcp_oauth_callback_url.as_deref(),
        )
        .await
        .map_err(|err| internal_error(format!("failed to login to MCP server '{name}': {err}")))?;
        let authorization_url = handle.authorization_url().to_string();
        let notification_name = name.clone();
        let outgoing = Arc::clone(&self.outgoing);

        tokio::spawn(async move {
            let (success, error) = match handle.wait().await {
                Ok(()) => (true, None),
                Err(err) => (false, Some(err.to_string())),
            };

            let notification = ServerNotification::McpServerOauthLoginCompleted(
                McpServerOauthLoginCompletedNotification {
                    name: notification_name,
                    success,
                    error,
                },
            );
            outgoing.send_server_notification(notification).await;
        });

        Ok(McpServerOauthLoginResponse { authorization_url })
    }

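    // Replies out-of-band: this returns immediately and the status list is
    // sent through `outgoing` from a spawned task once the snapshot is ready.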
    async fn list_mcp_server_status(
        &self,
        request_id: &ConnectionRequestId,
        params: ListMcpServerStatusParams,
    ) -> Result<(), JSONRPCErrorError> {
        let request = request_id.clone();

        let outgoing = Arc::clone(&self.outgoing);
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let mcp_config = config
            .to_mcp_config(self.thread_manager.plugins_manager().as_ref())
            .await;
        let auth = self.auth_manager.auth().await;
        let environment_manager = self.thread_manager.environment_manager();
        let runtime_environment = match environment_manager.default_environment() {
            Some(environment) => {
                // Status listing has no turn cwd. This fallback is used only
                // by executor-backed stdio MCPs whose config omits `cwd`.
                McpRuntimeEnvironment::new(environment, config.cwd.to_path_buf())
            }
            None => McpRuntimeEnvironment::new(
                environment_manager.local_environment(),
                config.cwd.to_path_buf(),
            ),
        };

        tokio::spawn(async move {
            Self::list_mcp_server_status_task(
                outgoing,
                request,
                params,
                config,
                mcp_config,
                auth,
                runtime_environment,
            )
            .await;
        });
        Ok(())
    }

    async fn list_mcp_server_status_task(
        outgoing: Arc<OutgoingMessageSender>,
        request_id: ConnectionRequestId,
        params: ListMcpServerStatusParams,
        config: Config,
        mcp_config: codex_mcp::McpConfig,
        auth: Option<CodexAuth>,
        runtime_environment: McpRuntimeEnvironment,
    ) {
        let result = Self::list_mcp_server_status_response(
            request_id.request_id.to_string(),
            params,
            config,
            mcp_config,
            auth,
            runtime_environment,
        )
        .await;
        outgoing.send_result(request_id, result).await;
    }

    async fn list_mcp_server_status_response(
        request_id: String,
        params: ListMcpServerStatusParams,
        config: Config,
        mcp_config: codex_mcp::McpConfig,
        auth: Option<CodexAuth>,
        runtime_environment: McpRuntimeEnvironment,
    ) -> Result<ListMcpServerStatusResponse, JSONRPCErrorError> {
        let detail = match params.detail.unwrap_or(McpServerStatusDetail::Full) {
            McpServerStatusDetail::Full => McpSnapshotDetail::Full,
            McpServerStatusDetail::ToolsAndAuthOnly => McpSnapshotDetail::ToolsAndAuthOnly,
        };

        let snapshot = collect_mcp_server_status_snapshot_with_detail(
            &mcp_config,
            auth.as_ref(),
            request_id,
            runtime_environment,
            detail,
        )
        .await;

        let effective_servers = effective_mcp_servers(&mcp_config, auth.as_ref());
        let McpServerStatusSnapshot {
            tools_by_server,
            resources,
            resource_templates,
            auth_statuses,
        } = snapshot;

        let mut server_names: Vec<String> = config
            .mcp_servers
            .keys()
            .cloned()
            // Include built-in/plugin MCP servers that are present in the
            // effective runtime config even when they are not user-declared in
            // `config.mcp_servers`.
            .chain(effective_servers.keys().cloned())
            .chain(auth_statuses.keys().cloned())
            .chain(resources.keys().cloned())
            .chain(resource_templates.keys().cloned())
            .collect();
        server_names.sort();
        server_names.dedup();

        let total = server_names.len();
        let limit = params.limit.unwrap_or(total as u32).max(1) as usize;
        let effective_limit = limit.min(total);
        let start = match params.cursor {
            Some(cursor) => match cursor.parse::<usize>() {
                Ok(idx) => idx,
                Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
            },
            None => 0,
        };

        if start > total {
            return Err(invalid_request(format!(
                "cursor {start} exceeds total MCP servers {total}"
            )));
        }

        let end = start.saturating_add(effective_limit).min(total);

        let data: Vec<McpServerStatus> = server_names[start..end]
            .iter()
            .map(|name| McpServerStatus {
                name: name.clone(),
                tools: tools_by_server.get(name).cloned().unwrap_or_default(),
                resources: resources.get(name).cloned().unwrap_or_default(),
                resource_templates: resource_templates.get(name).cloned().unwrap_or_default(),
                auth_status: auth_statuses
                    .get(name)
                    .cloned()
                    .unwrap_or(CoreMcpAuthStatus::Unsupported)
                    .into(),
            })
            .collect();

        let next_cursor = if end < total {
            Some(end.to_string())
        } else {
            None
        };

        Ok(ListMcpServerStatusResponse { data, next_cursor })
    }

    async fn read_mcp_resource(
        &self,
        request_id: &ConnectionRequestId,
        params: McpResourceReadParams,
    ) -> Result<(), JSONRPCErrorError> {
        let outgoing = Arc::clone(&self.outgoing);
        let McpResourceReadParams {
            thread_id,
            server,
            uri,
        } = params;

        if let Some(thread_id) = thread_id {
            let (_, thread) = self.load_thread(&thread_id).await?;
            let request_id = request_id.clone();

            tokio::spawn(async move {
                let result = thread.read_mcp_resource(&server, &uri).await;
                Self::send_mcp_resource_read_response(outgoing, request_id, result).await;
            });
            return Ok(());
        }

        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let mcp_config = config
            .to_mcp_config(self.thread_manager.plugins_manager().as_ref())
            .await;
        let auth = self.auth_manager.auth().await;
        let runtime_environment = {
            let environment_manager = self.thread_manager.environment_manager();
            let environment = environment_manager
                .default_environment()
                .unwrap_or_else(|| environment_manager.local_environment());
            // Resource reads without a thread have no turn cwd. This fallback
            // is used only by executor-backed stdio MCPs whose config omits `cwd`.
            McpRuntimeEnvironment::new(environment, config.cwd.to_path_buf())
        };
        let request_id = request_id.clone();

        tokio::spawn(async move {
            let result = read_mcp_resource_without_thread(
                &mcp_config,
                auth.as_ref(),
                runtime_environment,
                &server,
                &uri,
            )
            .await
            .and_then(|result| serde_json::to_value(result).map_err(anyhow::Error::from));
            Self::send_mcp_resource_read_response(outgoing, request_id, result).await;
        });
        Ok(())
    }

    async fn send_mcp_resource_read_response(
        outgoing: Arc<OutgoingMessageSender>,
        request_id: ConnectionRequestId,
        result: anyhow::Result<serde_json::Value>,
    ) {
        let result = result
            .map_err(|error| internal_error(format!("{error:#}")))
            .and_then(|result| {
                serde_json::from_value::<McpResourceReadResponse>(result).map_err(|error| {
                    internal_error(format!(
                        "failed to deserialize MCP resource read response: {error}"
                    ))
                })
            });
        outgoing.send_result(request_id, result).await;
    }

    async fn call_mcp_server_tool(
        &self,
        request_id: &ConnectionRequestId,
        params: McpServerToolCallParams,
    ) -> Result<(), JSONRPCErrorError> {
        let outgoing = Arc::clone(&self.outgoing);
        let thread_id = params.thread_id.clone();
        let (_, thread) = self.load_thread(&thread_id).await?;
        let meta = with_mcp_tool_call_thread_id_meta(params.meta, &thread_id);
        let request_id = request_id.clone();

        tokio::spawn(async move {
            let result = thread
                .call_mcp_tool(&params.server, &params.tool, params.arguments, meta)
                .await
                .map(McpServerToolCallResponse::from)
                .map_err(|error| internal_error(format!("{error:#}")));
            outgoing.send_result(request_id, result).await;
        });
        Ok(())
    }
}

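// Merges the originating thread id into the tool call's metadata map so MCP
// servers can correlate the call with its thread; non-object meta values are
// passed through unchanged.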
fn with_mcp_tool_call_thread_id_meta(
    meta: Option<serde_json::Value>,
    thread_id: &str,
) -> Option<serde_json::Value> {
    match meta {
        Some(serde_json::Value::Object(mut map)) => {
            map.insert(
                MCP_TOOL_THREAD_ID_META_KEY.to_string(),
                serde_json::Value::String(thread_id.to_string()),
            );
            Some(serde_json::Value::Object(map))
        }
        None => {
            let mut map = serde_json::Map::new();
            map.insert(
                MCP_TOOL_THREAD_ID_META_KEY.to_string(),
                serde_json::Value::String(thread_id.to_string()),
            );
            Some(serde_json::Value::Object(map))
        }
        other => other,
    }
}
@@ -1,708 +0,0 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::Duration;

use base64::Engine;
use base64::engine::general_purpose::STANDARD;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::ProcessExitedNotification;
use codex_app_server_protocol::ProcessKillParams;
use codex_app_server_protocol::ProcessKillResponse;
use codex_app_server_protocol::ProcessOutputDeltaNotification;
use codex_app_server_protocol::ProcessOutputStream;
use codex_app_server_protocol::ProcessResizePtyParams;
use codex_app_server_protocol::ProcessResizePtyResponse;
use codex_app_server_protocol::ProcessSpawnParams;
use codex_app_server_protocol::ProcessSpawnResponse;
use codex_app_server_protocol::ProcessTerminalSize;
use codex_app_server_protocol::ProcessWriteStdinParams;
use codex_app_server_protocol::ProcessWriteStdinResponse;
use codex_app_server_protocol::ServerNotification;
use codex_core::exec::ExecExpiration;
use codex_core::exec::ExecExpirationOutcome;
use codex_core::exec::IO_DRAIN_TIMEOUT_MS;
use codex_protocol::exec_output::bytes_to_string_smart;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_pty::DEFAULT_OUTPUT_BYTES_CAP;
use codex_utils_pty::ProcessHandle;
use codex_utils_pty::SpawnedProcess;
use codex_utils_pty::TerminalSize;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::sync::watch;
use tokio_util::sync::CancellationToken;

use crate::error_code::internal_error;
use crate::error_code::invalid_params;
use crate::error_code::invalid_request;
use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::ConnectionRequestId;
use crate::outgoing_message::OutgoingMessageSender;

const EXEC_TIMEOUT_EXIT_CODE: i32 = 124;
const OUTPUT_CHUNK_SIZE_HINT: usize = 64 * 1024;

#[derive(Clone)]
pub(crate) struct ProcessExecRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
    process_exec_manager: ProcessExecManager,
}

impl ProcessExecRequestProcessor {
    pub(crate) fn new(outgoing: Arc<OutgoingMessageSender>) -> Self {
        Self {
            outgoing,
            process_exec_manager: ProcessExecManager::default(),
        }
    }

    pub(crate) async fn process_spawn(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessSpawnParams,
    ) -> Result<(), JSONRPCErrorError> {
        let ProcessSpawnParams {
            command,
            process_handle,
            cwd,
            tty,
            stream_stdin,
            stream_stdout_stderr,
            output_bytes_cap,
            timeout_ms,
            env: env_overrides,
            size,
        } = params;
        let method_name = "process/spawn";
        tracing::debug!("{method_name} command: {command:?}");
        if command.is_empty() {
            return Err(invalid_request("command must not be empty"));
        }
        if process_handle.is_empty() {
            return Err(invalid_request("processHandle must not be empty"));
        }
        if size.is_some() && !tty {
            return Err(invalid_params("process/spawn size requires tty: true"));
        }
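        // Start from the server's own environment, then apply overrides:
        // `Some(value)` sets a variable, `None` removes it.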
        let mut env = std::env::vars().collect::<HashMap<_, _>>();
        if let Some(env_overrides) = env_overrides {
            for (key, value) in env_overrides {
                match value {
                    Some(value) => {
                        env.insert(key, value);
                    }
                    None => {
                        env.remove(&key);
                    }
                }
            }
        }
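        // Three-way timeout: absent -> default timeout, explicit null ->
        // expire only via cancellation, a number -> per-spawn timeout in ms.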
        let expiration = match timeout_ms {
            Some(Some(timeout_ms)) => match u64::try_from(timeout_ms) {
                Ok(timeout_ms) => timeout_ms.into(),
                Err(_) => {
                    return Err(invalid_params(format!(
                        "{method_name} timeoutMs must be non-negative, got {timeout_ms}"
                    )));
                }
            },
            Some(None) => ExecExpiration::Cancellation(CancellationToken::new()),
            None => ExecExpiration::DefaultTimeout,
        };
        let output_bytes_cap = output_bytes_cap.unwrap_or(Some(DEFAULT_OUTPUT_BYTES_CAP));
        let size = size.map(terminal_size_from_protocol).transpose()?;

        self.process_exec_manager
            .start(StartProcessParams {
                outgoing: self.outgoing.clone(),
                request_id,
                process_handle,
                command,
                cwd,
                env,
                expiration,
                tty,
                stream_stdin,
                stream_stdout_stderr,
                output_bytes_cap,
                size,
            })
            .await?;

        Ok(())
    }

    pub(crate) async fn process_write_stdin(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessWriteStdinParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.process_exec_manager
            .write_stdin(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn process_resize_pty(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessResizePtyParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.process_exec_manager
            .resize_pty(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn process_kill(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessKillParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.process_exec_manager
            .kill(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    pub(crate) async fn connection_closed(&self, connection_id: ConnectionId) {
        self.process_exec_manager
            .connection_closed(connection_id)
            .await;
    }
}

#[derive(Clone, Default)]
struct ProcessExecManager {
    sessions: Arc<Mutex<HashMap<ConnectionProcessHandle, ProcessSession>>>,
}

#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ConnectionProcessHandle {
    connection_id: ConnectionId,
    process_handle: String,
}

#[derive(Clone)]
struct ProcessSession {
    control_tx: mpsc::Sender<ProcessControlRequest>,
}

enum ProcessControl {
    Write { delta: Vec<u8>, close_stdin: bool },
    Resize { size: TerminalSize },
    Kill,
}

struct ProcessControlRequest {
    control: ProcessControl,
    response_tx: Option<oneshot::Sender<Result<(), JSONRPCErrorError>>>,
}

struct StartProcessParams {
    outgoing: Arc<OutgoingMessageSender>,
    request_id: ConnectionRequestId,
    process_handle: String,
    command: Vec<String>,
    cwd: AbsolutePathBuf,
    env: HashMap<String, String>,
    expiration: ExecExpiration,
    tty: bool,
    stream_stdin: bool,
    stream_stdout_stderr: bool,
    output_bytes_cap: Option<usize>,
    size: Option<TerminalSize>,
}

struct RunProcessParams {
    outgoing: Arc<OutgoingMessageSender>,
    request_id: ConnectionRequestId,
    process_handle: String,
    spawned: SpawnedProcess,
    control_rx: mpsc::Receiver<ProcessControlRequest>,
    stream_stdin: bool,
    stream_stdout_stderr: bool,
    expiration: ExecExpiration,
    output_bytes_cap: Option<usize>,
}

struct SpawnProcessOutputParams {
    connection_id: ConnectionId,
    process_handle: String,
    output_rx: mpsc::Receiver<Vec<u8>>,
    stdio_timeout_rx: watch::Receiver<bool>,
    outgoing: Arc<OutgoingMessageSender>,
    stream: ProcessOutputStream,
    stream_output: bool,
    output_bytes_cap: Option<usize>,
}

#[derive(Default)]
struct ProcessOutputCapture {
    text: String,
    cap_reached: bool,
}

impl ProcessExecManager {
    async fn start(&self, params: StartProcessParams) -> Result<(), JSONRPCErrorError> {
        let StartProcessParams {
            outgoing,
            request_id,
            process_handle,
            command,
            cwd,
            env,
            expiration,
            tty,
            stream_stdin,
            stream_stdout_stderr,
            output_bytes_cap,
            size,
        } = params;

        let (program, args) = command
            .split_first()
            .ok_or_else(|| invalid_request("command must not be empty"))?;
        let stream_stdin = tty || stream_stdin;
        let stream_stdout_stderr = tty || stream_stdout_stderr;
        let arg0 = None;
        let (control_tx, control_rx) = mpsc::channel(32);
        let process_key = ConnectionProcessHandle {
            connection_id: request_id.connection_id,
            process_handle: process_handle.clone(),
        };

        {
            let mut sessions = self.sessions.lock().await;
            match sessions.entry(process_key.clone()) {
                Entry::Occupied(_) => {
                    return Err(invalid_request(format!(
                        "duplicate active process handle: {process_handle:?}",
                    )));
                }
                Entry::Vacant(entry) => {
                    entry.insert(ProcessSession { control_tx });
                }
            }
        }

        let spawned = if tty {
            codex_utils_pty::spawn_pty_process(
                program,
                args,
                cwd.as_path(),
                &env,
                &arg0,
                size.unwrap_or_default(),
            )
            .await
        } else if stream_stdin {
            codex_utils_pty::spawn_pipe_process(program, args, cwd.as_path(), &env, &arg0).await
        } else {
            codex_utils_pty::spawn_pipe_process_no_stdin(program, args, cwd.as_path(), &env, &arg0)
                .await
        };
        let spawned = match spawned {
            Ok(spawned) => spawned,
            Err(err) => {
                self.sessions.lock().await.remove(&process_key);
                return Err(internal_error(format!("failed to spawn process: {err}")));
            }
        };

        outgoing
            .send_response(request_id.clone(), ProcessSpawnResponse {})
            .await;

        let sessions = Arc::clone(&self.sessions);
        tokio::spawn(async move {
            run_process(RunProcessParams {
                outgoing,
                request_id,
                process_handle,
                spawned,
                control_rx,
                stream_stdin,
                stream_stdout_stderr,
                expiration,
                output_bytes_cap,
            })
            .await;
            sessions.lock().await.remove(&process_key);
        });

        Ok(())
    }

    async fn write_stdin(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessWriteStdinParams,
    ) -> Result<ProcessWriteStdinResponse, JSONRPCErrorError> {
        if params.delta_base64.is_none() && !params.close_stdin {
            return Err(invalid_params(
                "process/writeStdin requires deltaBase64 or closeStdin",
            ));
        }

        let delta = match params.delta_base64 {
            Some(delta_base64) => STANDARD
                .decode(delta_base64)
                .map_err(|err| invalid_params(format!("invalid deltaBase64: {err}")))?,
            None => Vec::new(),
        };

        self.send_control(
            request_id.connection_id,
            params.process_handle,
            ProcessControl::Write {
                delta,
                close_stdin: params.close_stdin,
            },
        )
        .await?;

        Ok(ProcessWriteStdinResponse {})
    }

    async fn kill(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessKillParams,
    ) -> Result<ProcessKillResponse, JSONRPCErrorError> {
        self.send_control(
            request_id.connection_id,
            params.process_handle,
            ProcessControl::Kill,
        )
        .await?;
        Ok(ProcessKillResponse {})
    }

    async fn resize_pty(
        &self,
        request_id: ConnectionRequestId,
        params: ProcessResizePtyParams,
    ) -> Result<ProcessResizePtyResponse, JSONRPCErrorError> {
        self.send_control(
            request_id.connection_id,
            params.process_handle,
            ProcessControl::Resize {
                size: terminal_size_from_protocol(params.size)?,
            },
        )
        .await?;
        Ok(ProcessResizePtyResponse {})
    }

    async fn connection_closed(&self, connection_id: ConnectionId) {
        let controls = {
            let mut sessions = self.sessions.lock().await;
            let process_handles = sessions
                .keys()
                .filter(|process_handle| process_handle.connection_id == connection_id)
                .cloned()
                .collect::<Vec<_>>();
            let mut controls = Vec::with_capacity(process_handles.len());
            for process_handle in process_handles {
                if let Some(control) = sessions.remove(&process_handle) {
                    controls.push(control);
                }
            }
            controls
        };

        for control in controls {
            let _ = control
                .control_tx
                .send(ProcessControlRequest {
                    control: ProcessControl::Kill,
                    response_tx: None,
                })
                .await;
        }
    }

    async fn send_control(
        &self,
        connection_id: ConnectionId,
        process_handle: String,
        control: ProcessControl,
    ) -> Result<(), JSONRPCErrorError> {
        let process_key = ConnectionProcessHandle {
            connection_id,
            process_handle,
        };
        let session = self
            .sessions
            .lock()
            .await
            .get(&process_key)
            .cloned()
            .ok_or_else(|| no_active_process_error(&process_key.process_handle))?;
        let (response_tx, response_rx) = oneshot::channel();
        session
            .control_tx
            .send(ProcessControlRequest {
                control,
                response_tx: Some(response_tx),
            })
            .await
            .map_err(|_| process_no_longer_running_error(&process_key.process_handle))?;
        response_rx
            .await
            .map_err(|_| process_no_longer_running_error(&process_key.process_handle))?
    }
}

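// Drives a spawned process to completion: applies control requests (write,
// resize, kill), terminates the process when its expiration fires, and maps a
// timed-out run to EXEC_TIMEOUT_EXIT_CODE.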
async fn run_process(params: RunProcessParams) {
    let RunProcessParams {
        outgoing,
        request_id,
        process_handle,
        spawned,
        control_rx,
        stream_stdin,
        stream_stdout_stderr,
        expiration,
        output_bytes_cap,
    } = params;
    let mut control_rx = control_rx;
    let mut control_open = true;
    let expiration = expiration.wait_with_outcome();
    tokio::pin!(expiration);
    let SpawnedProcess {
        session,
        stdout_rx,
        stderr_rx,
        exit_rx,
    } = spawned;
    tokio::pin!(exit_rx);
    let mut expiration_outcome = None;
    let (stdio_timeout_tx, stdio_timeout_rx) = watch::channel(false);

    let stdout_handle = collect_spawn_process_output(SpawnProcessOutputParams {
        connection_id: request_id.connection_id,
        process_handle: process_handle.clone(),
        output_rx: stdout_rx,
        stdio_timeout_rx: stdio_timeout_rx.clone(),
        outgoing: Arc::clone(&outgoing),
        stream: ProcessOutputStream::Stdout,
        stream_output: stream_stdout_stderr,
        output_bytes_cap,
    });
    let stderr_handle = collect_spawn_process_output(SpawnProcessOutputParams {
        connection_id: request_id.connection_id,
        process_handle: process_handle.clone(),
        output_rx: stderr_rx,
        stdio_timeout_rx,
        outgoing: Arc::clone(&outgoing),
        stream: ProcessOutputStream::Stderr,
        stream_output: stream_stdout_stderr,
        output_bytes_cap,
    });

    let exit_code = loop {
        tokio::select! {
            control = control_rx.recv(), if control_open => {
                match control {
                    Some(ProcessControlRequest { control, response_tx }) => {
                        let result = match control {
                            ProcessControl::Write { delta, close_stdin } => {
                                handle_process_write(
                                    &session,
                                    stream_stdin,
                                    delta,
                                    close_stdin,
                                ).await
                            }
                            ProcessControl::Resize { size } => {
                                handle_process_resize(&session, size)
                            }
                            ProcessControl::Kill => {
                                session.request_terminate();
                                Ok(())
                            }
                        };
                        if let Some(response_tx) = response_tx
                            && response_tx.send(result).is_err()
                        {
                            tracing::debug!(
                                process_handle = %process_handle,
                                "process control response receiver dropped"
                            );
                        }
                    },
                    None => {
                        control_open = false;
                        session.request_terminate();
                    }
                }
            }
            outcome = &mut expiration, if expiration_outcome.is_none() => {
                expiration_outcome = Some(outcome);
                session.request_terminate();
            }
            exit = &mut exit_rx => {
                if matches!(expiration_outcome, Some(ExecExpirationOutcome::TimedOut)) {
                    break EXEC_TIMEOUT_EXIT_CODE;
                } else {
                    break exit.unwrap_or(-1);
                }
            }
        }
    };

    // Give stdout/stderr readers a bounded grace period to drain after process exit.
    let timeout_handle = tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(IO_DRAIN_TIMEOUT_MS)).await;
        let _ = stdio_timeout_tx.send(true);
    });

    let stdout = stdout_handle.await.unwrap_or_default();
    let stderr = stderr_handle.await.unwrap_or_default();
    timeout_handle.abort();

    outgoing
        .send_server_notification_to_connection_and_wait(
            request_id.connection_id,
            ServerNotification::ProcessExited(ProcessExitedNotification {
                process_handle,
                exit_code,
                stdout: stdout.text,
                stdout_cap_reached: stdout.cap_reached,
                stderr: stderr.text,
                stderr_cap_reached: stderr.cap_reached,
            }),
        )
        .await;
}

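// Reads one output stream on a background task: coalesces small chunks up to
// OUTPUT_CHUNK_SIZE_HINT, enforces the optional byte cap, and either streams
// deltas to the client or buffers them for the final ProcessExited payload.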
fn collect_spawn_process_output(
    params: SpawnProcessOutputParams,
) -> tokio::task::JoinHandle<ProcessOutputCapture> {
    let SpawnProcessOutputParams {
        connection_id,
        process_handle,
        mut output_rx,
        mut stdio_timeout_rx,
        outgoing,
        stream,
        stream_output,
        output_bytes_cap,
    } = params;
    tokio::spawn(async move {
        let mut buffer: Vec<u8> = Vec::new();
        let mut observed_num_bytes = 0usize;
        let mut cap_reached = false;
        loop {
            let mut chunk = tokio::select! {
                chunk = output_rx.recv() => match chunk {
                    Some(chunk) => chunk,
                    None => break,
                },
                _ = stdio_timeout_rx.wait_for(|&v| v) => break,
            };
            while chunk.len() < OUTPUT_CHUNK_SIZE_HINT
                && let Ok(next_chunk) = output_rx.try_recv()
            {
                chunk.extend_from_slice(&next_chunk);
            }
            let capped_chunk = match output_bytes_cap {
                Some(output_bytes_cap) => {
                    let capped_chunk_len = output_bytes_cap
                        .saturating_sub(observed_num_bytes)
                        .min(chunk.len());
                    observed_num_bytes += capped_chunk_len;
                    &chunk[0..capped_chunk_len]
                }
                None => chunk.as_slice(),
            };
            cap_reached = Some(observed_num_bytes) == output_bytes_cap;
            if stream_output {
                outgoing
                    .send_server_notification_to_connection_and_wait(
                        connection_id,
                        ServerNotification::ProcessOutputDelta(ProcessOutputDeltaNotification {
                            process_handle: process_handle.clone(),
                            stream,
                            delta_base64: STANDARD.encode(capped_chunk),
                            cap_reached,
                        }),
                    )
                    .await;
            } else {
                buffer.extend_from_slice(capped_chunk);
            }
            if cap_reached {
                break;
            }
        }
        ProcessOutputCapture {
            text: bytes_to_string_smart(&buffer),
            cap_reached,
        }
    })
}

async fn handle_process_write(
    session: &ProcessHandle,
    stream_stdin: bool,
    delta: Vec<u8>,
    close_stdin: bool,
) -> Result<(), JSONRPCErrorError> {
    if !stream_stdin {
        return Err(invalid_request(
            "stdin streaming is not enabled for this process",
        ));
    }
    if !delta.is_empty() {
        session
            .writer_sender()
            .send(delta)
            .await
            .map_err(|_| invalid_request("stdin is already closed"))?;
    }
    if close_stdin {
        // Closing drops our sender; the writer task still drains any bytes
        // accepted above before its receiver observes EOF and closes stdin.
        session.close_stdin();
    }
    Ok(())
}

fn handle_process_resize(
    session: &ProcessHandle,
    size: TerminalSize,
) -> Result<(), JSONRPCErrorError> {
    session
        .resize(size)
        .map_err(|err| invalid_request(format!("failed to resize PTY: {err}")))
}

fn terminal_size_from_protocol(
    size: ProcessTerminalSize,
) -> Result<TerminalSize, JSONRPCErrorError> {
    if size.rows == 0 || size.cols == 0 {
        return Err(invalid_params(
            "process size rows and cols must be greater than 0",
        ));
    }
    Ok(TerminalSize {
        rows: size.rows,
        cols: size.cols,
    })
}

fn no_active_process_error(process_handle: &str) -> JSONRPCErrorError {
    invalid_request(format!(
        "no active process for process handle {process_handle:?}"
    ))
}

fn process_no_longer_running_error(process_handle: &str) -> JSONRPCErrorError {
    invalid_request(format!("process {process_handle:?} is no longer running"))
}
@@ -1,8 +0,0 @@
use super::*;

pub(super) fn environment_selection_error_message(err: CodexErr) -> String {
    match err {
        CodexErr::InvalidRequest(message) => message,
        err => err.to_string(),
    }
}
@@ -1,150 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;

use crate::error_code::INTERNAL_ERROR_CODE;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::fuzzy_file_search::FuzzyFileSearchSession;
use crate::fuzzy_file_search::run_fuzzy_file_search;
use crate::fuzzy_file_search::start_fuzzy_file_search_session;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::FuzzyFileSearchParams;
use codex_app_server_protocol::FuzzyFileSearchResponse;
use codex_app_server_protocol::FuzzyFileSearchSessionStartParams;
use codex_app_server_protocol::FuzzyFileSearchSessionStartResponse;
use codex_app_server_protocol::FuzzyFileSearchSessionStopParams;
use codex_app_server_protocol::FuzzyFileSearchSessionStopResponse;
use codex_app_server_protocol::FuzzyFileSearchSessionUpdateParams;
use codex_app_server_protocol::FuzzyFileSearchSessionUpdateResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use tokio::sync::Mutex;

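// Two kinds of search state: one-shot searches keyed by the client-supplied
// cancellation token, and long-lived incremental sessions keyed by session id.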
#[derive(Clone)]
pub(crate) struct SearchRequestProcessor {
    outgoing: Arc<OutgoingMessageSender>,
    pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
    fuzzy_search_sessions: Arc<Mutex<HashMap<String, FuzzyFileSearchSession>>>,
}

impl SearchRequestProcessor {
    pub(crate) fn new(outgoing: Arc<OutgoingMessageSender>) -> Self {
        Self {
            outgoing,
            pending_fuzzy_searches: Arc::new(Mutex::new(HashMap::new())),
            fuzzy_search_sessions: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    pub(crate) async fn fuzzy_file_search(
        &self,
        params: FuzzyFileSearchParams,
    ) -> Result<FuzzyFileSearchResponse, JSONRPCErrorError> {
        let FuzzyFileSearchParams {
            query,
            roots,
            cancellation_token,
        } = params;

        let cancel_flag = match cancellation_token.clone() {
            Some(token) => {
                let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
                // if a cancellation_token is provided and a pending_request exists for
                // that token, cancel it
                if let Some(existing) = pending_fuzzy_searches.get(&token) {
                    existing.store(true, Ordering::Relaxed);
                }
                let flag = Arc::new(AtomicBool::new(false));
                pending_fuzzy_searches.insert(token.clone(), flag.clone());
                flag
            }
            None => Arc::new(AtomicBool::new(false)),
        };

        let results = match query.as_str() {
            "" => vec![],
            _ => run_fuzzy_file_search(query, roots, cancel_flag.clone()).await,
        };

        if let Some(token) = cancellation_token {
            let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
            if let Some(current_flag) = pending_fuzzy_searches.get(&token)
                && Arc::ptr_eq(current_flag, &cancel_flag)
            {
                pending_fuzzy_searches.remove(&token);
            }
        }

        Ok(FuzzyFileSearchResponse { files: results })
    }

    pub(crate) async fn fuzzy_file_search_session_start_response(
        &self,
        params: FuzzyFileSearchSessionStartParams,
    ) -> Result<FuzzyFileSearchSessionStartResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionStartParams { session_id, roots } = params;
        if session_id.is_empty() {
            return Err(invalid_request("sessionId must not be empty"));
        }

        let session =
            start_fuzzy_file_search_session(session_id.clone(), roots, self.outgoing.clone())
                .map_err(|err| {
                    internal_error(format!("failed to start fuzzy file search session: {err}"))
                })?;
        self.fuzzy_search_sessions
            .lock()
            .await
            .insert(session_id, session);
        Ok(FuzzyFileSearchSessionStartResponse {})
    }

    pub(crate) async fn fuzzy_file_search_session_update_response(
        &self,
        params: FuzzyFileSearchSessionUpdateParams,
    ) -> Result<FuzzyFileSearchSessionUpdateResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionUpdateParams { session_id, query } = params;
        let found = {
            let sessions = self.fuzzy_search_sessions.lock().await;
            if let Some(session) = sessions.get(&session_id) {
                session.update_query(query);
                true
            } else {
                false
            }
        };
        if !found {
            return Err(invalid_request(format!(
                "fuzzy file search session not found: {session_id}"
            )));
        }

        Ok(FuzzyFileSearchSessionUpdateResponse {})
    }

    pub(crate) async fn fuzzy_file_search_session_stop(
        &self,
        params: FuzzyFileSearchSessionStopParams,
    ) -> Result<FuzzyFileSearchSessionStopResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionStopParams { session_id } = params;
        self.fuzzy_search_sessions.lock().await.remove(&session_id);

        Ok(FuzzyFileSearchSessionStopResponse {})
    }
}

fn invalid_request(message: impl Into<String>) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: INVALID_REQUEST_ERROR_CODE,
        message: message.into(),
        data: None,
    }
}

fn internal_error(message: impl Into<String>) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: INTERNAL_ERROR_CODE,
        message: message.into(),
        data: None,
    }
}
@@ -1,760 +0,0 @@
use super::*;

pub(super) const THREAD_UNLOADING_DELAY: Duration = Duration::from_secs(30 * 60);

#[derive(Clone)]
pub(super) struct ListenerTaskContext {
    pub(super) thread_manager: Arc<ThreadManager>,
    pub(super) thread_state_manager: ThreadStateManager,
    pub(super) outgoing: Arc<OutgoingMessageSender>,
    pub(super) pending_thread_unloads: Arc<Mutex<HashSet<ThreadId>>>,
    pub(super) analytics_events_client: AnalyticsEventsClient,
    pub(super) thread_watch_manager: ThreadWatchManager,
    pub(super) thread_list_state_permit: Arc<Semaphore>,
    pub(super) fallback_model_provider: String,
    pub(super) codex_home: PathBuf,
}

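// Tracks, with timestamps, whether the thread still has subscribers and
// whether it is actively running; the listener unloads the thread once both
// have been false for `delay`.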
struct UnloadingState {
    delay: Duration,
    has_subscribers_rx: watch::Receiver<bool>,
    has_subscribers: (bool, Instant),
    thread_status_rx: watch::Receiver<ThreadStatus>,
    is_active: (bool, Instant),
}

impl UnloadingState {
    async fn new(
        listener_task_context: &ListenerTaskContext,
        thread_id: ThreadId,
        delay: Duration,
    ) -> Option<Self> {
        let has_subscribers_rx = listener_task_context
            .thread_state_manager
            .subscribe_to_has_connections(thread_id)
            .await?;
        let thread_status_rx = listener_task_context
            .thread_watch_manager
            .subscribe(thread_id)
            .await?;
        let has_subscribers = (*has_subscribers_rx.borrow(), Instant::now());
        let is_active = (
            matches!(*thread_status_rx.borrow(), ThreadStatus::Active { .. }),
            Instant::now(),
        );
        Some(Self {
            delay,
            has_subscribers_rx,
            has_subscribers,
            thread_status_rx,
            is_active,
        })
    }

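    // The unload deadline is the later of "lost the last subscriber" and
    // "went idle", plus `delay`; `None` while either condition still holds.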
    fn unloading_target(&self) -> Option<Instant> {
        match (self.has_subscribers, self.is_active) {
            ((false, has_no_subscribers_since), (false, is_inactive_since)) => {
                Some(std::cmp::max(has_no_subscribers_since, is_inactive_since) + self.delay)
            }
            _ => None,
        }
    }

    fn sync_receiver_values(&mut self) {
        let has_subscribers = *self.has_subscribers_rx.borrow();
        if self.has_subscribers.0 != has_subscribers {
            self.has_subscribers = (has_subscribers, Instant::now());
        }

        let is_active = matches!(*self.thread_status_rx.borrow(), ThreadStatus::Active { .. });
        if self.is_active.0 != is_active {
            self.is_active = (is_active, Instant::now());
        }
    }

    fn should_unload_now(&mut self) -> bool {
        self.sync_receiver_values();
        self.unloading_target()
            .is_some_and(|target| target <= Instant::now())
    }

    fn note_thread_activity_observed(&mut self) {
        if !self.is_active.0 {
            self.is_active = (false, Instant::now());
        }
    }

    async fn wait_for_unloading_trigger(&mut self) -> bool {
        loop {
            self.sync_receiver_values();
            let unloading_target = self.unloading_target();
            if let Some(target) = unloading_target
                && target <= Instant::now()
            {
                return true;
            }
            let unloading_sleep = async {
                if let Some(target) = unloading_target {
                    tokio::time::sleep_until(target.into()).await;
                } else {
                    futures::future::pending::<()>().await;
                }
            };
            tokio::select! {
                _ = unloading_sleep => return true,
                changed = self.has_subscribers_rx.changed() => {
                    if changed.is_err() {
                        return false;
                    }
                    self.sync_receiver_values();
                },
                changed = self.thread_status_rx.changed() => {
                    if changed.is_err() {
                        return false;
                    }
                    self.sync_receiver_values();
                },
            }
        }
    }
}

pub(super) enum ThreadShutdownResult {
    Complete,
    SubmitFailed,
    TimedOut,
}

pub(super) enum EnsureConversationListenerResult {
    Attached,
    ConnectionClosed,
}

#[expect(
    clippy::await_holding_invalid_type,
    reason = "listener subscription must be serialized against pending unloads"
)]
pub(super) async fn ensure_conversation_listener(
    listener_task_context: ListenerTaskContext,
    conversation_id: ThreadId,
    connection_id: ConnectionId,
    raw_events_enabled: bool,
) -> Result<EnsureConversationListenerResult, JSONRPCErrorError> {
    let conversation = match listener_task_context
        .thread_manager
        .get_thread(conversation_id)
        .await
    {
        Ok(conv) => conv,
        Err(_) => {
            return Err(JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("thread not found: {conversation_id}"),
                data: None,
            });
        }
    };
    let thread_state = {
        let pending_thread_unloads = listener_task_context.pending_thread_unloads.lock().await;
        if pending_thread_unloads.contains(&conversation_id) {
            return Err(JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!(
                    "thread {conversation_id} is closing; retry after the thread is closed"
                ),
                data: None,
            });
        }
        let Some(thread_state) = listener_task_context
            .thread_state_manager
            .try_ensure_connection_subscribed(conversation_id, connection_id, raw_events_enabled)
            .await
        else {
            return Ok(EnsureConversationListenerResult::ConnectionClosed);
        };
        thread_state
    };
    if let Err(error) = ensure_listener_task_running(
        listener_task_context.clone(),
        conversation_id,
        conversation,
        thread_state,
    )
    .await
    {
        let _ = listener_task_context
            .thread_state_manager
            .unsubscribe_connection_from_thread(conversation_id, connection_id)
            .await;
        return Err(error);
    }
    Ok(EnsureConversationListenerResult::Attached)
}

pub(super) fn log_listener_attach_result(
    result: Result<EnsureConversationListenerResult, JSONRPCErrorError>,
    thread_id: ThreadId,
    connection_id: ConnectionId,
    thread_kind: &'static str,
) {
    match result {
        Ok(EnsureConversationListenerResult::Attached) => {}
        Ok(EnsureConversationListenerResult::ConnectionClosed) => {
            tracing::debug!(
                thread_id = %thread_id,
                connection_id = ?connection_id,
                "skipping auto-attach for closed connection"
            );
        }
        Err(err) => {
            tracing::warn!(
                "failed to attach listener for {thread_kind} {thread_id}: {message}",
                message = err.message
            );
        }
    }
}

pub(super) async fn ensure_listener_task_running(
    listener_task_context: ListenerTaskContext,
    conversation_id: ThreadId,
    conversation: Arc<CodexThread>,
    thread_state: Arc<Mutex<ThreadState>>,
) -> Result<(), JSONRPCErrorError> {
    let (cancel_tx, mut cancel_rx) = oneshot::channel();
    let Some(mut unloading_state) = UnloadingState::new(
        &listener_task_context,
        conversation_id,
        THREAD_UNLOADING_DELAY,
    )
    .await
    else {
        return Err(JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!(
                "thread {conversation_id} is closing; retry after the thread is closed"
            ),
            data: None,
        });
    };
    let (mut listener_command_rx, listener_generation) = {
        let mut thread_state = thread_state.lock().await;
        if thread_state.listener_matches(&conversation) {
            return Ok(());
        }
        thread_state.set_listener(cancel_tx, &conversation)
    };
    let ListenerTaskContext {
        outgoing,
        thread_manager,
        thread_state_manager,
        pending_thread_unloads,
        analytics_events_client,
        thread_watch_manager,
        thread_list_state_permit,
        fallback_model_provider,
        codex_home,
    } = listener_task_context;
    let outgoing_for_task = Arc::clone(&outgoing);
    tokio::spawn(async move {
        loop {
            tokio::select! {
                biased;
                _ = &mut cancel_rx => {
                    // Listener was superseded or the thread is being torn down.
                    break;
                }
                listener_command = listener_command_rx.recv() => {
                    let Some(listener_command) = listener_command else {
                        break;
                    };
                    handle_thread_listener_command(
                        conversation_id,
                        &conversation,
                        codex_home.as_path(),
                        &thread_state_manager,
                        &thread_state,
                        &thread_watch_manager,
                        &outgoing_for_task,
                        &pending_thread_unloads,
                        listener_command,
                    )
                    .await;
                }
                event = conversation.next_event() => {
                    let event = match event {
                        Ok(event) => event,
                        Err(err) => {
                            tracing::warn!("thread.next_event() failed with: {err}");
                            break;
                        }
                    };

                    // Track the event before emitting any typed translations
                    // so thread-local state such as raw event opt-in stays
                    // synchronized with the conversation.
                    let raw_events_enabled = {
                        let mut thread_state = thread_state.lock().await;
                        thread_state.track_current_turn_event(&event.id, &event.msg);
                        thread_state.experimental_raw_events
                    };
                    let subscribed_connection_ids = thread_state_manager
                        .subscribed_connection_ids(conversation_id)
                        .await;
                    let thread_outgoing = ThreadScopedOutgoingMessageSender::new(
                        outgoing_for_task.clone(),
                        subscribed_connection_ids,
                        conversation_id,
                    );

                    if let EventMsg::RawResponseItem(raw_response_item_event) = &event.msg
                        && !raw_events_enabled
                    {
                        maybe_emit_hook_prompt_item_completed(
                            conversation_id,
                            &event.id,
                            &raw_response_item_event.item,
                            &thread_outgoing,
                        )
                        .await;
                        continue;
                    }

                    apply_bespoke_event_handling(
                        event.clone(),
                        conversation_id,
                        conversation.clone(),
                        thread_manager.clone(),
                        Some(analytics_events_client.clone()),
                        thread_outgoing,
                        thread_state.clone(),
                        thread_watch_manager.clone(),
                        thread_list_state_permit.clone(),
                        fallback_model_provider.clone(),
                    )
                    .await;
                }
                unloading_watchers_open = unloading_state.wait_for_unloading_trigger() => {
                    if !unloading_watchers_open {
                        break;
                    }
                    if !unloading_state.should_unload_now() {
                        continue;
                    }
                    if matches!(conversation.agent_status().await, AgentStatus::Running) {
                        unloading_state.note_thread_activity_observed();
                        continue;
                    }
                    {
                        let mut pending_thread_unloads = pending_thread_unloads.lock().await;
                        if pending_thread_unloads.contains(&conversation_id) {
                            continue;
                        }
                        if !unloading_state.should_unload_now() {
                            continue;
                        }
                        pending_thread_unloads.insert(conversation_id);
                    }
                    unload_thread_without_subscribers(
                        thread_manager.clone(),
                        outgoing_for_task.clone(),
                        pending_thread_unloads.clone(),
                        thread_state_manager.clone(),
                        thread_watch_manager.clone(),
                        conversation_id,
                        conversation.clone(),
                    )
                    .await;
                    break;
                }
            }
        }

        let mut thread_state = thread_state.lock().await;
        if thread_state.listener_generation == listener_generation {
            thread_state.clear_listener();
        }
    });
    Ok(())
}

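// Bounded teardown: give the thread up to ten seconds to acknowledge shutdown
// before reporting the attempt as timed out.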
pub(super) async fn wait_for_thread_shutdown(thread: &Arc<CodexThread>) -> ThreadShutdownResult {
|
||||
match tokio::time::timeout(Duration::from_secs(10), thread.shutdown_and_wait()).await {
|
||||
Ok(Ok(())) => ThreadShutdownResult::Complete,
|
||||
Ok(Err(_)) => ThreadShutdownResult::SubmitFailed,
|
||||
Err(_) => ThreadShutdownResult::TimedOut,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn unload_thread_without_subscribers(
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
pending_thread_unloads: Arc<Mutex<HashSet<ThreadId>>>,
|
||||
thread_state_manager: ThreadStateManager,
|
||||
thread_watch_manager: ThreadWatchManager,
|
||||
thread_id: ThreadId,
|
||||
thread: Arc<CodexThread>,
|
||||
) {
|
||||
info!("thread {thread_id} has no subscribers and is idle; shutting down");
|
||||
|
||||
// Any pending app-server -> client requests for this thread can no longer be
|
||||
// answered; cancel their callbacks before shutdown/unload.
|
||||
outgoing
|
||||
.cancel_requests_for_thread(thread_id, /*error*/ None)
|
||||
.await;
|
||||
thread_state_manager.remove_thread_state(thread_id).await;
|
||||
|
||||
tokio::spawn(async move {
|
||||
match wait_for_thread_shutdown(&thread).await {
|
||||
ThreadShutdownResult::Complete => {
|
||||
if thread_manager.remove_thread(&thread_id).await.is_none() {
|
||||
info!("thread {thread_id} was already removed before teardown finalized");
|
||||
thread_watch_manager
|
||||
.remove_thread(&thread_id.to_string())
|
||||
.await;
|
||||
pending_thread_unloads.lock().await.remove(&thread_id);
|
||||
return;
|
||||
}
|
||||
thread_watch_manager
|
||||
.remove_thread(&thread_id.to_string())
|
||||
.await;
|
||||
let notification = ThreadClosedNotification {
|
||||
thread_id: thread_id.to_string(),
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ThreadClosed(notification))
|
||||
.await;
|
||||
pending_thread_unloads.lock().await.remove(&thread_id);
|
||||
}
|
||||
ThreadShutdownResult::SubmitFailed => {
|
||||
pending_thread_unloads.lock().await.remove(&thread_id);
|
||||
warn!("failed to submit Shutdown to thread {thread_id}");
|
||||
}
|
||||
ThreadShutdownResult::TimedOut => {
|
||||
pending_thread_unloads.lock().await.remove(&thread_id);
|
||||
warn!("thread {thread_id} shutdown timed out; leaving thread loaded");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
pub(super) async fn handle_thread_listener_command(
    conversation_id: ThreadId,
    conversation: &Arc<CodexThread>,
    codex_home: &Path,
    thread_state_manager: &ThreadStateManager,
    thread_state: &Arc<Mutex<ThreadState>>,
    thread_watch_manager: &ThreadWatchManager,
    outgoing: &Arc<OutgoingMessageSender>,
    pending_thread_unloads: &Arc<Mutex<HashSet<ThreadId>>>,
    listener_command: ThreadListenerCommand,
) {
    match listener_command {
        ThreadListenerCommand::SendThreadResumeResponse(resume_request) => {
            handle_pending_thread_resume_request(
                conversation_id,
                conversation,
                codex_home,
                thread_state_manager,
                thread_state,
                thread_watch_manager,
                outgoing,
                pending_thread_unloads,
                *resume_request,
            )
            .await;
        }
        ThreadListenerCommand::EmitThreadGoalUpdated { goal } => {
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalUpdated(
                    ThreadGoalUpdatedNotification {
                        thread_id: conversation_id.to_string(),
                        turn_id: None,
                        goal,
                    },
                ))
                .await;
        }
        ThreadListenerCommand::EmitThreadGoalCleared => {
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalCleared(
                    ThreadGoalClearedNotification {
                        thread_id: conversation_id.to_string(),
                    },
                ))
                .await;
        }
        ThreadListenerCommand::EmitThreadGoalSnapshot { state_db } => {
            send_thread_goal_snapshot_notification(outgoing, conversation_id, &state_db).await;
        }
        ThreadListenerCommand::ResolveServerRequest {
            request_id,
            completion_tx,
        } => {
            resolve_pending_server_request(
                conversation_id,
                thread_state_manager,
                outgoing,
                request_id,
            )
            .await;
            let _ = completion_tx.send(());
        }
    }
}

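/// Answers a `thread/resume` request against an already-loaded thread:
/// subscribes the connection unless the thread is mid-unload, builds the
/// response from live conversation state, then replays token-usage, goal, and
/// pending-request notifications in the order clients expect.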
#[allow(clippy::too_many_arguments)]
#[expect(
    clippy::await_holding_invalid_type,
    reason = "running-thread resume subscription must be serialized against pending unloads"
)]
pub(super) async fn handle_pending_thread_resume_request(
    conversation_id: ThreadId,
    conversation: &Arc<CodexThread>,
    _codex_home: &Path,
    thread_state_manager: &ThreadStateManager,
    thread_state: &Arc<Mutex<ThreadState>>,
    thread_watch_manager: &ThreadWatchManager,
    outgoing: &Arc<OutgoingMessageSender>,
    pending_thread_unloads: &Arc<Mutex<HashSet<ThreadId>>>,
    pending: crate::thread_state::PendingThreadResumeRequest,
) {
    let active_turn = {
        let state = thread_state.lock().await;
        state.active_turn_snapshot()
    };
    tracing::debug!(
        thread_id = %conversation_id,
        request_id = ?pending.request_id,
        active_turn_present = active_turn.is_some(),
        active_turn_id = ?active_turn.as_ref().map(|turn| turn.id.as_str()),
        active_turn_status = ?active_turn.as_ref().map(|turn| &turn.status),
        "composing running thread resume response"
    );
    let has_live_in_progress_turn =
        matches!(conversation.agent_status().await, AgentStatus::Running)
            || active_turn
                .as_ref()
                .is_some_and(|turn| matches!(turn.status, TurnStatus::InProgress));

    let request_id = pending.request_id;
    let connection_id = request_id.connection_id;
    let mut thread = pending.thread_summary;
    if pending.include_turns {
        populate_thread_turns_from_history(
            &mut thread,
            &pending.history_items,
            active_turn.as_ref(),
        );
    }

    let thread_status = thread_watch_manager
        .loaded_status_for_thread(&thread.id)
        .await;

    set_thread_status_and_interrupt_stale_turns(
        &mut thread,
        thread_status,
        has_live_in_progress_turn,
    );

    {
        let pending_thread_unloads = pending_thread_unloads.lock().await;
        if pending_thread_unloads.contains(&conversation_id) {
            drop(pending_thread_unloads);
            outgoing
                .send_error(
                    request_id,
                    invalid_request(format!(
                        "thread {conversation_id} is closing; retry thread/resume after the thread is closed"
                    )),
                )
                .await;
            return;
        }
        if !thread_state_manager
            .try_add_connection_to_thread(conversation_id, connection_id)
            .await
        {
            tracing::debug!(
                thread_id = %conversation_id,
                connection_id = ?connection_id,
                "skipping running thread resume for closed connection"
            );
            return;
        }
    }

    if pending.emit_thread_goal_update
        && let Err(err) = conversation.apply_goal_resume_runtime_effects().await
    {
        tracing::warn!("failed to apply goal resume runtime effects: {err}");
    }

    let ThreadConfigSnapshot {
        model,
        model_provider_id,
        service_tier,
        approval_policy,
        approvals_reviewer,
        permission_profile,
        active_permission_profile,
        cwd,
        reasoning_effort,
        ..
    } = pending.config_snapshot;
    let instruction_sources = pending.instruction_sources;
    let sandbox = thread_response_sandbox_policy(&permission_profile, cwd.as_path());
    let active_permission_profile =
        thread_response_active_permission_profile(active_permission_profile);

    let response = ThreadResumeResponse {
        thread,
        model,
        model_provider: model_provider_id,
        service_tier,
        cwd,
        instruction_sources,
        approval_policy: approval_policy.into(),
        approvals_reviewer: approvals_reviewer.into(),
        sandbox,
        permission_profile: Some(permission_profile.into()),
        active_permission_profile,
        reasoning_effort,
    };
    let token_usage_thread = pending.include_turns.then(|| response.thread.clone());
    outgoing.send_response(request_id, response).await;
    // Match cold resume: metadata-only resume should attach the listener without
    // paying the cost of turn reconstruction for historical usage replay.
    if let Some(token_usage_thread) = token_usage_thread {
        let token_usage_turn_id = latest_token_usage_turn_id_from_rollout_items(
            &pending.history_items,
            token_usage_thread.turns.as_slice(),
        );
        // Rejoining a loaded thread has the same UI contract as a cold resume, but
        // uses the live conversation state instead of reconstructing a new session.
        send_thread_token_usage_update_to_connection(
            outgoing,
            connection_id,
            conversation_id,
            &token_usage_thread,
            conversation.as_ref(),
            token_usage_turn_id,
        )
        .await;
    }
    if pending.emit_thread_goal_update {
        if let Some(state_db) = pending.thread_goal_state_db {
            send_thread_goal_snapshot_notification(outgoing, conversation_id, &state_db).await;
        } else {
            tracing::warn!(
                thread_id = %conversation_id,
                "state db unavailable when reading thread goal for running thread resume"
            );
        }
    }
    outgoing
        .replay_requests_to_connection_for_thread(connection_id, conversation_id)
        .await;
    // App-server owns resume response and snapshot ordering, so wait until
    // replay completes before letting core start goal continuation.
    if pending.emit_thread_goal_update
        && let Err(err) = conversation.continue_active_goal_if_idle().await
    {
        tracing::warn!("failed to continue active goal after running-thread resume: {err}");
    }
}

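/// Reads the thread's goal from the state DB and emits `ThreadGoalUpdated`
/// when a goal exists or `ThreadGoalCleared` when none does; read failures are
/// logged rather than surfaced.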
pub(super) async fn send_thread_goal_snapshot_notification(
    outgoing: &Arc<OutgoingMessageSender>,
    thread_id: ThreadId,
    state_db: &StateDbHandle,
) {
    match state_db.get_thread_goal(thread_id).await {
        Ok(Some(goal)) => {
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalUpdated(
                    ThreadGoalUpdatedNotification {
                        thread_id: thread_id.to_string(),
                        turn_id: None,
                        goal: api_thread_goal_from_state(goal),
                    },
                ))
                .await;
        }
        Ok(None) => {
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalCleared(
                    ThreadGoalClearedNotification {
                        thread_id: thread_id.to_string(),
                    },
                ))
                .await;
        }
        Err(err) => {
            tracing::warn!(
                thread_id = %thread_id,
                "failed to read thread goal for resume snapshot: {err}"
            );
        }
    }
}

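/// Rebuilds `thread.turns` from persisted rollout items, letting the live
/// active turn (if any) replace its historical copy.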
pub(crate) fn populate_thread_turns_from_history(
    thread: &mut Thread,
    items: &[RolloutItem],
    active_turn: Option<&Turn>,
) {
    let mut turns = build_api_turns_from_rollout_items(items);
    if let Some(active_turn) = active_turn {
        merge_turn_history_with_active_turn(&mut turns, active_turn.clone());
    }
    thread.turns = turns;
}

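/// Notifies every connection subscribed to the thread that a pending
/// server -> client request has been resolved.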
pub(super) async fn resolve_pending_server_request(
    conversation_id: ThreadId,
    thread_state_manager: &ThreadStateManager,
    outgoing: &Arc<OutgoingMessageSender>,
    request_id: RequestId,
) {
    let thread_id = conversation_id.to_string();
    let subscribed_connection_ids = thread_state_manager
        .subscribed_connection_ids(conversation_id)
        .await;
    let outgoing = ThreadScopedOutgoingMessageSender::new(
        outgoing.clone(),
        subscribed_connection_ids,
        conversation_id,
    );
    outgoing
        .send_server_notification(ServerNotification::ServerRequestResolved(
            ServerRequestResolvedNotification {
                thread_id,
                request_id,
            },
        ))
        .await;
}

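/// Drops any historical copy of the active turn and appends the live one so it
/// always appears last in the turn list.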
pub(super) fn merge_turn_history_with_active_turn(turns: &mut Vec<Turn>, active_turn: Turn) {
    turns.retain(|turn| turn.id != active_turn.id);
    turns.push(active_turn);
}

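/// Resolves the thread's reported status and, when the thread is not active,
/// downgrades any lingering `InProgress` turns to `Interrupted` so clients
/// never see a running turn on a non-running thread.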
pub(super) fn set_thread_status_and_interrupt_stale_turns(
    thread: &mut Thread,
    loaded_status: ThreadStatus,
    has_live_in_progress_turn: bool,
) {
    let status = resolve_thread_status(loaded_status, has_live_in_progress_turn);
    if !matches!(status, ThreadStatus::Active { .. }) {
        for turn in &mut thread.turns {
            if matches!(turn.status, TurnStatus::InProgress) {
                turn.status = TurnStatus::Interrupted;
            }
        }
    }
    thread.status = status;
}