mirror of
https://github.com/openai/codex.git
synced 2026-05-08 21:32:33 +00:00
Compare commits
1 Commits
fix/window
...
codex/linu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e3834378f |
19
.bazelrc
19
.bazelrc
@@ -153,25 +153,6 @@ common:ci-macos --config=remote
|
||||
common:ci-macos --strategy=remote
|
||||
common:ci-macos --strategy=TestRunner=darwin-sandbox,local
|
||||
|
||||
# On Windows, use Linux remote execution for build actions but keep test actions
|
||||
# on the Windows runner so Bazel's normal test sharding and flaky-test retries
|
||||
# still run against Windows binaries.
|
||||
common:ci-windows-cross --config=ci-windows
|
||||
common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
|
||||
common:ci-windows-cross --config=remote
|
||||
common:ci-windows-cross --host_platform=//:rbe
|
||||
common:ci-windows-cross --strategy=remote
|
||||
common:ci-windows-cross --strategy=TestRunner=local
|
||||
common:ci-windows-cross --local_test_jobs=4
|
||||
common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
|
||||
# Native Windows CI still covers these tests. The cross-built gnullvm binaries
|
||||
# currently crash in V8-backed code-mode tests and hang in PowerShell AST parser
|
||||
# tests when those binaries are run on the Windows runner.
|
||||
common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
|
||||
common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
|
||||
common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
|
||||
common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain
|
||||
|
||||
# Linux-only V8 CI config.
|
||||
common:ci-v8 --config=ci
|
||||
common:ci-v8 --build_metadata=TAG_workflow=v8
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
|
||||
version = 1
|
||||
name = "codex"
|
||||
|
||||
[setup]
|
||||
script = ""
|
||||
|
||||
[[actions]]
|
||||
name = "Run"
|
||||
icon = "run"
|
||||
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"
|
||||
14
.github/scripts/compute-bazel-windows-path.ps1
vendored
14
.github/scripts/compute-bazel-windows-path.ps1
vendored
@@ -5,9 +5,9 @@ tool entries, such as Maven, that can change independently of this repo and
|
||||
cause avoidable cache misses.
|
||||
|
||||
This script derives a smaller, cache-stable PATH that keeps the Windows
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
|
||||
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
|
||||
DotSlash, and the standard Windows system directories.
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths, Git,
|
||||
PowerShell, Node, Python, DotSlash, and the standard Windows system
|
||||
directories.
|
||||
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
|
||||
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
|
||||
steps can pass that explicit PATH to Bazel.
|
||||
@@ -49,8 +49,6 @@ foreach ($pathEntry in ($env:PATH -split ';')) {
|
||||
$pathEntry -like '*Microsoft Visual Studio*' -or
|
||||
$pathEntry -like '*Windows Kits*' -or
|
||||
$pathEntry -like '*Microsoft SDKs*' -or
|
||||
$pathEntry -eq 'C:\mingw64\bin' -or
|
||||
$pathEntry -like 'C:\msys64\*\bin' -or
|
||||
$pathEntry -like 'C:\Program Files\Git\*' -or
|
||||
$pathEntry -like 'C:\Program Files\PowerShell\*' -or
|
||||
$pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
|
||||
@@ -87,12 +85,6 @@ if ($pwshCommand) {
|
||||
Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
|
||||
}
|
||||
|
||||
foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
|
||||
if (Test-Path $mingwPath) {
|
||||
Add-StablePathEntry $mingwPath
|
||||
}
|
||||
}
|
||||
|
||||
if ($windowsAppsPath) {
|
||||
Add-StablePathEntry $windowsAppsPath
|
||||
}
|
||||
|
||||
110
.github/scripts/run-bazel-ci.sh
vendored
110
.github/scripts/run-bazel-ci.sh
vendored
@@ -6,7 +6,6 @@ print_failed_bazel_test_logs=0
|
||||
print_failed_bazel_action_summary=0
|
||||
remote_download_toplevel=0
|
||||
windows_msvc_host_platform=0
|
||||
windows_cross_compile=0
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
@@ -26,10 +25,6 @@ while [[ $# -gt 0 ]]; do
|
||||
windows_msvc_host_platform=1
|
||||
shift
|
||||
;;
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -42,7 +37,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] -- <bazel args> -- <targets>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -66,11 +61,7 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
ci_config=ci-windows
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -114,8 +105,8 @@ print_bazel_test_log_tails() {
|
||||
while IFS= read -r target; do
|
||||
failed_targets+=("$target")
|
||||
done < <(
|
||||
grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
|
||||
grep -E '^FAIL: //' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
|
||||
| sort -u
|
||||
)
|
||||
|
||||
@@ -253,12 +244,6 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Fork PRs do not receive the BuildBuddy secret needed for the remote
|
||||
# cross-compile config. Preserve the previous local Windows build shape.
|
||||
windows_msvc_host_platform=1
|
||||
fi
|
||||
|
||||
post_config_bazel_args=()
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
|
||||
has_host_platform_override=0
|
||||
@@ -284,25 +269,6 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
|
||||
post_config_bazel_args+=(--remote_download_toplevel)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# `--enable_platform_specific_config` expands `common:windows` on Windows
|
||||
# hosts after ordinary rc configs, which can override `ci-windows-cross`'s
|
||||
# RBE host platform. Repeat the host platform on the command line so V8 and
|
||||
# other genrules execute on Linux RBE workers instead of Git Bash locally.
|
||||
#
|
||||
# Bazel also derives the default genrule shell from the client host. Without
|
||||
# an explicit shell executable, remote Linux actions can be asked to run
|
||||
# `C:\Program Files\Git\usr\bin\bash.exe`.
|
||||
post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The Windows cross-compile config depends on remote execution. Fork PRs do
|
||||
# not receive the BuildBuddy secret, so fall back to the existing local build
|
||||
# shape and keep its lower concurrency cap.
|
||||
post_config_bazel_args+=(--jobs=8)
|
||||
fi
|
||||
|
||||
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
|
||||
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
|
||||
# each job its own repo contents cache so they do not fight over the shared
|
||||
@@ -321,57 +287,37 @@ if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
|
||||
pass_windows_build_env=1
|
||||
if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Remote build actions execute on Linux RBE workers. Passing the Windows
|
||||
# runner's build environment there makes Bazel genrules try to execute
|
||||
# C:\Program Files\Git\usr\bin\bash.exe on Linux.
|
||||
pass_windows_build_env=0
|
||||
fi
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
|
||||
echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
elif [[ $windows_cross_compile -eq 1 ]]; then
|
||||
# Remote build actions run on Linux RBE workers. Give their shell snippets
|
||||
# a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
|
||||
# Windows test execution.
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=/usr/bin:/bin"
|
||||
"--host_action_env=PATH=/usr/bin:/bin"
|
||||
)
|
||||
fi
|
||||
post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
fi
|
||||
|
||||
bazel_console_log="$(mktemp)"
|
||||
|
||||
13
.github/scripts/run-bazel-query-ci.sh
vendored
13
.github/scripts/run-bazel-query-ci.sh
vendored
@@ -6,13 +6,8 @@ set -euo pipefail
|
||||
# invocation so target-discovery queries can reuse the same Bazel server.
|
||||
|
||||
query_args=()
|
||||
windows_cross_compile=0
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -25,7 +20,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
|
||||
echo "Usage: $0 [<bazel query args>...] -- <query expression>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -37,11 +32,7 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
ci_config=ci-windows
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
133
.github/workflows/bazel.yml
vendored
133
.github/workflows/bazel.yml
vendored
@@ -17,10 +17,13 @@ concurrency:
|
||||
cancel-in-progress: ${{ github.ref_name != 'main' }}
|
||||
jobs:
|
||||
test:
|
||||
# PRs use a fast Windows cross-compiled test leg for pre-merge signal.
|
||||
# Post-merge pushes to main also run the native Windows test job below,
|
||||
# which keeps V8/code-mode coverage without putting PR latency back on the
|
||||
# critical path.
|
||||
# Even though a no-cache-hit Windows build seems to exceed the 30-minute
|
||||
# limit on occasion, the more common reason for exceeding the limit is a
|
||||
# true test failure in a rust_test() marked "flaky" that gets run 3x.
|
||||
# In that case, extra time generally does not give us more signal.
|
||||
#
|
||||
# Ultimately we need true distributed builds (e.g.,
|
||||
# https://www.buildbuddy.io/docs/rbe-setup/) to speed things up.
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -44,16 +47,13 @@ jobs:
|
||||
# - os: ubuntu-24.04-arm
|
||||
# target: aarch64-unknown-linux-gnu
|
||||
|
||||
# Windows fast path: build the windows-gnullvm binaries with Linux
|
||||
# RBE, then run the resulting Windows tests on the Windows runner.
|
||||
# The main-only native Windows job below preserves full V8/code-mode
|
||||
# coverage post-merge.
|
||||
# Windows
|
||||
- os: windows-latest
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
# Configure a human readable name for each job
|
||||
name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
@@ -91,7 +91,6 @@ jobs:
|
||||
)
|
||||
|
||||
bazel_wrapper_args=(
|
||||
--print-failed-action-summary
|
||||
--print-failed-test-logs
|
||||
)
|
||||
bazel_test_args=(
|
||||
@@ -101,19 +100,8 @@ jobs:
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
)
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
bazel_wrapper_args+=(
|
||||
--windows-cross-compile
|
||||
--remote-download-toplevel
|
||||
)
|
||||
# Tradeoff: the Linux-RBE-built windows-gnullvm V8 archive
|
||||
# currently crashes during direct V8/code-mode smoke tests on the
|
||||
# Windows runner. Keep the broader fast Windows suite in PR CI and
|
||||
# rely on the main-only native Windows job below for full
|
||||
# V8/code-mode signal while we investigate the cross-built archive.
|
||||
bazel_targets+=(
|
||||
-//codex-rs/code-mode:code-mode-unit-tests
|
||||
-//codex-rs/v8-poc:v8-poc-unit-tests
|
||||
)
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_test_args+=(--jobs=8)
|
||||
fi
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
@@ -142,75 +130,6 @@ jobs:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
test-windows-native-main:
|
||||
# Native Windows Bazel tests are slower and frequently approach the
|
||||
# 30-minute PR budget, but they provide the full V8/code-mode signal that
|
||||
# the fast cross-compiled PR leg intentionally trades away. Run this only
|
||||
# for post-merge commits to main and give it a larger timeout.
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 40
|
||||
runs-on: windows-latest
|
||||
name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- name: Prepare Bazel CI
|
||||
id: prepare_bazel
|
||||
uses: ./.github/actions/prepare-bazel-ci
|
||||
with:
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
cache-scope: bazel-${{ github.job }}
|
||||
install-test-prereqs: "true"
|
||||
|
||||
- name: bazel test //...
|
||||
env:
|
||||
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
|
||||
shell: bash
|
||||
run: |
|
||||
bazel_targets=(
|
||||
//...
|
||||
# Keep standalone V8 library targets out of the ordinary Bazel CI
|
||||
# path. V8 consumers under `//codex-rs/...` still participate
|
||||
# transitively through `//...`.
|
||||
-//third_party/v8:all
|
||||
)
|
||||
|
||||
bazel_test_args=(
|
||||
test
|
||||
--test_tag_filters=-argument-comment-lint
|
||||
--test_verbose_timeout_warnings
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
--build_metadata=TAG_windows_native_main=true
|
||||
)
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
--print-failed-action-summary \
|
||||
--print-failed-test-logs \
|
||||
-- \
|
||||
"${bazel_test_args[@]}" \
|
||||
-- \
|
||||
"${bazel_targets[@]}"
|
||||
|
||||
- name: Upload Bazel execution logs
|
||||
if: always() && !cancelled()
|
||||
continue-on-error: true
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
with:
|
||||
name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
|
||||
path: ${{ runner.temp }}/bazel-execution-logs
|
||||
if-no-files-found: ignore
|
||||
|
||||
# Save the job-scoped Bazel repository cache after cache misses. Keep the
|
||||
# upload non-fatal so cache service issues never fail the job itself.
|
||||
- name: Save bazel repository cache
|
||||
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
|
||||
continue-on-error: true
|
||||
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
|
||||
with:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
clippy:
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
@@ -251,24 +170,17 @@ jobs:
|
||||
--build_metadata=TAG_job=clippy
|
||||
)
|
||||
bazel_wrapper_args=()
|
||||
bazel_target_list_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
# Keep this aligned with the fast Windows Bazel test job: use
|
||||
# Linux RBE for clippy build actions while targeting Windows
|
||||
# gnullvm. Fork/community PRs without the BuildBuddy secret fall
|
||||
# back inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
bazel_target_list_args+=(--windows-cross-compile)
|
||||
if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The fork fallback can see incompatible explicit Windows-cross
|
||||
# internal test binaries in the generated target list. Preserve
|
||||
# the old local-fallback behavior there.
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
fi
|
||||
# Keep this aligned with the Windows Bazel test job. With the
|
||||
# default `//:local_windows` host platform, Windows `rust_test`
|
||||
# targets such as `//codex-rs/core:core-all-test` can be skipped
|
||||
# by `--skip_incompatible_explicit_targets`, which hides clippy
|
||||
# diagnostics from integration-test modules.
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
fi
|
||||
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh)"
|
||||
bazel_targets=()
|
||||
while IFS= read -r target; do
|
||||
bazel_targets+=("${target}")
|
||||
@@ -340,12 +252,7 @@ jobs:
|
||||
# Rust debug assertions explicitly.
|
||||
bazel_wrapper_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
# This is build-only signal, so use the same Linux-RBE
|
||||
# cross-compile path as the fast Windows test and clippy jobs.
|
||||
# Fork/community PRs without the BuildBuddy secret fall back
|
||||
# inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
fi
|
||||
|
||||
bazel_build_args=(
|
||||
|
||||
4
.github/workflows/cargo-deny.yml
vendored
4
.github/workflows/cargo-deny.yml
vendored
@@ -17,10 +17,10 @@ jobs:
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
|
||||
|
||||
- name: Run cargo-deny
|
||||
uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
|
||||
with:
|
||||
rust-version: 1.93.0
|
||||
rust-version: stable
|
||||
manifest-path: ./codex-rs/Cargo.toml
|
||||
|
||||
12
.github/workflows/issue-labeler.yml
vendored
12
.github/workflows/issue-labeler.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
||||
6. iOS — Issues with the Codex iOS app.
|
||||
|
||||
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
|
||||
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
|
||||
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior.
|
||||
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
|
||||
2. mcp — Topics involving Model Context Protocol servers/clients.
|
||||
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
|
||||
@@ -68,15 +68,7 @@ jobs:
|
||||
21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
|
||||
22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
|
||||
23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
|
||||
24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
|
||||
25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
|
||||
26. memory - Issues involving agentic memory storage and retrieval.
|
||||
27. imagen - Issues involving image generation.
|
||||
28. remote - Issues involving remote access, remote control, or SSH.
|
||||
29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
|
||||
30. automations - Issues involving scheduled automation tasks or heartbeats.
|
||||
31. pets - Issues involving pets avatars and animations.
|
||||
32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.
|
||||
24. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, or plan.
|
||||
|
||||
Issue number: ${{ github.event.issue.number }}
|
||||
|
||||
|
||||
2
.github/workflows/rust-release.yml
vendored
2
.github/workflows/rust-release.yml
vendored
@@ -20,7 +20,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
- uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
|
||||
- name: Validate tag matches Cargo.toml version
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
34
BUILD.bazel
34
BUILD.bazel
@@ -30,40 +30,6 @@ platform(
|
||||
parents = ["@platforms//host"],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_gnullvm",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_msvc",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
)
|
||||
|
||||
toolchain(
|
||||
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
|
||||
exec_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
target_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
toolchain = "@bazel_tools//tools/test:empty_toolchain",
|
||||
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "rbe",
|
||||
actual = "@rbe_platform",
|
||||
|
||||
@@ -6,6 +6,4 @@ ignore = [
|
||||
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
|
||||
"RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
|
||||
"RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
|
||||
"RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
|
||||
"RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
|
||||
]
|
||||
|
||||
2
codex-rs/.github/workflows/cargo-audit.yml
vendored
2
codex-rs/.github/workflows/cargo-audit.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
working-directory: codex-rs
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Install cargo-audit
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
|
||||
65
codex-rs/Cargo.lock
generated
65
codex-rs/Cargo.lock
generated
@@ -1857,8 +1857,8 @@ dependencies = [
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-analytics",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-app-server-transport",
|
||||
"codex-arg0",
|
||||
"codex-backend-client",
|
||||
"codex-chatgpt",
|
||||
@@ -1891,17 +1891,23 @@ dependencies = [
|
||||
"codex-state",
|
||||
"codex-thread-store",
|
||||
"codex-tools",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
"codex-utils-json-to-toml",
|
||||
"codex-utils-pty",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"core_test_support",
|
||||
"flate2",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"reqwest",
|
||||
"rmcp",
|
||||
@@ -1999,45 +2005,6 @@ dependencies = [
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-app-server-transport"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-config",
|
||||
"codex-core",
|
||||
"codex-login",
|
||||
"codex-model-provider",
|
||||
"codex-state",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"tempfile",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-tungstenite",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-apply-patch"
|
||||
version = "0.0.0"
|
||||
@@ -2220,7 +2187,6 @@ dependencies = [
|
||||
"opentelemetry_sdk",
|
||||
"pretty_assertions",
|
||||
"rand 0.9.3",
|
||||
"rcgen",
|
||||
"reqwest",
|
||||
"rustls",
|
||||
"rustls-native-certs",
|
||||
@@ -3038,23 +3004,6 @@ dependencies = [
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-mcp"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-output-truncation",
|
||||
"pretty_assertions",
|
||||
"rmcp",
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-read"
|
||||
version = "0.0.0"
|
||||
|
||||
@@ -8,7 +8,6 @@ members = [
|
||||
"ansi-escape",
|
||||
"async-utils",
|
||||
"app-server",
|
||||
"app-server-transport",
|
||||
"app-server-client",
|
||||
"app-server-protocol",
|
||||
"app-server-test-client",
|
||||
@@ -52,7 +51,6 @@ members = [
|
||||
"login",
|
||||
"codex-mcp",
|
||||
"mcp-server",
|
||||
"memories/mcp",
|
||||
"memories/read",
|
||||
"memories/write",
|
||||
"model-provider-info",
|
||||
@@ -129,7 +127,6 @@ codex-ansi-escape = { path = "ansi-escape" }
|
||||
codex-api = { path = "codex-api" }
|
||||
codex-aws-auth = { path = "aws-auth" }
|
||||
codex-app-server = { path = "app-server" }
|
||||
codex-app-server-transport = { path = "app-server-transport" }
|
||||
codex-app-server-client = { path = "app-server-client" }
|
||||
codex-app-server-protocol = { path = "app-server-protocol" }
|
||||
codex-app-server-test-client = { path = "app-server-test-client" }
|
||||
@@ -169,7 +166,6 @@ codex-keyring-store = { path = "keyring-store" }
|
||||
codex-linux-sandbox = { path = "linux-sandbox" }
|
||||
codex-lmstudio = { path = "lmstudio" }
|
||||
codex-login = { path = "login" }
|
||||
codex-memories-mcp = { path = "memories/mcp" }
|
||||
codex-memories-read = { path = "memories/read" }
|
||||
codex-memories-write = { path = "memories/write" }
|
||||
codex-mcp = { path = "codex-mcp" }
|
||||
@@ -324,10 +320,6 @@ quick-xml = "0.38.4"
|
||||
rand = "0.9"
|
||||
ratatui = "0.29.0"
|
||||
ratatui-macros = "0.6.0"
|
||||
rcgen = { version = "0.14.7", default-features = false, features = [
|
||||
"aws_lc_rs",
|
||||
"pem",
|
||||
] }
|
||||
regex = "1.12.3"
|
||||
regex-lite = "0.1.8"
|
||||
reqwest = { version = "0.12", features = ["cookies"] }
|
||||
@@ -463,7 +455,6 @@ unwrap_used = "deny"
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = [
|
||||
"codex-agent-graph-store",
|
||||
"codex-memories-mcp",
|
||||
"icu_provider",
|
||||
"openssl-sys",
|
||||
"codex-utils-readiness",
|
||||
|
||||
@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t
|
||||
|
||||
### Notifications
|
||||
|
||||
The legacy `notify` setting is deprecated and will be removed in a future release. Existing configurations still work, but new automation should use lifecycle hooks instead. The [notify documentation](../docs/config.md#notify) explains the remaining compatibility behavior. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
|
||||
### `codex exec` to run Codex programmatically/non-interactively
|
||||
|
||||
|
||||
@@ -300,15 +300,7 @@ impl fmt::Display for TypedRequestError {
|
||||
write!(f, "{method} transport error: {source}")
|
||||
}
|
||||
Self::Server { method, source } => {
|
||||
write!(
|
||||
f,
|
||||
"{method} failed: {} (code {})",
|
||||
source.message, source.code
|
||||
)?;
|
||||
if let Some(data) = source.data.as_ref() {
|
||||
write!(f, ", data: {data}")?;
|
||||
}
|
||||
Ok(())
|
||||
write!(f, "{method} failed: {}", source.message)
|
||||
}
|
||||
Self::Deserialize { method, source } => {
|
||||
write!(f, "{method} response decode error: {source}")
|
||||
@@ -1923,15 +1915,11 @@ mod tests {
|
||||
method: "thread/read".to_string(),
|
||||
source: JSONRPCErrorError {
|
||||
code: -32603,
|
||||
data: Some(serde_json::json!({"detail": "config lock mismatch"})),
|
||||
data: None,
|
||||
message: "internal".to_string(),
|
||||
},
|
||||
};
|
||||
assert_eq!(std::error::Error::source(&server).is_some(), false);
|
||||
assert_eq!(
|
||||
server.to_string(),
|
||||
"thread/read failed: internal (code -32603), data: {\"detail\":\"config lock mismatch\"}"
|
||||
);
|
||||
|
||||
let deserialize = TypedRequestError::Deserialize {
|
||||
method: "thread/start".to_string(),
|
||||
|
||||
@@ -2217,25 +2217,6 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginUninstallParams": {
|
||||
"properties": {
|
||||
"pluginId": {
|
||||
@@ -2850,28 +2831,6 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -5077,30 +5036,6 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
|
||||
@@ -762,30 +762,6 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -12358,31 +12334,6 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -12393,7 +12344,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/PluginShareListItem"
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -12440,40 +12391,6 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -13791,28 +13708,6 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
@@ -1521,30 +1521,6 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -9011,31 +8987,6 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -9046,7 +8997,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -9093,40 +9044,6 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -10444,28 +10361,6 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
@@ -167,31 +167,6 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -329,7 +304,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -732,28 +732,6 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
@@ -862,28 +862,6 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -14,4 +14,4 @@ export type ResponseItem = { "type": "message", role: string, content: Array<Con
|
||||
/**
|
||||
* Set when using the Responses API.
|
||||
*/
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "context_compaction", encrypted_content?: string, } | { "type": "other" };
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "other" };
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { AbsolutePathBuf } from "../AbsolutePathBuf";
|
||||
import type { PluginSummary } from "./PluginSummary";
|
||||
|
||||
export type PluginShareListItem = { plugin: PluginSummary, shareUrl: string, localPluginPath: AbsolutePathBuf | null, };
|
||||
@@ -1,6 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { PluginShareListItem } from "./PluginShareListItem";
|
||||
import type { PluginSummary } from "./PluginSummary";
|
||||
|
||||
export type PluginShareListResponse = { data: Array<PluginShareListItem>, };
|
||||
export type PluginShareListResponse = { data: Array<PluginSummary>, };
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type PluginSkillReadParams = { remoteMarketplaceName: string, remotePluginId: string, skillName: string, };
|
||||
@@ -1,5 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type PluginSkillReadResponse = { contents: string | null, };
|
||||
@@ -283,13 +283,10 @@ export type { PluginReadParams } from "./PluginReadParams";
|
||||
export type { PluginReadResponse } from "./PluginReadResponse";
|
||||
export type { PluginShareDeleteParams } from "./PluginShareDeleteParams";
|
||||
export type { PluginShareDeleteResponse } from "./PluginShareDeleteResponse";
|
||||
export type { PluginShareListItem } from "./PluginShareListItem";
|
||||
export type { PluginShareListParams } from "./PluginShareListParams";
|
||||
export type { PluginShareListResponse } from "./PluginShareListResponse";
|
||||
export type { PluginShareSaveParams } from "./PluginShareSaveParams";
|
||||
export type { PluginShareSaveResponse } from "./PluginShareSaveResponse";
|
||||
export type { PluginSkillReadParams } from "./PluginSkillReadParams";
|
||||
export type { PluginSkillReadResponse } from "./PluginSkillReadResponse";
|
||||
export type { PluginSource } from "./PluginSource";
|
||||
export type { PluginSummary } from "./PluginSummary";
|
||||
export type { PluginUninstallParams } from "./PluginUninstallParams";
|
||||
|
||||
@@ -612,11 +612,6 @@ client_request_definitions! {
|
||||
serialization: global("config"),
|
||||
response: v2::PluginReadResponse,
|
||||
},
|
||||
PluginSkillRead => "plugin/skill/read" {
|
||||
params: v2::PluginSkillReadParams,
|
||||
serialization: global("config"),
|
||||
response: v2::PluginSkillReadResponse,
|
||||
},
|
||||
PluginShareSave => "plugin/share/save" {
|
||||
params: v2::PluginShareSaveParams,
|
||||
serialization: global("config"),
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::protocol::common::ServerNotification;
|
||||
use crate::protocol::item_builders::build_command_execution_begin_item;
|
||||
use crate::protocol::item_builders::build_command_execution_end_item;
|
||||
use crate::protocol::item_builders::build_file_change_begin_item;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use crate::protocol::v2::AgentMessageDeltaNotification;
|
||||
use crate::protocol::v2::CollabAgentState;
|
||||
@@ -12,6 +13,9 @@ use crate::protocol::v2::DynamicToolCallStatus;
|
||||
use crate::protocol::v2::FileChangePatchUpdatedNotification;
|
||||
use crate::protocol::v2::ItemCompletedNotification;
|
||||
use crate::protocol::v2::ItemStartedNotification;
|
||||
use crate::protocol::v2::McpToolCallError;
|
||||
use crate::protocol::v2::McpToolCallResult;
|
||||
use crate::protocol::v2::McpToolCallStatus;
|
||||
use crate::protocol::v2::PlanDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryPartAddedNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryTextDeltaNotification;
|
||||
@@ -20,6 +24,7 @@ use crate::protocol::v2::TerminalInteractionNotification;
|
||||
use crate::protocol::v2::ThreadItem;
|
||||
use codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem as CoreDynamicToolCallOutputContentItem;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Build the v2 app-server notification that directly corresponds to a single core event.
|
||||
@@ -71,6 +76,64 @@ pub fn item_event_to_server_notification(
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::McpToolCallBegin(begin_event) => {
|
||||
let item = ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
mcp_app_resource_uri: begin_event.mcp_app_resource_uri,
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::McpToolCallEnd(end_event) => {
|
||||
let status = if end_event.is_success() {
|
||||
McpToolCallStatus::Completed
|
||||
} else {
|
||||
McpToolCallStatus::Failed
|
||||
};
|
||||
let duration_ms = i64::try_from(end_event.duration.as_millis()).ok();
|
||||
let (result, error) = match &end_event.result {
|
||||
Ok(value) => (
|
||||
Some(Box::new(McpToolCallResult {
|
||||
content: value.content.clone(),
|
||||
structured_content: value.structured_content.clone(),
|
||||
meta: value.meta.clone(),
|
||||
})),
|
||||
None,
|
||||
),
|
||||
Err(message) => (
|
||||
None,
|
||||
Some(McpToolCallError {
|
||||
message: message.clone(),
|
||||
}),
|
||||
),
|
||||
};
|
||||
let item = ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status,
|
||||
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
|
||||
mcp_app_resource_uri: end_event.mcp_app_resource_uri,
|
||||
result,
|
||||
error,
|
||||
duration_ms,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
@@ -387,6 +450,13 @@ pub fn item_event_to_server_notification(
|
||||
item: item_completed_event.item.into(),
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyBegin(patch_begin_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_file_change_begin_item(&patch_begin_event),
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyUpdated(event) => {
|
||||
ServerNotification::FileChangePatchUpdated(FileChangePatchUpdatedNotification {
|
||||
thread_id,
|
||||
@@ -438,11 +508,17 @@ pub fn item_event_to_server_notification(
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::mcp::CallToolResult;
|
||||
use codex_protocol::protocol::CollabResumeBeginEvent;
|
||||
use codex_protocol::protocol::CollabResumeEndEvent;
|
||||
use codex_protocol::protocol::ExecCommandOutputDeltaEvent;
|
||||
use codex_protocol::protocol::ExecOutputStream;
|
||||
use codex_protocol::protocol::McpInvocation;
|
||||
use codex_protocol::protocol::McpToolCallBeginEvent;
|
||||
use codex_protocol::protocol::McpToolCallEndEvent;
|
||||
use pretty_assertions::assert_eq;
|
||||
use rmcp::model::Content;
|
||||
use std::time::Duration;
|
||||
|
||||
fn assert_item_started_server_notification(
|
||||
notification: ServerNotification,
|
||||
@@ -553,6 +629,179 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_begin_maps_to_item_started_notification_with_args() {
|
||||
let begin_event = McpToolCallBeginEvent {
|
||||
call_id: "call_123".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: Some(serde_json::json!({"server": ""})),
|
||||
},
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallBegin(begin_event.clone()),
|
||||
"thread-1",
|
||||
"turn_1",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn_1".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: serde_json::json!({"server": ""}),
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_begin_maps_to_item_started_notification_without_args() {
|
||||
let begin_event = McpToolCallBeginEvent {
|
||||
call_id: "call_456".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: None,
|
||||
},
|
||||
mcp_app_resource_uri: None,
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallBegin(begin_event.clone()),
|
||||
"thread-2",
|
||||
"turn_2",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-2".to_string(),
|
||||
turn_id: "turn_2".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: begin_event.call_id,
|
||||
server: begin_event.invocation.server,
|
||||
tool: begin_event.invocation.tool,
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_end_maps_to_item_completed_notification_on_success() {
|
||||
let content = vec![
|
||||
serde_json::to_value(Content::text("{\"resources\":[]}"))
|
||||
.expect("content should serialize"),
|
||||
];
|
||||
let result = CallToolResult {
|
||||
content: content.clone(),
|
||||
is_error: Some(false),
|
||||
structured_content: None,
|
||||
meta: Some(serde_json::json!({
|
||||
"ui/resourceUri": "ui://widget/list-resources.html"
|
||||
})),
|
||||
};
|
||||
|
||||
let end_event = McpToolCallEndEvent {
|
||||
call_id: "call_789".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: Some(serde_json::json!({"server": ""})),
|
||||
},
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
duration: Duration::from_nanos(92708),
|
||||
result: Ok(result),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallEnd(end_event.clone()),
|
||||
"thread-3",
|
||||
"turn_3",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-3".to_string(),
|
||||
turn_id: "turn_3".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status: McpToolCallStatus::Completed,
|
||||
arguments: serde_json::json!({"server": ""}),
|
||||
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
|
||||
result: Some(Box::new(McpToolCallResult {
|
||||
content,
|
||||
structured_content: None,
|
||||
meta: Some(serde_json::json!({
|
||||
"ui/resourceUri": "ui://widget/list-resources.html"
|
||||
})),
|
||||
})),
|
||||
error: None,
|
||||
duration_ms: Some(0),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_tool_call_end_maps_to_item_completed_notification_on_error() {
|
||||
let end_event = McpToolCallEndEvent {
|
||||
call_id: "call_err".to_string(),
|
||||
invocation: McpInvocation {
|
||||
server: "codex".to_string(),
|
||||
tool: "list_mcp_resources".to_string(),
|
||||
arguments: None,
|
||||
},
|
||||
mcp_app_resource_uri: None,
|
||||
duration: Duration::from_millis(1),
|
||||
result: Err("boom".to_string()),
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::McpToolCallEnd(end_event.clone()),
|
||||
"thread-4",
|
||||
"turn_4",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-4".to_string(),
|
||||
turn_id: "turn_4".to_string(),
|
||||
item: ThreadItem::McpToolCall {
|
||||
id: end_event.call_id,
|
||||
server: end_event.invocation.server,
|
||||
tool: end_event.invocation.tool,
|
||||
status: McpToolCallStatus::Failed,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: None,
|
||||
error: Some(McpToolCallError {
|
||||
message: "boom".to_string(),
|
||||
}),
|
||||
duration_ms: Some(1),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exec_command_output_delta_maps_to_command_execution_output_delta() {
|
||||
let notification = item_event_to_server_notification(
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
//! Shared builders for app-server [`ThreadItem`] values derived from compatibility events.
|
||||
//! Shared builders for synthetic [`ThreadItem`] values emitted by the app-server layer.
|
||||
//!
|
||||
//! Most live tool items now come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! These builders remain for approval flows, rebuilt legacy history, and other pre-execution
|
||||
//! paths where the underlying tool has not started or never starts at all.
|
||||
//! These items do not come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! Instead, the app-server synthesizes them so clients can render a coherent lifecycle for
|
||||
//! approvals and other pre-execution flows before the underlying tool has started or when the
|
||||
//! tool never starts at all.
|
||||
//!
|
||||
//! Keeping these builders in one place is useful for two reasons:
|
||||
//! - Live notifications and rebuilt `thread/read` history both need to construct the same
|
||||
|
||||
@@ -356,10 +356,7 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
@@ -380,10 +377,7 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ use std::path::PathBuf;
|
||||
|
||||
use crate::RequestId;
|
||||
use crate::protocol::common::AuthMode;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use codex_experimental_api_macros::ExperimentalApi;
|
||||
use codex_protocol::account::PlanType;
|
||||
use codex_protocol::account::ProviderAccount;
|
||||
@@ -31,8 +30,6 @@ use codex_protocol::config_types::Verbosity;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::config_types::WebSearchToolConfig;
|
||||
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
|
||||
use codex_protocol::items::McpToolCallError as CoreMcpToolCallError;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::TurnItem as CoreTurnItem;
|
||||
use codex_protocol::mcp::CallToolResult as CoreMcpCallToolResult;
|
||||
use codex_protocol::mcp::Resource as McpResource;
|
||||
@@ -2785,24 +2782,6 @@ impl From<CoreMcpCallToolResult> for McpServerToolCallResponse {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpCallToolResult> for McpToolCallResult {
|
||||
fn from(result: CoreMcpCallToolResult) -> Self {
|
||||
Self {
|
||||
content: result.content,
|
||||
structured_content: result.structured_content,
|
||||
meta: result.meta,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallError> for McpToolCallError {
|
||||
fn from(error: CoreMcpToolCallError) -> Self {
|
||||
Self {
|
||||
message: error.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -3631,9 +3610,8 @@ pub struct ThreadStartParams {
|
||||
#[experimental("thread/start.experimentalRawEvents")]
|
||||
#[serde(default)]
|
||||
pub experimental_raw_events: bool,
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on resume/fork/read.
|
||||
#[experimental("thread/start.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3763,9 +3741,8 @@ pub struct ThreadResumeParams {
|
||||
#[experimental("thread/resume.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
#[experimental("thread/resume.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3869,9 +3846,8 @@ pub struct ThreadForkParams {
|
||||
#[experimental("thread/fork.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
#[experimental("thread/fork.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -4633,22 +4609,6 @@ pub struct PluginReadResponse {
|
||||
pub plugin: PluginDetail,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadParams {
|
||||
pub remote_marketplace_name: String,
|
||||
pub remote_plugin_id: String,
|
||||
pub skill_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadResponse {
|
||||
pub contents: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -4675,7 +4635,7 @@ pub struct PluginShareListParams {}
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListResponse {
|
||||
pub data: Vec<PluginShareListItem>,
|
||||
pub data: Vec<PluginSummary>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -4690,15 +4650,6 @@ pub struct PluginShareDeleteParams {
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareDeleteResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListItem {
|
||||
pub plugin: PluginSummary,
|
||||
pub share_url: String,
|
||||
pub local_plugin_path: Option<AbsolutePathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[ts(rename_all = "snake_case")]
|
||||
@@ -6486,10 +6437,6 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
query: search.query,
|
||||
action: Some(WebSearchAction::from(search.action)),
|
||||
},
|
||||
CoreTurnItem::ImageView(image) => ThreadItem::ImageView {
|
||||
id: image.id,
|
||||
path: image.path,
|
||||
},
|
||||
CoreTurnItem::ImageGeneration(image) => ThreadItem::ImageGeneration {
|
||||
id: image.id,
|
||||
status: image.status,
|
||||
@@ -6497,32 +6444,6 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
result: image.result,
|
||||
saved_path: image.saved_path,
|
||||
},
|
||||
CoreTurnItem::FileChange(file_change) => ThreadItem::FileChange {
|
||||
id: file_change.id,
|
||||
changes: convert_patch_changes(&file_change.changes),
|
||||
status: file_change
|
||||
.status
|
||||
.as_ref()
|
||||
.map(PatchApplyStatus::from)
|
||||
.unwrap_or(PatchApplyStatus::InProgress),
|
||||
},
|
||||
CoreTurnItem::McpToolCall(mcp) => {
|
||||
let duration_ms = mcp
|
||||
.duration
|
||||
.and_then(|duration| i64::try_from(duration.as_millis()).ok());
|
||||
|
||||
ThreadItem::McpToolCall {
|
||||
id: mcp.id,
|
||||
server: mcp.server,
|
||||
tool: mcp.tool,
|
||||
status: McpToolCallStatus::from(mcp.status),
|
||||
arguments: mcp.arguments,
|
||||
mcp_app_resource_uri: mcp.mcp_app_resource_uri,
|
||||
result: mcp.result.map(McpToolCallResult::from).map(Box::new),
|
||||
error: mcp.error.map(McpToolCallError::from),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
CoreTurnItem::ContextCompaction(compaction) => {
|
||||
ThreadItem::ContextCompaction { id: compaction.id }
|
||||
}
|
||||
@@ -6632,16 +6553,6 @@ impl From<&CorePatchApplyStatus> for PatchApplyStatus {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallStatus> for McpToolCallStatus {
|
||||
fn from(value: CoreMcpToolCallStatus) -> Self {
|
||||
match value {
|
||||
CoreMcpToolCallStatus::InProgress => McpToolCallStatus::InProgress,
|
||||
CoreMcpToolCallStatus::Completed => McpToolCallStatus::Completed,
|
||||
CoreMcpToolCallStatus::Failed => McpToolCallStatus::Failed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -8142,15 +8053,10 @@ mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::items::AgentMessageContent;
|
||||
use codex_protocol::items::AgentMessageItem;
|
||||
use codex_protocol::items::FileChangeItem;
|
||||
use codex_protocol::items::ImageViewItem;
|
||||
use codex_protocol::items::McpToolCallItem;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::ReasoningItem;
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::items::UserMessageItem;
|
||||
use codex_protocol::items::WebSearchItem;
|
||||
use codex_protocol::mcp::CallToolResult;
|
||||
use codex_protocol::models::WebSearchAction as CoreWebSearchAction;
|
||||
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
|
||||
use codex_protocol::user_input::UserInput as CoreUserInput;
|
||||
@@ -8160,7 +8066,6 @@ mod tests {
|
||||
use serde_json::json;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
fn absolute_path_string(path: &str) -> String {
|
||||
let path = format!("/{}", path.trim_start_matches('/'));
|
||||
@@ -10428,111 +10333,6 @@ mod tests {
|
||||
}),
|
||||
}
|
||||
);
|
||||
|
||||
let image_view_item = TurnItem::ImageView(ImageViewItem {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(image_view_item),
|
||||
ThreadItem::ImageView {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
}
|
||||
);
|
||||
|
||||
let file_change_item = TurnItem::FileChange(FileChangeItem {
|
||||
id: "patch-1".to_string(),
|
||||
changes: [(
|
||||
PathBuf::from("README.md"),
|
||||
codex_protocol::protocol::FileChange::Add {
|
||||
content: "hello\n".to_string(),
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
status: Some(codex_protocol::protocol::PatchApplyStatus::Completed),
|
||||
auto_approved: None,
|
||||
stdout: Some("Done!".to_string()),
|
||||
stderr: Some(String::new()),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(file_change_item),
|
||||
ThreadItem::FileChange {
|
||||
id: "patch-1".to_string(),
|
||||
changes: vec![FileUpdateChange {
|
||||
path: "README.md".to_string(),
|
||||
kind: PatchChangeKind::Add,
|
||||
diff: "hello\n".to_string(),
|
||||
}],
|
||||
status: PatchApplyStatus::Completed,
|
||||
}
|
||||
);
|
||||
|
||||
let mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
status: CoreMcpToolCallStatus::InProgress,
|
||||
result: None,
|
||||
error: None,
|
||||
duration: None,
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
}
|
||||
);
|
||||
|
||||
let completed_mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
status: CoreMcpToolCallStatus::Completed,
|
||||
result: Some(CallToolResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
is_error: Some(false),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
}),
|
||||
error: None,
|
||||
duration: Some(Duration::from_millis(42)),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(completed_mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::Completed,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: Some(Box::new(McpToolCallResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
})),
|
||||
error: None,
|
||||
duration_ms: Some(42),
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -10867,23 +10667,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_skill_read_params_serialization_uses_remote_plugin_id() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginSkillReadParams {
|
||||
remote_marketplace_name: "chatgpt-global".to_string(),
|
||||
remote_plugin_id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
skill_name: "plan-work".to_string(),
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"remoteMarketplaceName": "chatgpt-global",
|
||||
"remotePluginId": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"skillName": "plan-work",
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_params_and_response_serialization_use_camel_case_fields() {
|
||||
let plugin_path = if cfg!(windows) {
|
||||
@@ -10949,41 +10732,33 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_list_response_serializes_share_items() {
|
||||
fn plugin_share_list_response_serializes_plugin_summaries() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginShareListResponse {
|
||||
data: vec![PluginShareListItem {
|
||||
plugin: PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
availability: PluginAvailability::Available,
|
||||
interface: None,
|
||||
},
|
||||
share_url: "https://chatgpt.example/plugins/share/share-key-1".to_string(),
|
||||
local_plugin_path: None,
|
||||
data: vec![PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
availability: PluginAvailability::Available,
|
||||
interface: None,
|
||||
}],
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"data": [{
|
||||
"plugin": {
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"availability": "AVAILABLE",
|
||||
"interface": null,
|
||||
},
|
||||
"shareUrl": "https://chatgpt.example/plugins/share/share-key-1",
|
||||
"localPluginPath": null,
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"availability": "AVAILABLE",
|
||||
"interface": null,
|
||||
}],
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
load("//:defs.bzl", "codex_rust_crate")
|
||||
|
||||
codex_rust_crate(
|
||||
name = "app-server-transport",
|
||||
crate_name = "codex_app_server_transport",
|
||||
)
|
||||
@@ -1,58 +0,0 @@
|
||||
[package]
|
||||
name = "codex-app-server-transport"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "codex_app_server_transport"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
axum = { workspace = true, default-features = false, features = [
|
||||
"http1",
|
||||
"json",
|
||||
"tokio",
|
||||
"ws",
|
||||
] }
|
||||
base64 = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
codex-api = { workspace = true }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-core = { workspace = true }
|
||||
codex-login = { workspace = true }
|
||||
codex-model-provider = { workspace = true }
|
||||
codex-state = { workspace = true }
|
||||
codex-uds = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
codex-utils-rustls-provider = { workspace = true }
|
||||
constant_time_eq = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
gethostname = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
owo-colors = { workspace = true, features = ["supports-colors"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
time = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"io-std",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
] }
|
||||
tokio-tungstenite = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
url = { workspace = true }
|
||||
uuid = { workspace = true, features = ["serde", "v7"] }
|
||||
|
||||
[dev-dependencies]
|
||||
chrono = { workspace = true }
|
||||
codex-config = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
@@ -1,20 +0,0 @@
|
||||
mod outgoing_message;
|
||||
mod transport;
|
||||
|
||||
pub use outgoing_message::ConnectionId;
|
||||
pub use outgoing_message::OutgoingError;
|
||||
pub use outgoing_message::OutgoingMessage;
|
||||
pub use outgoing_message::OutgoingResponse;
|
||||
pub use outgoing_message::QueuedOutgoingMessage;
|
||||
pub use transport::AppServerTransport;
|
||||
pub use transport::AppServerTransportParseError;
|
||||
pub use transport::CHANNEL_CAPACITY;
|
||||
pub use transport::ConnectionOrigin;
|
||||
pub use transport::RemoteControlHandle;
|
||||
pub use transport::TransportEvent;
|
||||
pub use transport::app_server_control_socket_path;
|
||||
pub use transport::auth;
|
||||
pub use transport::start_control_socket_acceptor;
|
||||
pub use transport::start_remote_control;
|
||||
pub use transport::start_stdio_connection;
|
||||
pub use transport::start_websocket_acceptor;
|
||||
@@ -1,58 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::Result;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use serde::Serialize;
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
/// Stable identifier for a transport connection.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
pub struct ConnectionId(pub u64);
|
||||
|
||||
impl fmt::Display for ConnectionId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Outgoing message from the server to the client.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum OutgoingMessage {
|
||||
Request(ServerRequest),
|
||||
/// AppServerNotification is specific to the case where this is run as an
|
||||
/// "app server" as opposed to an MCP server.
|
||||
AppServerNotification(ServerNotification),
|
||||
Response(OutgoingResponse),
|
||||
Error(OutgoingError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub struct OutgoingResponse {
|
||||
pub id: RequestId,
|
||||
pub result: Result,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub struct OutgoingError {
|
||||
pub error: JSONRPCErrorError,
|
||||
pub id: RequestId,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct QueuedOutgoingMessage {
|
||||
pub message: OutgoingMessage,
|
||||
pub write_complete_tx: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
impl QueuedOutgoingMessage {
|
||||
pub fn new(message: OutgoingMessage) -> Self {
|
||||
Self {
|
||||
message,
|
||||
write_complete_tx: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,478 +0,0 @@
|
||||
pub mod auth;
|
||||
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::OutgoingError;
|
||||
use crate::outgoing_message::OutgoingMessage;
|
||||
use crate::outgoing_message::QueuedOutgoingMessage;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_core::config::find_codex_home;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::error;
|
||||
use tracing::warn;
|
||||
|
||||
/// Size of the bounded channels used to communicate between tasks. The value
|
||||
/// is a balance between throughput and memory usage - 128 messages should be
|
||||
/// plenty for an interactive CLI.
|
||||
pub const CHANNEL_CAPACITY: usize = 128;
|
||||
|
||||
mod remote_control;
|
||||
mod stdio;
|
||||
mod unix_socket;
|
||||
#[cfg(test)]
|
||||
mod unix_socket_tests;
|
||||
mod websocket;
|
||||
|
||||
pub use remote_control::RemoteControlHandle;
|
||||
pub use remote_control::start_remote_control;
|
||||
pub use stdio::start_stdio_connection;
|
||||
pub use unix_socket::start_control_socket_acceptor;
|
||||
pub use websocket::start_websocket_acceptor;
|
||||
|
||||
const OVERLOADED_ERROR_CODE: i64 = -32001;
|
||||
|
||||
const APP_SERVER_CONTROL_SOCKET_DIR_NAME: &str = "app-server-control";
|
||||
const APP_SERVER_CONTROL_SOCKET_FILE_NAME: &str = "app-server-control.sock";
|
||||
|
||||
pub fn app_server_control_socket_path(codex_home: &Path) -> std::io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(
|
||||
codex_home
|
||||
.join(APP_SERVER_CONTROL_SOCKET_DIR_NAME)
|
||||
.join(APP_SERVER_CONTROL_SOCKET_FILE_NAME),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum AppServerTransport {
|
||||
Stdio,
|
||||
UnixSocket { socket_path: AbsolutePathBuf },
|
||||
WebSocket { bind_address: SocketAddr },
|
||||
Off,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum AppServerTransportParseError {
|
||||
UnsupportedListenUrl(String),
|
||||
InvalidUnixSocketPath { listen_url: String, message: String },
|
||||
InvalidWebSocketListenUrl(String),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for AppServerTransportParseError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"unsupported --listen URL `{listen_url}`; expected `stdio://`, `unix://`, `unix://PATH`, `ws://IP:PORT`, or `off`"
|
||||
),
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url,
|
||||
message,
|
||||
} => write!(
|
||||
f,
|
||||
"invalid unix socket --listen URL `{listen_url}`; failed to resolve socket path: {message}"
|
||||
),
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for AppServerTransportParseError {}
|
||||
|
||||
impl AppServerTransport {
|
||||
pub const DEFAULT_LISTEN_URL: &'static str = "stdio://";
|
||||
|
||||
pub fn from_listen_url(listen_url: &str) -> Result<Self, AppServerTransportParseError> {
|
||||
if listen_url == Self::DEFAULT_LISTEN_URL {
|
||||
return Ok(Self::Stdio);
|
||||
}
|
||||
|
||||
if let Some(raw_socket_path) = listen_url.strip_prefix("unix://") {
|
||||
let socket_path = if raw_socket_path.is_empty() {
|
||||
let codex_home = find_codex_home().map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: format!("failed to resolve CODEX_HOME: {err}"),
|
||||
}
|
||||
})?;
|
||||
app_server_control_socket_path(&codex_home).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
} else {
|
||||
AbsolutePathBuf::relative_to_current_dir(raw_socket_path).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
};
|
||||
return Ok(Self::UnixSocket { socket_path });
|
||||
}
|
||||
|
||||
if listen_url == "off" {
|
||||
return Ok(Self::Off);
|
||||
}
|
||||
|
||||
if let Some(socket_addr) = listen_url.strip_prefix("ws://") {
|
||||
let bind_address = socket_addr.parse::<SocketAddr>().map_err(|_| {
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string())
|
||||
})?;
|
||||
return Ok(Self::WebSocket { bind_address });
|
||||
}
|
||||
|
||||
Err(AppServerTransportParseError::UnsupportedListenUrl(
|
||||
listen_url.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for AppServerTransport {
|
||||
type Err = AppServerTransportParseError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Self::from_listen_url(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum TransportEvent {
|
||||
ConnectionOpened {
|
||||
connection_id: ConnectionId,
|
||||
origin: ConnectionOrigin,
|
||||
writer: mpsc::Sender<QueuedOutgoingMessage>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
},
|
||||
ConnectionClosed {
|
||||
connection_id: ConnectionId,
|
||||
},
|
||||
IncomingMessage {
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ConnectionOrigin {
|
||||
Stdio,
|
||||
InProcess,
|
||||
WebSocket,
|
||||
RemoteControl,
|
||||
}
|
||||
|
||||
impl ConnectionOrigin {
|
||||
pub fn allows_device_key_requests(self) -> bool {
|
||||
// Device-key endpoints are only for local connections that own the app-server instance.
|
||||
// Do not include remote transports such as SSH or remote-control websocket connections.
|
||||
matches!(self, Self::Stdio | Self::InProcess)
|
||||
}
|
||||
}
|
||||
|
||||
static CONNECTION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
fn next_connection_id() -> ConnectionId {
|
||||
ConnectionId(CONNECTION_ID_COUNTER.fetch_add(1, Ordering::Relaxed))
|
||||
}
|
||||
|
||||
async fn forward_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
payload: &str,
|
||||
) -> bool {
|
||||
match serde_json::from_str::<JSONRPCMessage>(payload) {
|
||||
Ok(message) => {
|
||||
enqueue_incoming_message(transport_event_tx, writer, connection_id, message).await
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Failed to deserialize JSONRPCMessage: {err}");
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn enqueue_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
) -> bool {
|
||||
let event = TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message,
|
||||
};
|
||||
match transport_event_tx.try_send(event) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Request(request),
|
||||
})) => {
|
||||
let overload_error = OutgoingMessage::Error(OutgoingError {
|
||||
id: request.id,
|
||||
error: JSONRPCErrorError {
|
||||
code: OVERLOADED_ERROR_CODE,
|
||||
message: "Server overloaded; retry later.".to_string(),
|
||||
data: None,
|
||||
},
|
||||
});
|
||||
match writer.try_send(QueuedOutgoingMessage::new(overload_error)) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(_overload_error)) => {
|
||||
warn!(
|
||||
"dropping overload response for connection {:?}: outbound queue is full",
|
||||
connection_id
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Full(event)) => transport_event_tx.send(event).await.is_ok(),
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_outgoing_message(outgoing_message: OutgoingMessage) -> Option<String> {
|
||||
let value = match serde_json::to_value(outgoing_message) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
error!("Failed to convert OutgoingMessage to JSON value: {err}");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
match serde_json::to_string(&value) {
|
||||
Ok(json) => Some(json),
|
||||
Err(err) => {
|
||||
error!("Failed to serialize JSONRPCMessage: {err}");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCRequest;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
#[test]
|
||||
fn listen_off_parses_as_off_transport() {
|
||||
assert_eq!(
|
||||
AppServerTransport::from_listen_url("off"),
|
||||
Ok(AppServerTransport::Off)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_returns_overload_error_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
assert!(
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request).await
|
||||
);
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should stay queued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let overload = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("request should receive overload error");
|
||||
let overload_json =
|
||||
serde_json::to_value(overload.message).expect("serialize overload error");
|
||||
assert_eq!(
|
||||
overload_json,
|
||||
json!({
|
||||
"id": 7,
|
||||
"error": {
|
||||
"code": OVERLOADED_ERROR_CODE,
|
||||
"message": "Server overloaded; retry later."
|
||||
}
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_response_waits_instead_of_dropping_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, _writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let response = JSONRPCMessage::Response(JSONRPCResponse {
|
||||
id: RequestId::Integer(7),
|
||||
result: json!({"ok": true}),
|
||||
});
|
||||
let transport_event_tx_for_enqueue = transport_event_tx.clone();
|
||||
let writer_tx_for_enqueue = writer_tx.clone();
|
||||
let enqueue_handle = tokio::spawn(async move {
|
||||
enqueue_incoming_message(
|
||||
&transport_event_tx_for_enqueue,
|
||||
&writer_tx_for_enqueue,
|
||||
connection_id,
|
||||
response,
|
||||
)
|
||||
.await
|
||||
});
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should be dequeued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let enqueue_result = enqueue_handle.await.expect("enqueue task should not panic");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let forwarded_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("response should be forwarded instead of dropped");
|
||||
match forwarded_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message: JSONRPCMessage::Response(JSONRPCResponse { id, result }),
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(id, RequestId::Integer(7));
|
||||
assert_eq!(result, json!({"ok": true}));
|
||||
}
|
||||
_ => panic!("expected forwarded response message"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_does_not_block_when_writer_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, _transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
}),
|
||||
})
|
||||
.await
|
||||
.expect("transport queue should accept first message");
|
||||
|
||||
writer_tx
|
||||
.send(QueuedOutgoingMessage::new(
|
||||
OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
|
||||
ConfigWarningNotification {
|
||||
summary: "queued".to_string(),
|
||||
details: None,
|
||||
path: None,
|
||||
range: None,
|
||||
},
|
||||
)),
|
||||
))
|
||||
.await
|
||||
.expect("writer queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
|
||||
let enqueue_result = timeout(
|
||||
Duration::from_millis(100),
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request),
|
||||
)
|
||||
.await
|
||||
.expect("enqueue should not block while writer queue is full");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let queued_outgoing = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("writer queue should still contain original message");
|
||||
let queued_json =
|
||||
serde_json::to_value(queued_outgoing.message).expect("serialize queued message");
|
||||
assert_eq!(
|
||||
queued_json,
|
||||
json!({
|
||||
"method": "configWarning",
|
||||
"params": {
|
||||
"summary": "queued",
|
||||
"details": null,
|
||||
},
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -30,6 +30,7 @@ axum = { workspace = true, default-features = false, features = [
|
||||
"ws",
|
||||
] }
|
||||
codex-analytics = { workspace = true }
|
||||
codex-api = { workspace = true }
|
||||
codex-arg0 = { workspace = true }
|
||||
codex-cloud-requirements = { workspace = true }
|
||||
codex-config = { workspace = true }
|
||||
@@ -57,7 +58,6 @@ codex-model-provider = { workspace = true }
|
||||
codex-models-manager = { workspace = true }
|
||||
codex-protocol = { workspace = true }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-app-server-transport = { workspace = true }
|
||||
codex-feedback = { workspace = true }
|
||||
codex-rmcp-client = { workspace = true }
|
||||
codex-rollout = { workspace = true }
|
||||
@@ -65,11 +65,18 @@ codex-sandboxing = { workspace = true }
|
||||
codex-state = { workspace = true }
|
||||
codex-thread-store = { workspace = true }
|
||||
codex-tools = { workspace = true }
|
||||
codex-uds = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
codex-utils-json-to-toml = { workspace = true }
|
||||
codex-utils-rustls-provider = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
constant_time_eq = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
gethostname = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
owo-colors = { workspace = true, features = ["supports-colors"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
@@ -86,6 +93,7 @@ tokio = { workspace = true, features = [
|
||||
"signal",
|
||||
] }
|
||||
tokio-util = { workspace = true }
|
||||
tokio-tungstenite = { workspace = true }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] }
|
||||
url = { workspace = true }
|
||||
@@ -103,7 +111,6 @@ core_test_support = { workspace = true }
|
||||
codex-model-provider-info = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
flate2 = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
opentelemetry = { workspace = true }
|
||||
opentelemetry_sdk = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
|
||||
@@ -203,7 +203,6 @@ Example with notification opt-out:
|
||||
- `marketplace/upgrade` — upgrade all configured Git plugin marketplaces, or one named marketplace when `marketplaceName` is provided. Returns selected marketplace names, upgraded roots, and per-marketplace errors.
|
||||
- `plugin/list` — list discovered plugin marketplaces and plugin state, including effective marketplace install/auth policy metadata, plugin `availability` (`AVAILABLE` by default or `DISABLED_BY_ADMIN` for remote plugins blocked upstream), fail-open `marketplaceLoadErrors` entries for marketplace files that could not be parsed or loaded, and best-effort `featuredPluginIds` for the official curated marketplace. `interface.category` uses the marketplace category when present; otherwise it falls back to the plugin manifest category (**under development; do not call from production clients yet**).
|
||||
- `plugin/read` — read one plugin by `marketplacePath` plus `pluginName`, returning marketplace info, a list-style `summary`, manifest descriptions/interface metadata, and bundled skills/apps/MCP server names. Returned plugin skills include their current `enabled` state after local config filtering. Plugin app summaries also include `needsAuth` when the server can determine connector accessibility (**under development; do not call from production clients yet**).
|
||||
- `plugin/skill/read` — read remote plugin skill markdown on demand by `remoteMarketplaceName`, `remotePluginId`, and `skillName`. This lets clients preview uninstalled remote plugin skills without downloading the plugin bundle.
|
||||
- `skills/changed` — notification emitted when watched local skill files change.
|
||||
- `app/list` — list available apps.
|
||||
- `device/key/create` — create or load a controller-local device signing key for an account/client binding. This local-key API is available only over local transports such as stdio and in-process; remote transports reject it. Hardware-backed providers are the target protection class; an OS-protected non-extractable fallback is allowed only with `protectionPolicy: "allow_os_protected_nonextractable"` and returns the reported `protectionClass`.
|
||||
@@ -307,6 +306,8 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c
|
||||
|
||||
Like `thread/resume`, experimental clients can pass `excludeTurns: true` to `thread/fork` to return only thread metadata in `thread.turns` and page history with `thread/turns/list`. In that mode the server skips replaying restored `thread/tokenUsage/updated`, which keeps the fork path from rebuilding turns just to attribute historical usage.
|
||||
|
||||
Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `persistExtendedHistory: true` to persist a richer subset of ThreadItems for non-lossy history when calling `thread/read`, `thread/resume`, and `thread/fork` later. This does not backfill events that were not persisted previously.
|
||||
|
||||
### Example: List threads (with pagination & filters)
|
||||
|
||||
`thread/list` lets you render a history UI. Results default to `createdAt` (newest first) descending. Pass any combination of:
|
||||
@@ -401,7 +402,7 @@ Later, after the idle unload timeout:
|
||||
|
||||
### Example: Read a thread
|
||||
|
||||
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want thread history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
|
||||
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the full rollout history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
|
||||
|
||||
```json
|
||||
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
use crate::codex_message_processor::read_rollout_items_from_rollout;
|
||||
use crate::codex_message_processor::read_summary_from_rollout;
|
||||
use crate::codex_message_processor::summary_to_thread;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::outgoing_message::ClientRequestResult;
|
||||
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
|
||||
use crate::request_processors::build_api_turns_from_rollout_items;
|
||||
use crate::request_processors::read_rollout_items_from_rollout;
|
||||
use crate::request_processors::read_summary_from_rollout;
|
||||
use crate::request_processors::summary_to_thread;
|
||||
use crate::server_request_error::is_turn_transition_server_request_error;
|
||||
use crate::thread_state::ThreadState;
|
||||
use crate::thread_state::TurnSummary;
|
||||
@@ -30,6 +29,7 @@ use codex_app_server_protocol::ExecPolicyAmendment as V2ExecPolicyAmendment;
|
||||
use codex_app_server_protocol::FileChangeApprovalDecision;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalParams;
|
||||
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
|
||||
use codex_app_server_protocol::FileUpdateChange;
|
||||
use codex_app_server_protocol::GrantedPermissionProfile as V2GrantedPermissionProfile;
|
||||
use codex_app_server_protocol::GuardianWarningNotification;
|
||||
use codex_app_server_protocol::HookCompletedNotification;
|
||||
@@ -46,6 +46,7 @@ use codex_app_server_protocol::ModelVerificationNotification;
|
||||
use codex_app_server_protocol::NetworkApprovalContext as V2NetworkApprovalContext;
|
||||
use codex_app_server_protocol::NetworkPolicyAmendment as V2NetworkPolicyAmendment;
|
||||
use codex_app_server_protocol::NetworkPolicyRuleAction as V2NetworkPolicyRuleAction;
|
||||
use codex_app_server_protocol::PatchApplyStatus;
|
||||
use codex_app_server_protocol::PermissionsRequestApprovalParams;
|
||||
use codex_app_server_protocol::PermissionsRequestApprovalResponse;
|
||||
use codex_app_server_protocol::RawResponseItemCompletedNotification;
|
||||
@@ -81,7 +82,11 @@ use codex_app_server_protocol::TurnPlanUpdatedNotification;
|
||||
use codex_app_server_protocol::TurnStartedNotification;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::WarningNotification;
|
||||
use codex_app_server_protocol::build_file_change_approval_request_item;
|
||||
use codex_app_server_protocol::build_file_change_end_item;
|
||||
use codex_app_server_protocol::build_item_from_guardian_event;
|
||||
use codex_app_server_protocol::build_turns_from_rollout_items;
|
||||
use codex_app_server_protocol::convert_patch_changes;
|
||||
use codex_app_server_protocol::guardian_auto_approval_review_notification;
|
||||
use codex_app_server_protocol::item_event_to_server_notification;
|
||||
use codex_core::CodexThread;
|
||||
@@ -519,7 +524,28 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
let permission_guard = thread_watch_manager
|
||||
.note_permission_requested(&conversation_id.to_string())
|
||||
.await;
|
||||
// Until we migrate the core to be aware of a first class FileChangeItem
|
||||
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
|
||||
let item_id = event.call_id.clone();
|
||||
let patch_changes = convert_patch_changes(&event.changes);
|
||||
let first_start = {
|
||||
let mut state = thread_state.lock().await;
|
||||
state
|
||||
.turn_summary
|
||||
.file_change_started
|
||||
.insert(item_id.clone())
|
||||
};
|
||||
if first_start {
|
||||
let item = build_file_change_approval_request_item(&event);
|
||||
let notification = ItemStartedNotification {
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
item,
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ItemStarted(notification))
|
||||
.await;
|
||||
}
|
||||
|
||||
let params = FileChangeRequestApprovalParams {
|
||||
thread_id: conversation_id.to_string(),
|
||||
@@ -533,10 +559,14 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
.await;
|
||||
tokio::spawn(async move {
|
||||
on_file_change_request_approval_response(
|
||||
event_turn_id,
|
||||
conversation_id,
|
||||
item_id,
|
||||
patch_changes,
|
||||
pending_request_id,
|
||||
rx,
|
||||
conversation,
|
||||
outgoing,
|
||||
thread_state.clone(),
|
||||
permission_guard,
|
||||
)
|
||||
@@ -836,11 +866,9 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
crate::dynamic_tools::on_call_response(call_id, rx, conversation).await;
|
||||
});
|
||||
}
|
||||
EventMsg::McpToolCallBegin(_) | EventMsg::McpToolCallEnd(_) => {
|
||||
// Deprecated MCP tool-call events are still fanned out for legacy clients.
|
||||
// App-server v2 receives the canonical TurnItem::McpToolCall lifecycle instead.
|
||||
}
|
||||
msg @ (EventMsg::DynamicToolCallResponse(_)
|
||||
| EventMsg::McpToolCallBegin(_)
|
||||
| EventMsg::McpToolCallEnd(_)
|
||||
| EventMsg::CollabAgentSpawnBegin(_)
|
||||
| EventMsg::CollabAgentSpawnEnd(_)
|
||||
| EventMsg::CollabAgentInteractionBegin(_)
|
||||
@@ -956,7 +984,28 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
}))
|
||||
.await;
|
||||
}
|
||||
EventMsg::ViewImageToolCall(_) => {}
|
||||
EventMsg::ViewImageToolCall(view_image_event) => {
|
||||
let item = ThreadItem::ImageView {
|
||||
id: view_image_event.call_id.clone(),
|
||||
path: view_image_event.path.clone(),
|
||||
};
|
||||
let started = ItemStartedNotification {
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
item: item.clone(),
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ItemStarted(started))
|
||||
.await;
|
||||
let completed = ItemCompletedNotification {
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id: event_turn_id.clone(),
|
||||
item,
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ItemCompleted(completed))
|
||||
.await;
|
||||
}
|
||||
EventMsg::EnteredReviewMode(review_request) => {
|
||||
let review = review_request
|
||||
.user_facing_hint
|
||||
@@ -1055,9 +1104,40 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
EventMsg::PatchApplyBegin(_) | EventMsg::PatchApplyEnd(_) => {
|
||||
// Core still fans out these deprecated events for legacy clients;
|
||||
// v2 clients receive the canonical FileChange item instead.
|
||||
EventMsg::PatchApplyBegin(patch_begin_event) => {
|
||||
// Until we migrate the core to be aware of a first class FileChangeItem
|
||||
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
|
||||
let item_id = patch_begin_event.call_id.clone();
|
||||
|
||||
let first_start = {
|
||||
let mut state = thread_state.lock().await;
|
||||
state
|
||||
.turn_summary
|
||||
.file_change_started
|
||||
.insert(item_id.clone())
|
||||
};
|
||||
if first_start {
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::PatchApplyBegin(patch_begin_event),
|
||||
&conversation_id.to_string(),
|
||||
&event_turn_id,
|
||||
);
|
||||
outgoing.send_server_notification(notification).await;
|
||||
}
|
||||
}
|
||||
EventMsg::PatchApplyEnd(patch_end_event) => {
|
||||
// Until we migrate the core to be aware of a first class FileChangeItem
|
||||
// and emit the corresponding EventMsg, we repurpose the call_id as the item_id.
|
||||
let item_id = patch_end_event.call_id.clone();
|
||||
complete_file_change_item(
|
||||
conversation_id,
|
||||
item_id,
|
||||
build_file_change_end_item(&patch_end_event),
|
||||
event_turn_id.clone(),
|
||||
&outgoing,
|
||||
&thread_state,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
EventMsg::ExecCommandBegin(exec_command_begin_event) => {
|
||||
if matches!(
|
||||
@@ -1179,7 +1259,7 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
let mut thread = summary_to_thread(summary, &fallback_cwd);
|
||||
match read_rollout_items_from_rollout(rollout_path.as_path()).await {
|
||||
Ok(items) => {
|
||||
thread.turns = build_api_turns_from_rollout_items(&items);
|
||||
thread.turns = build_turns_from_rollout_items(&items);
|
||||
thread.status = thread_watch_manager
|
||||
.loaded_status_for_thread(&thread.id)
|
||||
.await;
|
||||
@@ -1345,6 +1425,31 @@ async fn emit_turn_completed_with_status(
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn complete_file_change_item(
|
||||
conversation_id: ThreadId,
|
||||
item_id: String,
|
||||
item: ThreadItem,
|
||||
turn_id: String,
|
||||
outgoing: &ThreadScopedOutgoingMessageSender,
|
||||
thread_state: &Arc<Mutex<ThreadState>>,
|
||||
) {
|
||||
thread_state
|
||||
.lock()
|
||||
.await
|
||||
.turn_summary
|
||||
.file_change_started
|
||||
.remove(&item_id);
|
||||
|
||||
let notification = ItemCompletedNotification {
|
||||
thread_id: conversation_id.to_string(),
|
||||
turn_id,
|
||||
item,
|
||||
};
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ItemCompleted(notification))
|
||||
.await;
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn start_command_execution_item(
|
||||
conversation_id: &ThreadId,
|
||||
@@ -1897,28 +2002,38 @@ fn render_review_output_text(output: &ReviewOutputEvent) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
fn map_file_change_approval_decision(decision: FileChangeApprovalDecision) -> ReviewDecision {
|
||||
fn map_file_change_approval_decision(
|
||||
decision: FileChangeApprovalDecision,
|
||||
) -> (ReviewDecision, Option<PatchApplyStatus>) {
|
||||
match decision {
|
||||
FileChangeApprovalDecision::Accept => ReviewDecision::Approved,
|
||||
FileChangeApprovalDecision::AcceptForSession => ReviewDecision::ApprovedForSession,
|
||||
FileChangeApprovalDecision::Decline => ReviewDecision::Denied,
|
||||
FileChangeApprovalDecision::Cancel => ReviewDecision::Abort,
|
||||
FileChangeApprovalDecision::Accept => (ReviewDecision::Approved, None),
|
||||
FileChangeApprovalDecision::AcceptForSession => (ReviewDecision::ApprovedForSession, None),
|
||||
FileChangeApprovalDecision::Decline => {
|
||||
(ReviewDecision::Denied, Some(PatchApplyStatus::Declined))
|
||||
}
|
||||
FileChangeApprovalDecision::Cancel => {
|
||||
(ReviewDecision::Abort, Some(PatchApplyStatus::Declined))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn on_file_change_request_approval_response(
|
||||
event_turn_id: String,
|
||||
conversation_id: ThreadId,
|
||||
item_id: String,
|
||||
changes: Vec<FileUpdateChange>,
|
||||
pending_request_id: RequestId,
|
||||
receiver: oneshot::Receiver<ClientRequestResult>,
|
||||
codex: Arc<CodexThread>,
|
||||
outgoing: ThreadScopedOutgoingMessageSender,
|
||||
thread_state: Arc<Mutex<ThreadState>>,
|
||||
permission_guard: ThreadWatchActiveGuard,
|
||||
) {
|
||||
let response = receiver.await;
|
||||
resolve_server_request_on_thread_listener(&thread_state, pending_request_id).await;
|
||||
drop(permission_guard);
|
||||
let decision = match response {
|
||||
let (decision, completion_status) = match response {
|
||||
Ok(Ok(value)) => {
|
||||
let response = serde_json::from_value::<FileChangeRequestApprovalResponse>(value)
|
||||
.unwrap_or_else(|err| {
|
||||
@@ -1928,19 +2043,39 @@ async fn on_file_change_request_approval_response(
|
||||
}
|
||||
});
|
||||
|
||||
map_file_change_approval_decision(response.decision)
|
||||
let (decision, completion_status) =
|
||||
map_file_change_approval_decision(response.decision);
|
||||
// Allow EventMsg::PatchApplyEnd to emit ItemCompleted for accepted patches.
|
||||
// Only short-circuit on declines/cancels/failures.
|
||||
(decision, completion_status)
|
||||
}
|
||||
Ok(Err(err)) if is_turn_transition_server_request_error(&err) => return,
|
||||
Ok(Err(err)) => {
|
||||
error!("request failed with client error: {err:?}");
|
||||
ReviewDecision::Denied
|
||||
(ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
|
||||
}
|
||||
Err(err) => {
|
||||
error!("request failed: {err:?}");
|
||||
ReviewDecision::Denied
|
||||
(ReviewDecision::Denied, Some(PatchApplyStatus::Failed))
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(status) = completion_status {
|
||||
complete_file_change_item(
|
||||
conversation_id,
|
||||
item_id.clone(),
|
||||
ThreadItem::FileChange {
|
||||
id: item_id.clone(),
|
||||
changes,
|
||||
status,
|
||||
},
|
||||
event_turn_id.clone(),
|
||||
&outgoing,
|
||||
&thread_state,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
if let Err(err) = codex
|
||||
.submit(Op::PatchApproval {
|
||||
id: item_id,
|
||||
@@ -2552,7 +2687,12 @@ mod tests {
|
||||
thread_id: conversation_id,
|
||||
thread: conversation,
|
||||
..
|
||||
} = thread_manager.start_thread(config.clone()).await?;
|
||||
} = thread_manager
|
||||
.start_thread(
|
||||
config.clone(),
|
||||
codex_core::thread_store_from_config(&config),
|
||||
)
|
||||
.await?;
|
||||
let thread_state = new_thread_state();
|
||||
let thread_watch_manager = ThreadWatchManager::new();
|
||||
let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
|
||||
@@ -2751,9 +2891,10 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn file_change_accept_for_session_maps_to_approved_for_session() {
|
||||
let decision =
|
||||
let (decision, completion_status) =
|
||||
map_file_change_approval_decision(FileChangeApprovalDecision::AcceptForSession);
|
||||
assert_eq!(decision, ReviewDecision::ApprovedForSession);
|
||||
assert_eq!(completion_status, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
11116
codex-rs/app-server/src/codex_message_processor.rs
Normal file
11116
codex-rs/app-server/src/codex_message_processor.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,66 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppListUpdatedNotification;
|
||||
use codex_app_server_protocol::AppsListResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_chatgpt::connectors;
|
||||
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
|
||||
pub(super) fn merge_loaded_apps(
|
||||
all_connectors: Option<&[AppInfo]>,
|
||||
accessible_connectors: Option<&[AppInfo]>,
|
||||
) -> Vec<AppInfo> {
|
||||
let all_connectors_loaded = all_connectors.is_some();
|
||||
let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
|
||||
}
|
||||
|
||||
pub(super) fn should_send_app_list_updated_notification(
|
||||
connectors: &[AppInfo],
|
||||
accessible_loaded: bool,
|
||||
all_loaded: bool,
|
||||
) -> bool {
|
||||
connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
|
||||
}
|
||||
|
||||
pub(super) fn paginate_apps(
|
||||
connectors: &[AppInfo],
|
||||
start: usize,
|
||||
limit: Option<u32>,
|
||||
) -> Result<AppsListResponse, JSONRPCErrorError> {
|
||||
let total = connectors.len();
|
||||
if start > total {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("cursor {start} exceeds total apps {total}"),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let data = connectors[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(AppsListResponse { data, next_cursor })
|
||||
}
|
||||
|
||||
pub(super) async fn send_app_list_updated_notification(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
data: Vec<AppInfo>,
|
||||
) {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::AppListUpdated(
|
||||
AppListUpdatedNotification { data },
|
||||
))
|
||||
.await;
|
||||
}
|
||||
@@ -0,0 +1,149 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppSummary;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_core::config::Config;
|
||||
use codex_exec_server::EnvironmentManager;
|
||||
use codex_plugin::AppConnectorId;
|
||||
use tracing::warn;
|
||||
|
||||
pub(super) async fn load_plugin_app_summaries(
|
||||
config: &Config,
|
||||
plugin_apps: &[AppConnectorId],
|
||||
environment_manager: &EnvironmentManager,
|
||||
) -> Vec<AppSummary> {
|
||||
if plugin_apps.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let connectors =
|
||||
match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
|
||||
Ok(connectors) => connectors,
|
||||
Err(err) => {
|
||||
warn!("failed to load app metadata for plugin/read: {err:#}");
|
||||
connectors::list_cached_all_connectors(config)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
let plugin_connectors = connectors::connectors_for_plugin_apps(connectors, plugin_apps);
|
||||
|
||||
let accessible_connectors =
|
||||
match connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
|
||||
config,
|
||||
/*force_refetch*/ false,
|
||||
environment_manager,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(status) if status.codex_apps_ready => status.connectors,
|
||||
Ok(_) => {
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("failed to load app auth state for plugin/read: {err:#}");
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
};
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
plugin_connectors
|
||||
.into_iter()
|
||||
.map(|connector| {
|
||||
let needs_auth = !accessible_ids.contains(connector.id.as_str());
|
||||
AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(super) fn plugin_apps_needing_auth(
|
||||
all_connectors: &[AppInfo],
|
||||
accessible_connectors: &[AppInfo],
|
||||
plugin_apps: &[AppConnectorId],
|
||||
codex_apps_ready: bool,
|
||||
) -> Vec<AppSummary> {
|
||||
if !codex_apps_ready {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
let plugin_app_ids = plugin_apps
|
||||
.iter()
|
||||
.map(|connector_id| connector_id.0.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
all_connectors
|
||||
.iter()
|
||||
.filter(|connector| {
|
||||
plugin_app_ids.contains(connector.id.as_str())
|
||||
&& !accessible_ids.contains(connector.id.as_str())
|
||||
})
|
||||
.cloned()
|
||||
.map(|connector| AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth: true,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_plugin::AppConnectorId;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::plugin_apps_needing_auth;
|
||||
|
||||
#[test]
|
||||
fn plugin_apps_needing_auth_returns_empty_when_codex_apps_is_not_ready() {
|
||||
let all_connectors = vec![AppInfo {
|
||||
id: "alpha".to_string(),
|
||||
name: "Alpha".to_string(),
|
||||
description: Some("Alpha connector".to_string()),
|
||||
logo_url: None,
|
||||
logo_url_dark: None,
|
||||
distribution_channel: None,
|
||||
branding: None,
|
||||
app_metadata: None,
|
||||
labels: None,
|
||||
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
|
||||
is_accessible: false,
|
||||
is_enabled: true,
|
||||
plugin_display_names: Vec::new(),
|
||||
}];
|
||||
|
||||
assert_eq!(
|
||||
plugin_apps_needing_auth(
|
||||
&all_connectors,
|
||||
&[],
|
||||
&[AppConnectorId("alpha".to_string())],
|
||||
/*codex_apps_ready*/ false,
|
||||
),
|
||||
Vec::new()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_config::types::McpServerConfig;
|
||||
use codex_core::config::Config;
|
||||
use codex_mcp::McpOAuthLoginSupport;
|
||||
use codex_mcp::oauth_login_support;
|
||||
use codex_mcp::resolve_oauth_scopes;
|
||||
use codex_mcp::should_retry_without_scopes;
|
||||
use codex_rmcp_client::perform_oauth_login_silent;
|
||||
use tracing::warn;
|
||||
|
||||
use super::CodexMessageProcessor;
|
||||
|
||||
impl CodexMessageProcessor {
|
||||
pub(super) async fn start_plugin_mcp_oauth_logins(
|
||||
&self,
|
||||
config: &Config,
|
||||
plugin_mcp_servers: HashMap<String, McpServerConfig>,
|
||||
) {
|
||||
for (name, server) in plugin_mcp_servers {
|
||||
let oauth_config = match oauth_login_support(&server.transport).await {
|
||||
McpOAuthLoginSupport::Supported(config) => config,
|
||||
McpOAuthLoginSupport::Unsupported => continue,
|
||||
McpOAuthLoginSupport::Unknown(err) => {
|
||||
warn!(
|
||||
"MCP server may or may not require login for plugin install {name}: {err}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resolved_scopes = resolve_oauth_scopes(
|
||||
/*explicit_scopes*/ None,
|
||||
server.scopes.clone(),
|
||||
oauth_config.discovered_scopes.clone(),
|
||||
);
|
||||
|
||||
let store_mode = config.mcp_oauth_credentials_store_mode;
|
||||
let callback_port = config.mcp_oauth_callback_port;
|
||||
let callback_url = config.mcp_oauth_callback_url.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let notification_name = name.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let first_attempt = perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers.clone(),
|
||||
oauth_config.env_http_headers.clone(),
|
||||
&resolved_scopes.scopes,
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let final_result = match first_attempt {
|
||||
Err(err) if should_retry_without_scopes(&resolved_scopes, &err) => {
|
||||
perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers,
|
||||
oauth_config.env_http_headers,
|
||||
&[],
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
result => result,
|
||||
};
|
||||
|
||||
let (success, error) = match final_result {
|
||||
Ok(()) => (true, None),
|
||||
Err(err) => (false, Some(err.to_string())),
|
||||
};
|
||||
|
||||
let notification = ServerNotification::McpServerOauthLoginCompleted(
|
||||
McpServerOauthLoginCompletedNotification {
|
||||
name: notification_name,
|
||||
success,
|
||||
error,
|
||||
},
|
||||
);
|
||||
outgoing.send_server_notification(notification).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,249 +3,15 @@ use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use codex_app_server_protocol::PluginAvailability;
|
||||
use codex_app_server_protocol::PluginInstallPolicy;
|
||||
use codex_config::types::McpServerConfig;
|
||||
use codex_core_plugins::remote::is_valid_remote_plugin_id;
|
||||
use codex_core_plugins::remote::validate_remote_plugin_id;
|
||||
use codex_mcp::McpOAuthLoginSupport;
|
||||
use codex_mcp::oauth_login_support;
|
||||
use codex_mcp::should_retry_without_scopes;
|
||||
use codex_rmcp_client::perform_oauth_login_silent;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct PluginRequestProcessor {
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
config_manager: ConfigManager,
|
||||
workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
}
|
||||
|
||||
fn plugin_skills_to_info(
|
||||
skills: &[codex_core::skills::SkillMetadata],
|
||||
disabled_skill_paths: &HashSet<AbsolutePathBuf>,
|
||||
) -> Vec<SkillSummary> {
|
||||
skills
|
||||
.iter()
|
||||
.map(|skill| SkillSummary {
|
||||
name: skill.name.clone(),
|
||||
description: skill.description.clone(),
|
||||
short_description: skill.short_description.clone(),
|
||||
interface: skill.interface.clone().map(|interface| {
|
||||
codex_app_server_protocol::SkillInterface {
|
||||
display_name: interface.display_name,
|
||||
short_description: interface.short_description,
|
||||
icon_small: interface.icon_small,
|
||||
icon_large: interface.icon_large,
|
||||
brand_color: interface.brand_color,
|
||||
default_prompt: interface.default_prompt,
|
||||
}
|
||||
}),
|
||||
path: Some(skill.path_to_skills_md.clone()),
|
||||
enabled: !disabled_skill_paths.contains(&skill.path_to_skills_md),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn local_plugin_interface_to_info(interface: PluginManifestInterface) -> PluginInterface {
|
||||
PluginInterface {
|
||||
display_name: interface.display_name,
|
||||
short_description: interface.short_description,
|
||||
long_description: interface.long_description,
|
||||
developer_name: interface.developer_name,
|
||||
category: interface.category,
|
||||
capabilities: interface.capabilities,
|
||||
website_url: interface.website_url,
|
||||
privacy_policy_url: interface.privacy_policy_url,
|
||||
terms_of_service_url: interface.terms_of_service_url,
|
||||
default_prompt: interface.default_prompt,
|
||||
brand_color: interface.brand_color,
|
||||
composer_icon: interface.composer_icon,
|
||||
composer_icon_url: None,
|
||||
logo: interface.logo,
|
||||
logo_url: None,
|
||||
screenshots: interface.screenshots,
|
||||
screenshot_urls: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn marketplace_plugin_source_to_info(source: MarketplacePluginSource) -> PluginSource {
|
||||
match source {
|
||||
MarketplacePluginSource::Local { path } => PluginSource::Local { path },
|
||||
MarketplacePluginSource::Git {
|
||||
url,
|
||||
path,
|
||||
ref_name,
|
||||
sha,
|
||||
} => PluginSource::Git {
|
||||
url,
|
||||
path,
|
||||
ref_name,
|
||||
sha,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
impl PluginRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
config_manager: ConfigManager,
|
||||
workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
) -> Self {
|
||||
Self {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
outgoing,
|
||||
analytics_events_client,
|
||||
config_manager,
|
||||
workspace_settings_cache,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_list(
|
||||
impl CodexMessageProcessor {
|
||||
pub(super) async fn plugin_list(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_read(
|
||||
&self,
|
||||
params: PluginReadParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_read_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_skill_read(
|
||||
&self,
|
||||
params: PluginSkillReadParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_skill_read_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_share_save(
|
||||
&self,
|
||||
params: PluginShareSaveParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_share_save_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_share_list(
|
||||
&self,
|
||||
params: PluginShareListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_share_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_share_delete(
|
||||
&self,
|
||||
params: PluginShareDeleteParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_share_delete_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_install(
|
||||
&self,
|
||||
params: PluginInstallParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_install_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn plugin_uninstall(
|
||||
&self,
|
||||
params: PluginUninstallParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.plugin_uninstall_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) fn effective_plugins_changed_callback(
|
||||
&self,
|
||||
config: Config,
|
||||
) -> Arc<dyn Fn() + Send + Sync> {
|
||||
let thread_manager = Arc::clone(&self.thread_manager);
|
||||
Arc::new(move || {
|
||||
Self::spawn_effective_plugins_changed_task(Arc::clone(&thread_manager), config.clone());
|
||||
})
|
||||
}
|
||||
|
||||
fn on_effective_plugins_changed(&self, config: Config) {
|
||||
Self::spawn_effective_plugins_changed_task(Arc::clone(&self.thread_manager), config);
|
||||
}
|
||||
|
||||
fn spawn_effective_plugins_changed_task(thread_manager: Arc<ThreadManager>, config: Config) {
|
||||
tokio::spawn(async move {
|
||||
thread_manager.plugins_manager().clear_cache();
|
||||
thread_manager.skills_manager().clear_cache();
|
||||
if thread_manager.list_thread_ids().await.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Err(err) =
|
||||
McpRequestProcessor::queue_mcp_server_refresh_for_config(&thread_manager, &config)
|
||||
.await
|
||||
{
|
||||
warn!("failed to queue MCP refresh after effective plugins changed: {err:?}");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn clear_plugin_related_caches(&self) {
|
||||
self.thread_manager.plugins_manager().clear_cache();
|
||||
self.thread_manager.skills_manager().clear_cache();
|
||||
}
|
||||
|
||||
async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to reload config: {err}"),
|
||||
data: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn workspace_codex_plugins_enabled(
|
||||
&self,
|
||||
config: &Config,
|
||||
auth: Option<&CodexAuth>,
|
||||
) -> bool {
|
||||
match workspace_settings::codex_plugins_enabled_for_workspace(
|
||||
config,
|
||||
auth,
|
||||
Some(&self.workspace_settings_cache),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(enabled) => enabled,
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
) {
|
||||
let result = self.plugin_list_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_list_response(
|
||||
@@ -405,6 +171,15 @@ impl PluginRequestProcessor {
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_read(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginReadParams,
|
||||
) {
|
||||
let result = self.plugin_read_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_read_response(
|
||||
&self,
|
||||
params: PluginReadParams,
|
||||
@@ -442,9 +217,12 @@ impl PluginRequestProcessor {
|
||||
.await
|
||||
.map_err(|err| Self::marketplace_error(err, "read plugin details"))?;
|
||||
let environment_manager = self.thread_manager.environment_manager();
|
||||
let app_summaries =
|
||||
load_plugin_app_summaries(&config, &outcome.plugin.apps, &environment_manager)
|
||||
.await;
|
||||
let app_summaries = plugin_app_helpers::load_plugin_app_summaries(
|
||||
&config,
|
||||
&outcome.plugin.apps,
|
||||
&environment_manager,
|
||||
)
|
||||
.await;
|
||||
let visible_skills = outcome
|
||||
.plugin
|
||||
.skills
|
||||
@@ -483,15 +261,15 @@ impl PluginRequestProcessor {
|
||||
if !config.features.enabled(Feature::Plugins)
|
||||
|| !config.features.enabled(Feature::RemotePlugin)
|
||||
{
|
||||
return Err(invalid_request(format!(
|
||||
"remote plugin read is not enabled for marketplace {remote_marketplace_name}"
|
||||
)));
|
||||
return Err(invalid_request("remote plugin read is not enabled"));
|
||||
}
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
chatgpt_base_url: config.chatgpt_base_url.clone(),
|
||||
};
|
||||
validate_remote_plugin_id(&plugin_name)?;
|
||||
if plugin_name.is_empty() || !is_valid_remote_plugin_id(&plugin_name) {
|
||||
return Err(invalid_request("invalid remote plugin id"));
|
||||
}
|
||||
let remote_detail = codex_core_plugins::remote::fetch_remote_plugin_detail(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
@@ -509,8 +287,12 @@ impl PluginRequestProcessor {
|
||||
.map(codex_plugin::AppConnectorId)
|
||||
.collect::<Vec<_>>();
|
||||
let environment_manager = self.thread_manager.environment_manager();
|
||||
let app_summaries =
|
||||
load_plugin_app_summaries(&config, &plugin_apps, &environment_manager).await;
|
||||
let app_summaries = plugin_app_helpers::load_plugin_app_summaries(
|
||||
&config,
|
||||
&plugin_apps,
|
||||
&environment_manager,
|
||||
)
|
||||
.await;
|
||||
remote_plugin_detail_to_info(remote_detail, app_summaries)
|
||||
}
|
||||
};
|
||||
@@ -518,50 +300,13 @@ impl PluginRequestProcessor {
|
||||
Ok(PluginReadResponse { plugin })
|
||||
}
|
||||
|
||||
async fn plugin_skill_read_response(
|
||||
pub(super) async fn plugin_share_save(
|
||||
&self,
|
||||
params: PluginSkillReadParams,
|
||||
) -> Result<PluginSkillReadResponse, JSONRPCErrorError> {
|
||||
let PluginSkillReadParams {
|
||||
remote_marketplace_name,
|
||||
remote_plugin_id,
|
||||
skill_name,
|
||||
} = params;
|
||||
|
||||
let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
if !config.features.enabled(Feature::Plugins)
|
||||
|| !config.features.enabled(Feature::RemotePlugin)
|
||||
{
|
||||
return Err(invalid_request(format!(
|
||||
"remote plugin skill read is not enabled for marketplace {remote_marketplace_name}"
|
||||
)));
|
||||
}
|
||||
validate_remote_plugin_id(&remote_plugin_id)?;
|
||||
if skill_name.is_empty() {
|
||||
return Err(invalid_request(
|
||||
"invalid remote plugin skill name: cannot be empty",
|
||||
));
|
||||
}
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
chatgpt_base_url: config.chatgpt_base_url.clone(),
|
||||
};
|
||||
let remote_skill_detail = codex_core_plugins::remote::fetch_remote_plugin_skill_detail(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
&remote_marketplace_name,
|
||||
&remote_plugin_id,
|
||||
&skill_name,
|
||||
)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
remote_plugin_catalog_error_to_jsonrpc(err, "read remote plugin skill details")
|
||||
})?;
|
||||
|
||||
Ok(PluginSkillReadResponse {
|
||||
contents: remote_skill_detail.contents,
|
||||
})
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginShareSaveParams,
|
||||
) {
|
||||
let result = self.plugin_share_save_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_save_response(
|
||||
@@ -585,23 +330,29 @@ impl PluginRequestProcessor {
|
||||
let result = codex_core_plugins::remote::save_remote_plugin_share(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
&plugin_path,
|
||||
plugin_path.as_path(),
|
||||
remote_plugin_id.as_deref(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| remote_plugin_catalog_error_to_jsonrpc(err, "save remote plugin share"))?;
|
||||
let remote_plugin_id = result.remote_plugin_id;
|
||||
self.clear_plugin_related_caches();
|
||||
Ok(PluginShareSaveResponse {
|
||||
remote_plugin_id,
|
||||
remote_plugin_id: result.remote_plugin_id,
|
||||
share_url: result.share_url.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_share_list(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
_params: PluginShareListParams,
|
||||
) {
|
||||
let result = self.plugin_share_list_response().await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_list_response(
|
||||
&self,
|
||||
_params: PluginShareListParams,
|
||||
) -> Result<PluginShareListResponse, JSONRPCErrorError> {
|
||||
let (config, auth) = self.load_plugin_share_config_and_auth().await?;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -610,28 +361,24 @@ impl PluginRequestProcessor {
|
||||
let data = codex_core_plugins::remote::list_remote_plugin_shares(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| remote_plugin_catalog_error_to_jsonrpc(err, "list remote plugin shares"))?
|
||||
.into_iter()
|
||||
.map(|summary| {
|
||||
let RemoteCatalogPluginShareSummary {
|
||||
summary,
|
||||
share_url,
|
||||
local_plugin_path,
|
||||
} = summary;
|
||||
let plugin = remote_plugin_summary_to_info(summary);
|
||||
PluginShareListItem {
|
||||
plugin,
|
||||
share_url: share_url.unwrap_or_default(),
|
||||
local_plugin_path,
|
||||
}
|
||||
})
|
||||
.map(remote_plugin_summary_to_info)
|
||||
.collect();
|
||||
Ok(PluginShareListResponse { data })
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_share_delete(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginShareDeleteParams,
|
||||
) {
|
||||
let result = self.plugin_share_delete_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_share_delete_response(
|
||||
&self,
|
||||
params: PluginShareDeleteParams,
|
||||
@@ -648,7 +395,6 @@ impl PluginRequestProcessor {
|
||||
codex_core_plugins::remote::delete_remote_plugin_share(
|
||||
&remote_plugin_service_config,
|
||||
auth.as_ref(),
|
||||
config.codex_home.as_path(),
|
||||
&remote_plugin_id,
|
||||
)
|
||||
.await
|
||||
@@ -670,6 +416,15 @@ impl PluginRequestProcessor {
|
||||
Ok((config, auth))
|
||||
}
|
||||
|
||||
pub(super) async fn plugin_install(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginInstallParams,
|
||||
) {
|
||||
let result = self.plugin_install_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_install_response(
|
||||
&self,
|
||||
params: PluginInstallParams,
|
||||
@@ -759,11 +514,13 @@ impl PluginRequestProcessor {
|
||||
if !config.features.enabled(Feature::Plugins)
|
||||
|| !config.features.enabled(Feature::RemotePlugin)
|
||||
{
|
||||
return Err(invalid_request(format!(
|
||||
"remote plugin install is not enabled for marketplace {remote_marketplace_name}"
|
||||
)));
|
||||
return Err(invalid_request("remote plugin install is not enabled"));
|
||||
}
|
||||
if remote_plugin_id.is_empty() || !is_valid_remote_plugin_id(&remote_plugin_id) {
|
||||
return Err(invalid_request(
|
||||
"invalid remote plugin id: only ASCII letters, digits, `_`, `-`, and `~` are allowed",
|
||||
));
|
||||
}
|
||||
validate_remote_plugin_id(&remote_plugin_id)?;
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -923,7 +680,7 @@ impl PluginRequestProcessor {
|
||||
);
|
||||
}
|
||||
|
||||
plugin_apps_needing_auth(
|
||||
plugin_app_helpers::plugin_apps_needing_auth(
|
||||
&all_connectors,
|
||||
&accessible_connectors,
|
||||
plugin_apps,
|
||||
@@ -931,82 +688,13 @@ impl PluginRequestProcessor {
|
||||
)
|
||||
}
|
||||
|
||||
async fn start_plugin_mcp_oauth_logins(
|
||||
pub(super) async fn plugin_uninstall(
|
||||
&self,
|
||||
config: &Config,
|
||||
plugin_mcp_servers: HashMap<String, McpServerConfig>,
|
||||
request_id: ConnectionRequestId,
|
||||
params: PluginUninstallParams,
|
||||
) {
|
||||
for (name, server) in plugin_mcp_servers {
|
||||
let oauth_config = match oauth_login_support(&server.transport).await {
|
||||
McpOAuthLoginSupport::Supported(config) => config,
|
||||
McpOAuthLoginSupport::Unsupported => continue,
|
||||
McpOAuthLoginSupport::Unknown(err) => {
|
||||
warn!(
|
||||
"MCP server may or may not require login for plugin install {name}: {err}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resolved_scopes = resolve_oauth_scopes(
|
||||
/*explicit_scopes*/ None,
|
||||
server.scopes.clone(),
|
||||
oauth_config.discovered_scopes.clone(),
|
||||
);
|
||||
|
||||
let store_mode = config.mcp_oauth_credentials_store_mode;
|
||||
let callback_port = config.mcp_oauth_callback_port;
|
||||
let callback_url = config.mcp_oauth_callback_url.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let notification_name = name.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let first_attempt = perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers.clone(),
|
||||
oauth_config.env_http_headers.clone(),
|
||||
&resolved_scopes.scopes,
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let final_result = match first_attempt {
|
||||
Err(err) if should_retry_without_scopes(&resolved_scopes, &err) => {
|
||||
perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers,
|
||||
oauth_config.env_http_headers,
|
||||
&[],
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
result => result,
|
||||
};
|
||||
|
||||
let (success, error) = match final_result {
|
||||
Ok(()) => (true, None),
|
||||
Err(err) => (false, Some(err.to_string())),
|
||||
};
|
||||
|
||||
let notification = ServerNotification::McpServerOauthLoginCompleted(
|
||||
McpServerOauthLoginCompletedNotification {
|
||||
name: notification_name,
|
||||
success,
|
||||
error,
|
||||
},
|
||||
);
|
||||
outgoing.send_server_notification(notification).await;
|
||||
});
|
||||
}
|
||||
let result = self.plugin_uninstall_response(params).await;
|
||||
self.outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn plugin_uninstall_response(
|
||||
@@ -1015,13 +703,13 @@ impl PluginRequestProcessor {
|
||||
) -> Result<PluginUninstallResponse, JSONRPCErrorError> {
|
||||
let PluginUninstallParams { plugin_id } = params;
|
||||
if codex_plugin::PluginId::parse(&plugin_id).is_err()
|
||||
&& !is_valid_remote_uninstall_plugin_id(&plugin_id)
|
||||
&& (plugin_id.is_empty() || !is_valid_remote_plugin_id(&plugin_id))
|
||||
{
|
||||
return Err(invalid_request(
|
||||
"invalid plugin id: expected a local plugin id in the form `plugin@marketplace` or a remote plugin id starting with `plugins~`, `plugins_`, `app_`, `asdk_app_`, or `connector_`",
|
||||
"invalid plugin id: expected a local plugin id or remote plugin id",
|
||||
));
|
||||
}
|
||||
if is_valid_remote_uninstall_plugin_id(&plugin_id) {
|
||||
if !plugin_id.is_empty() && is_valid_remote_plugin_id(&plugin_id) {
|
||||
return self.remote_plugin_uninstall_response(plugin_id).await;
|
||||
}
|
||||
let plugins_manager = self.thread_manager.plugins_manager();
|
||||
@@ -1112,7 +800,9 @@ impl PluginRequestProcessor {
|
||||
{
|
||||
return Err(invalid_request("remote plugin uninstall is not enabled"));
|
||||
}
|
||||
validate_remote_plugin_id(&plugin_id)?;
|
||||
if plugin_id.is_empty() || !is_valid_remote_plugin_id(&plugin_id) {
|
||||
return Err(invalid_request("invalid remote plugin id"));
|
||||
}
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let remote_plugin_service_config = RemotePluginServiceConfig {
|
||||
@@ -1148,115 +838,10 @@ impl PluginRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
fn is_valid_remote_uninstall_plugin_id(plugin_name: &str) -> bool {
|
||||
is_valid_remote_plugin_id(plugin_name)
|
||||
&& (plugin_name.starts_with("plugins~")
|
||||
|| plugin_name.starts_with("plugins_")
|
||||
|| plugin_name.starts_with("app_")
|
||||
|| plugin_name.starts_with("asdk_app_")
|
||||
|| plugin_name.starts_with("connector_"))
|
||||
}
|
||||
|
||||
async fn load_plugin_app_summaries(
|
||||
config: &Config,
|
||||
plugin_apps: &[codex_plugin::AppConnectorId],
|
||||
environment_manager: &EnvironmentManager,
|
||||
) -> Vec<AppSummary> {
|
||||
if plugin_apps.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let connectors =
|
||||
match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
|
||||
Ok(connectors) => connectors,
|
||||
Err(err) => {
|
||||
warn!("failed to load app metadata for plugin/read: {err:#}");
|
||||
connectors::list_cached_all_connectors(config)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
let plugin_connectors = connectors::connectors_for_plugin_apps(connectors, plugin_apps);
|
||||
|
||||
let accessible_connectors =
|
||||
match connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
|
||||
config,
|
||||
/*force_refetch*/ false,
|
||||
environment_manager,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(status) if status.codex_apps_ready => status.connectors,
|
||||
Ok(_) => {
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("failed to load app auth state for plugin/read: {err:#}");
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
};
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
plugin_connectors
|
||||
.into_iter()
|
||||
.map(|connector| {
|
||||
let needs_auth = !accessible_ids.contains(connector.id.as_str());
|
||||
AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn plugin_apps_needing_auth(
|
||||
all_connectors: &[AppInfo],
|
||||
accessible_connectors: &[AppInfo],
|
||||
plugin_apps: &[codex_plugin::AppConnectorId],
|
||||
codex_apps_ready: bool,
|
||||
) -> Vec<AppSummary> {
|
||||
if !codex_apps_ready {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
let plugin_app_ids = plugin_apps
|
||||
.iter()
|
||||
.map(|connector_id| connector_id.0.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
all_connectors
|
||||
.iter()
|
||||
.filter(|connector| {
|
||||
plugin_app_ids.contains(connector.id.as_str())
|
||||
&& !accessible_ids.contains(connector.id.as_str())
|
||||
})
|
||||
.cloned()
|
||||
.map(|connector| AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth: true,
|
||||
})
|
||||
.collect()
|
||||
fn is_valid_remote_plugin_id(plugin_name: &str) -> bool {
|
||||
plugin_name
|
||||
.chars()
|
||||
.all(|ch| ch.is_ascii_alphanumeric() || ch == '-' || ch == '_' || ch == '~')
|
||||
}
|
||||
|
||||
fn remote_marketplace_to_info(marketplace: RemoteMarketplace) -> PluginMarketplaceEntry {
|
||||
@@ -1318,35 +903,42 @@ fn remote_plugin_catalog_error_to_jsonrpc(
|
||||
err: RemotePluginCatalogError,
|
||||
context: &str,
|
||||
) -> JSONRPCErrorError {
|
||||
let code = match &err {
|
||||
match err {
|
||||
RemotePluginCatalogError::AuthRequired | RemotePluginCatalogError::UnsupportedAuthMode => {
|
||||
INVALID_REQUEST_ERROR_CODE
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("{context}: {err}"),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
RemotePluginCatalogError::UnexpectedStatus { status, .. } if status.as_u16() == 404 => {
|
||||
INVALID_REQUEST_ERROR_CODE
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("{context}: {err}"),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
RemotePluginCatalogError::InvalidPluginPath { .. }
|
||||
| RemotePluginCatalogError::ArchiveTooLarge { .. }
|
||||
| RemotePluginCatalogError::UnknownMarketplace { .. } => INVALID_REQUEST_ERROR_CODE,
|
||||
| RemotePluginCatalogError::ArchiveTooLarge { .. } => JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("{context}: {err}"),
|
||||
data: None,
|
||||
},
|
||||
RemotePluginCatalogError::AuthToken(_)
|
||||
| RemotePluginCatalogError::Request { .. }
|
||||
| RemotePluginCatalogError::UnexpectedStatus { .. }
|
||||
| RemotePluginCatalogError::Decode { .. }
|
||||
| RemotePluginCatalogError::InvalidBaseUrl(_)
|
||||
| RemotePluginCatalogError::InvalidBaseUrlPath
|
||||
| RemotePluginCatalogError::UnexpectedPluginId { .. }
|
||||
| RemotePluginCatalogError::UnexpectedSkillName { .. }
|
||||
| RemotePluginCatalogError::UnexpectedEnabledState { .. }
|
||||
| RemotePluginCatalogError::Archive { .. }
|
||||
| RemotePluginCatalogError::ArchiveJoin(_)
|
||||
| RemotePluginCatalogError::MissingUploadEtag
|
||||
| RemotePluginCatalogError::UnexpectedResponse(_)
|
||||
| RemotePluginCatalogError::CacheRemove(_) => INTERNAL_ERROR_CODE,
|
||||
};
|
||||
JSONRPCErrorError {
|
||||
code,
|
||||
message: format!("{context}: {err}"),
|
||||
data: None,
|
||||
| RemotePluginCatalogError::CacheRemove(_) => JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("{context}: {err}"),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,115 +1,68 @@
|
||||
use super::*;
|
||||
use codex_protocol::protocol::validate_thread_goal_objective;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ThreadGoalRequestProcessor {
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
config: Arc<Config>,
|
||||
thread_state_manager: ThreadStateManager,
|
||||
}
|
||||
|
||||
impl ThreadGoalRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
config: Arc<Config>,
|
||||
thread_state_manager: ThreadStateManager,
|
||||
) -> Self {
|
||||
Self {
|
||||
thread_manager,
|
||||
outgoing,
|
||||
config,
|
||||
thread_state_manager,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn thread_goal_set(
|
||||
impl CodexMessageProcessor {
|
||||
pub(super) async fn thread_goal_set(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ThreadGoalSetParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.thread_goal_set_inner(request_id, params)
|
||||
.await
|
||||
.map(|()| None)
|
||||
}
|
||||
|
||||
pub(crate) async fn thread_goal_get(
|
||||
&self,
|
||||
params: ThreadGoalGetParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.thread_goal_get_inner(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn thread_goal_clear(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ThreadGoalClearParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.thread_goal_clear_inner(request_id, params)
|
||||
.await
|
||||
.map(|()| None)
|
||||
}
|
||||
|
||||
pub(crate) async fn emit_resume_goal_snapshot_and_continue(
|
||||
&self,
|
||||
thread_id: ThreadId,
|
||||
thread: &CodexThread,
|
||||
) {
|
||||
if !self.config.features.enabled(Feature::Goals) {
|
||||
self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
self.emit_thread_goal_snapshot(thread_id).await;
|
||||
// App-server owns resume response and snapshot ordering, so wait until
|
||||
// those are sent before letting core start goal continuation.
|
||||
if let Err(err) = thread.continue_active_goal_if_idle().await {
|
||||
tracing::warn!("failed to continue active goal after resume: {err}");
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn pending_resume_goal_state(
|
||||
&self,
|
||||
thread: &CodexThread,
|
||||
) -> (bool, Option<StateDbHandle>) {
|
||||
let emit_thread_goal_update = self.config.features.enabled(Feature::Goals);
|
||||
let thread_goal_state_db = if emit_thread_goal_update {
|
||||
if let Some(state_db) = thread.state_db() {
|
||||
Some(state_db)
|
||||
} else {
|
||||
open_state_db_for_direct_thread_lookup(&self.config).await
|
||||
let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
|
||||
Ok(thread_id) => thread_id,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let state_db = match self.state_db_for_materialized_thread(thread_id).await {
|
||||
Ok(state_db) => state_db,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(emit_thread_goal_update, thread_goal_state_db)
|
||||
}
|
||||
|
||||
async fn thread_goal_set_inner(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ThreadGoalSetParams,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
if !self.config.features.enabled(Feature::Goals) {
|
||||
return Err(invalid_request("goals feature is disabled"));
|
||||
}
|
||||
|
||||
let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
|
||||
let state_db = self.state_db_for_materialized_thread(thread_id).await?;
|
||||
let running_thread = self.thread_manager.get_thread(thread_id).await.ok();
|
||||
let rollout_path = match running_thread.as_ref() {
|
||||
Some(thread) => thread.rollout_path().ok_or_else(|| {
|
||||
invalid_request(format!(
|
||||
"ephemeral thread does not support goals: {thread_id}"
|
||||
))
|
||||
})?,
|
||||
None => find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!("failed to locate thread id {thread_id}: {err}"))
|
||||
})?
|
||||
.ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?,
|
||||
Some(thread) => match thread.rollout_path() {
|
||||
Some(path) => path,
|
||||
None => {
|
||||
self.send_invalid_request_error(
|
||||
request_id,
|
||||
format!("ephemeral thread does not support goals: {thread_id}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
|
||||
.await
|
||||
{
|
||||
Ok(Some(path)) => path,
|
||||
Ok(None) => {
|
||||
self.send_invalid_request_error(
|
||||
request_id,
|
||||
format!("thread not found: {thread_id}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
Err(err) => {
|
||||
self.send_internal_error(
|
||||
request_id,
|
||||
format!("failed to locate thread id {thread_id}: {err}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
reconcile_rollout(
|
||||
Some(&state_db),
|
||||
@@ -131,51 +84,61 @@ impl ThreadGoalRequestProcessor {
|
||||
let objective = params.objective.as_deref().map(str::trim);
|
||||
|
||||
if let Some(objective) = objective {
|
||||
validate_thread_goal_objective(objective).map_err(invalid_request)?;
|
||||
}
|
||||
if objective.is_some() || params.token_budget.is_some() {
|
||||
validate_goal_budget(params.token_budget.flatten()).map_err(invalid_request)?;
|
||||
if let Err(message) = validate_thread_goal_objective(objective) {
|
||||
self.send_invalid_request_error(request_id, message).await;
|
||||
return;
|
||||
}
|
||||
if let Err(message) = validate_goal_budget(params.token_budget.flatten()) {
|
||||
self.send_invalid_request_error(request_id, message).await;
|
||||
return;
|
||||
}
|
||||
} else if let Some(token_budget) = params.token_budget
|
||||
&& let Err(message) = validate_goal_budget(token_budget)
|
||||
{
|
||||
self.send_invalid_request_error(request_id, message).await;
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(thread) = running_thread.as_ref() {
|
||||
thread.prepare_external_goal_mutation().await;
|
||||
}
|
||||
|
||||
let goal = (if let Some(objective) = objective {
|
||||
let existing_goal = state_db
|
||||
.get_thread_goal(thread_id)
|
||||
.await
|
||||
.map_err(|err| invalid_request(err.to_string()))?;
|
||||
if let Some(goal) = existing_goal.as_ref().filter(|goal| {
|
||||
goal.objective == objective
|
||||
&& goal.status != codex_state::ThreadGoalStatus::Complete
|
||||
}) {
|
||||
state_db
|
||||
.update_thread_goal(
|
||||
thread_id,
|
||||
codex_state::ThreadGoalUpdate {
|
||||
status,
|
||||
token_budget: params.token_budget,
|
||||
expected_goal_id: Some(goal.goal_id.clone()),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.and_then(|goal| {
|
||||
goal.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"cannot update goal for thread {thread_id}: no goal exists"
|
||||
let goal = if let Some(objective) = objective {
|
||||
match state_db.get_thread_goal(thread_id).await {
|
||||
Ok(goal) => {
|
||||
if let Some(goal) = goal.as_ref().filter(|goal| {
|
||||
goal.objective == objective
|
||||
&& goal.status != codex_state::ThreadGoalStatus::Complete
|
||||
}) {
|
||||
state_db
|
||||
.update_thread_goal(
|
||||
thread_id,
|
||||
codex_state::ThreadGoalUpdate {
|
||||
status,
|
||||
token_budget: params.token_budget,
|
||||
expected_goal_id: Some(goal.goal_id.clone()),
|
||||
},
|
||||
)
|
||||
})
|
||||
})
|
||||
} else {
|
||||
state_db
|
||||
.replace_thread_goal(
|
||||
thread_id,
|
||||
objective,
|
||||
status.unwrap_or(codex_state::ThreadGoalStatus::Active),
|
||||
params.token_budget.flatten(),
|
||||
)
|
||||
.await
|
||||
.await
|
||||
.and_then(|goal| {
|
||||
goal.ok_or_else(|| {
|
||||
anyhow::anyhow!(
|
||||
"cannot update goal for thread {thread_id}: no goal exists"
|
||||
)
|
||||
})
|
||||
})
|
||||
} else {
|
||||
state_db
|
||||
.replace_thread_goal(
|
||||
thread_id,
|
||||
objective,
|
||||
status.unwrap_or(codex_state::ThreadGoalStatus::Active),
|
||||
params.token_budget.flatten(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
} else {
|
||||
state_db
|
||||
@@ -193,8 +156,16 @@ impl ThreadGoalRequestProcessor {
|
||||
anyhow::anyhow!("cannot update goal for thread {thread_id}: no goal exists")
|
||||
})
|
||||
})
|
||||
})
|
||||
.map_err(|err| invalid_request(err.to_string()))?;
|
||||
};
|
||||
|
||||
let goal = match goal {
|
||||
Ok(goal) => goal,
|
||||
Err(err) => {
|
||||
self.send_invalid_request_error(request_id, err.to_string())
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let goal_status = goal.status;
|
||||
let goal = api_thread_goal_from_state(goal);
|
||||
self.outgoing
|
||||
@@ -208,51 +179,107 @@ impl ThreadGoalRequestProcessor {
|
||||
if let Some(thread) = running_thread.as_ref() {
|
||||
thread.apply_external_goal_set(goal_status).await;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn thread_goal_get_inner(
|
||||
pub(super) async fn thread_goal_get(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ThreadGoalGetParams,
|
||||
) -> Result<ThreadGoalGetResponse, JSONRPCErrorError> {
|
||||
) {
|
||||
if !self.config.features.enabled(Feature::Goals) {
|
||||
return Err(invalid_request("goals feature is disabled"));
|
||||
self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
|
||||
let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
|
||||
let state_db = self.state_db_for_materialized_thread(thread_id).await?;
|
||||
let goal = state_db
|
||||
.get_thread_goal(thread_id)
|
||||
.await
|
||||
.map_err(|err| internal_error(format!("failed to read thread goal: {err}")))?
|
||||
.map(api_thread_goal_from_state);
|
||||
Ok(ThreadGoalGetResponse { goal })
|
||||
let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
|
||||
Ok(thread_id) => thread_id,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let state_db = match self.state_db_for_materialized_thread(thread_id).await {
|
||||
Ok(state_db) => state_db,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let goal = match state_db.get_thread_goal(thread_id).await {
|
||||
Ok(goal) => goal.map(api_thread_goal_from_state),
|
||||
Err(err) => {
|
||||
self.send_internal_error(request_id, format!("failed to read thread goal: {err}"))
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
self.outgoing
|
||||
.send_response(request_id, ThreadGoalGetResponse { goal })
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn thread_goal_clear_inner(
|
||||
pub(super) async fn thread_goal_clear(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ThreadGoalClearParams,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
) {
|
||||
if !self.config.features.enabled(Feature::Goals) {
|
||||
return Err(invalid_request("goals feature is disabled"));
|
||||
self.send_invalid_request_error(request_id, "goals feature is disabled".to_string())
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
|
||||
let thread_id = parse_thread_id_for_request(params.thread_id.as_str())?;
|
||||
let state_db = self.state_db_for_materialized_thread(thread_id).await?;
|
||||
let thread_id = match parse_thread_id_for_request(params.thread_id.as_str()) {
|
||||
Ok(thread_id) => thread_id,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let state_db = match self.state_db_for_materialized_thread(thread_id).await {
|
||||
Ok(state_db) => state_db,
|
||||
Err(error) => {
|
||||
self.outgoing.send_error(request_id, error).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let running_thread = self.thread_manager.get_thread(thread_id).await.ok();
|
||||
let rollout_path = match running_thread.as_ref() {
|
||||
Some(thread) => thread.rollout_path().ok_or_else(|| {
|
||||
invalid_request(format!(
|
||||
"ephemeral thread does not support goals: {thread_id}"
|
||||
))
|
||||
})?,
|
||||
None => find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!("failed to locate thread id {thread_id}: {err}"))
|
||||
})?
|
||||
.ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?,
|
||||
Some(thread) => match thread.rollout_path() {
|
||||
Some(path) => path,
|
||||
None => {
|
||||
self.send_invalid_request_error(
|
||||
request_id,
|
||||
format!("ephemeral thread does not support goals: {thread_id}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
|
||||
.await
|
||||
{
|
||||
Ok(Some(path)) => path,
|
||||
Ok(None) => {
|
||||
self.send_invalid_request_error(
|
||||
request_id,
|
||||
format!("thread not found: {thread_id}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
Err(err) => {
|
||||
self.send_internal_error(
|
||||
request_id,
|
||||
format!("failed to locate thread id {thread_id}: {err}"),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
reconcile_rollout(
|
||||
Some(&state_db),
|
||||
@@ -274,10 +301,14 @@ impl ThreadGoalRequestProcessor {
|
||||
let thread_state = thread_state.lock().await;
|
||||
thread_state.listener_command_tx()
|
||||
};
|
||||
let cleared = state_db
|
||||
.delete_thread_goal(thread_id)
|
||||
.await
|
||||
.map_err(|err| internal_error(format!("failed to clear thread goal: {err}")))?;
|
||||
let cleared = match state_db.delete_thread_goal(thread_id).await {
|
||||
Ok(cleared) => cleared,
|
||||
Err(err) => {
|
||||
self.send_internal_error(request_id, format!("failed to clear thread goal: {err}"))
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if cleared && let Some(thread) = running_thread.as_ref() {
|
||||
thread.apply_external_goal_clear().await;
|
||||
@@ -290,7 +321,6 @@ impl ThreadGoalRequestProcessor {
|
||||
self.emit_thread_goal_cleared_ordered(thread_id, listener_command_tx)
|
||||
.await;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn state_db_for_materialized_thread(
|
||||
@@ -307,12 +337,18 @@ impl ThreadGoalRequestProcessor {
|
||||
return Ok(state_db);
|
||||
}
|
||||
} else {
|
||||
find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!("failed to locate thread id {thread_id}: {err}"))
|
||||
})?
|
||||
.ok_or_else(|| invalid_request(format!("thread not found: {thread_id}")))?;
|
||||
match find_thread_path_by_id_str(&self.config.codex_home, &thread_id.to_string()).await
|
||||
{
|
||||
Ok(Some(_)) => {}
|
||||
Ok(None) => {
|
||||
return Err(invalid_request(format!("thread not found: {thread_id}")));
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(internal_error(format!(
|
||||
"failed to locate thread id {thread_id}: {err}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
open_state_db_for_direct_thread_lookup(&self.config)
|
||||
@@ -320,7 +356,7 @@ impl ThreadGoalRequestProcessor {
|
||||
.ok_or_else(|| internal_error("sqlite state db unavailable for thread goals"))
|
||||
}
|
||||
|
||||
async fn emit_thread_goal_snapshot(&self, thread_id: ThreadId) {
|
||||
pub(super) async fn emit_thread_goal_snapshot(&self, thread_id: ThreadId) {
|
||||
let state_db = match self.state_db_for_materialized_thread(thread_id).await {
|
||||
Ok(state_db) => state_db,
|
||||
Err(err) => {
|
||||
@@ -441,8 +477,3 @@ pub(super) fn api_thread_goal_from_state(goal: codex_state::ThreadGoal) -> Threa
|
||||
updated_at: goal.updated_at.timestamp(),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_thread_id_for_request(thread_id: &str) -> Result<ThreadId, JSONRPCErrorError> {
|
||||
ThreadId::from_string(thread_id)
|
||||
.map_err(|err| invalid_request(format!("invalid thread id: {err}")))
|
||||
}
|
||||
@@ -9,6 +9,7 @@
|
||||
//! the time the `TokenCount` was persisted so the notification still targets the
|
||||
//! corresponding rebuilt turn.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
@@ -23,6 +24,7 @@ use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::RolloutItem;
|
||||
|
||||
use crate::codex_message_processor::read_rollout_items_from_rollout;
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
|
||||
@@ -30,7 +32,7 @@ use crate::outgoing_message::OutgoingMessageSender;
|
||||
///
|
||||
/// This is lifecycle replay rather than a model event: the rollout already contains
|
||||
/// the original `TokenCount`, and emitting through `send_event` here would duplicate
|
||||
/// persisted usage records. Keeping replay connection-scoped also avoids
|
||||
/// persisted usage records. Keeping this helper connection-scoped also avoids
|
||||
/// surprising other subscribers with a historical usage update while they may be
|
||||
/// rendering live turn events.
|
||||
pub(super) async fn send_thread_token_usage_update_to_connection(
|
||||
@@ -57,6 +59,19 @@ pub(super) async fn send_thread_token_usage_update_to_connection(
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(super) async fn latest_token_usage_turn_id_for_thread_path(thread: &Thread) -> Option<String> {
|
||||
let rollout_path = thread.path.as_deref()?;
|
||||
latest_token_usage_turn_id_from_rollout_path(rollout_path, thread.turns.as_slice()).await
|
||||
}
|
||||
|
||||
pub(super) async fn latest_token_usage_turn_id_from_rollout_path(
|
||||
rollout_path: &Path,
|
||||
turns: &[Turn],
|
||||
) -> Option<String> {
|
||||
let rollout_items = read_rollout_items_from_rollout(rollout_path).await.ok()?;
|
||||
latest_token_usage_turn_id_from_rollout_items(&rollout_items, turns)
|
||||
}
|
||||
|
||||
/// Identifies the turn that was active when a `TokenCount` record appeared.
|
||||
///
|
||||
/// The id is preferred when it still appears in the rebuilt thread. The position is a
|
||||
874
codex-rs/app-server/src/config_api.rs
Normal file
874
codex-rs/app-server/src/config_api.rs
Normal file
@@ -0,0 +1,874 @@
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::config_manager_service::ConfigManagerError;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use async_trait::async_trait;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_app_server_protocol::ConfigBatchWriteParams;
|
||||
use codex_app_server_protocol::ConfigReadParams;
|
||||
use codex_app_server_protocol::ConfigReadResponse;
|
||||
use codex_app_server_protocol::ConfigRequirements;
|
||||
use codex_app_server_protocol::ConfigRequirementsReadResponse;
|
||||
use codex_app_server_protocol::ConfigValueWriteParams;
|
||||
use codex_app_server_protocol::ConfigWriteErrorCode;
|
||||
use codex_app_server_protocol::ConfigWriteResponse;
|
||||
use codex_app_server_protocol::ConfiguredHookHandler;
|
||||
use codex_app_server_protocol::ConfiguredHookMatcherGroup;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ManagedHooksRequirements;
|
||||
use codex_app_server_protocol::NetworkDomainPermission;
|
||||
use codex_app_server_protocol::NetworkRequirements;
|
||||
use codex_app_server_protocol::NetworkUnixSocketPermission;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_config::ConfigRequirementsToml;
|
||||
use codex_config::HookEventsToml;
|
||||
use codex_config::HookHandlerConfig as CoreHookHandlerConfig;
|
||||
use codex_config::ManagedHooksRequirementsToml;
|
||||
use codex_config::MatcherGroup as CoreMatcherGroup;
|
||||
use codex_config::ResidencyRequirement as CoreResidencyRequirement;
|
||||
use codex_config::SandboxModeRequirement as CoreSandboxModeRequirement;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core_plugins::loader::installed_plugin_telemetry_metadata;
|
||||
use codex_core_plugins::toggles::collect_plugin_enabled_candidates;
|
||||
use codex_features::canonical_feature_for_key;
|
||||
use codex_features::feature_for_key;
|
||||
use codex_plugin::PluginId;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::protocol::Op;
|
||||
use serde_json::json;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tracing::warn;
|
||||
|
||||
const SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT: &[&str] = &[
|
||||
"apps",
|
||||
"memories",
|
||||
"plugins",
|
||||
"remote_control",
|
||||
"tool_search",
|
||||
"tool_suggest",
|
||||
"tool_call_mcp_elicitation",
|
||||
];
|
||||
|
||||
#[async_trait]
|
||||
pub(crate) trait UserConfigReloader: Send + Sync {
|
||||
async fn reload_user_config(&self);
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserConfigReloader for ThreadManager {
|
||||
async fn reload_user_config(&self) {
|
||||
let thread_ids = self.list_thread_ids().await;
|
||||
for thread_id in thread_ids {
|
||||
let Ok(thread) = self.get_thread(thread_id).await else {
|
||||
continue;
|
||||
};
|
||||
if let Err(err) = thread.submit(Op::ReloadUserConfig).await {
|
||||
warn!("failed to request user config reload: {err}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ConfigApi {
|
||||
config_manager: ConfigManager,
|
||||
user_config_reloader: Arc<dyn UserConfigReloader>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
}
|
||||
|
||||
impl ConfigApi {
|
||||
pub(crate) fn new(
|
||||
config_manager: ConfigManager,
|
||||
user_config_reloader: Arc<dyn UserConfigReloader>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
) -> Self {
|
||||
Self {
|
||||
config_manager,
|
||||
user_config_reloader,
|
||||
analytics_events_client,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!(
|
||||
"failed to resolve feature override precedence: {err}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn read(
|
||||
&self,
|
||||
params: ConfigReadParams,
|
||||
) -> Result<ConfigReadResponse, JSONRPCErrorError> {
|
||||
let fallback_cwd = params.cwd.as_ref().map(PathBuf::from);
|
||||
let mut response = self.config_manager.read(params).await.map_err(map_error)?;
|
||||
let config = self.load_latest_config(fallback_cwd).await?;
|
||||
for feature_key in SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT {
|
||||
let Some(feature) = feature_for_key(feature_key) else {
|
||||
continue;
|
||||
};
|
||||
let features = response
|
||||
.config
|
||||
.additional
|
||||
.entry("features".to_string())
|
||||
.or_insert_with(|| json!({}));
|
||||
if !features.is_object() {
|
||||
*features = json!({});
|
||||
}
|
||||
if let Some(features) = features.as_object_mut() {
|
||||
features.insert(
|
||||
(*feature_key).to_string(),
|
||||
json!(config.features.enabled(feature)),
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn config_requirements_read(
|
||||
&self,
|
||||
) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
|
||||
let requirements = self
|
||||
.config_manager
|
||||
.read_requirements()
|
||||
.await
|
||||
.map_err(map_error)?
|
||||
.map(map_requirements_toml_to_api);
|
||||
|
||||
Ok(ConfigRequirementsReadResponse { requirements })
|
||||
}
|
||||
|
||||
pub(crate) async fn write_value(
|
||||
&self,
|
||||
params: ConfigValueWriteParams,
|
||||
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
|
||||
let pending_changes =
|
||||
collect_plugin_enabled_candidates([(¶ms.key_path, ¶ms.value)].into_iter());
|
||||
let response = self
|
||||
.config_manager
|
||||
.write_value(params)
|
||||
.await
|
||||
.map_err(map_error)?;
|
||||
self.emit_plugin_toggle_events(pending_changes).await;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn batch_write(
|
||||
&self,
|
||||
params: ConfigBatchWriteParams,
|
||||
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
|
||||
let reload_user_config = params.reload_user_config;
|
||||
let pending_changes = collect_plugin_enabled_candidates(
|
||||
params
|
||||
.edits
|
||||
.iter()
|
||||
.map(|edit| (&edit.key_path, &edit.value)),
|
||||
);
|
||||
let response = self
|
||||
.config_manager
|
||||
.batch_write(params)
|
||||
.await
|
||||
.map_err(map_error)?;
|
||||
self.emit_plugin_toggle_events(pending_changes).await;
|
||||
if reload_user_config {
|
||||
self.user_config_reloader.reload_user_config().await;
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn set_experimental_feature_enablement(
|
||||
&self,
|
||||
params: ExperimentalFeatureEnablementSetParams,
|
||||
) -> Result<ExperimentalFeatureEnablementSetResponse, JSONRPCErrorError> {
|
||||
let ExperimentalFeatureEnablementSetParams { enablement } = params;
|
||||
for key in enablement.keys() {
|
||||
if canonical_feature_for_key(key).is_some() {
|
||||
if SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.contains(&key.as_str()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
return Err(invalid_request(format!(
|
||||
"unsupported feature enablement `{key}`: currently supported features are {}",
|
||||
SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.join(", ")
|
||||
)));
|
||||
}
|
||||
|
||||
let message = if let Some(feature) = feature_for_key(key) {
|
||||
format!(
|
||||
"invalid feature enablement `{key}`: use canonical feature key `{}`",
|
||||
feature.key()
|
||||
)
|
||||
} else {
|
||||
format!("invalid feature enablement `{key}`")
|
||||
};
|
||||
return Err(invalid_request(message));
|
||||
}
|
||||
|
||||
if enablement.is_empty() {
|
||||
return Ok(ExperimentalFeatureEnablementSetResponse { enablement });
|
||||
}
|
||||
|
||||
self.config_manager
|
||||
.extend_runtime_feature_enablement(
|
||||
enablement
|
||||
.iter()
|
||||
.map(|(name, enabled)| (name.clone(), *enabled)),
|
||||
)
|
||||
.map_err(|_| internal_error("failed to update feature enablement"))?;
|
||||
|
||||
self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
self.user_config_reloader.reload_user_config().await;
|
||||
|
||||
Ok(ExperimentalFeatureEnablementSetResponse { enablement })
|
||||
}
|
||||
|
||||
async fn emit_plugin_toggle_events(
|
||||
&self,
|
||||
pending_changes: std::collections::BTreeMap<String, bool>,
|
||||
) {
|
||||
for (plugin_id, enabled) in pending_changes {
|
||||
let Ok(plugin_id) = PluginId::parse(&plugin_id) else {
|
||||
continue;
|
||||
};
|
||||
let metadata =
|
||||
installed_plugin_telemetry_metadata(self.config_manager.codex_home(), &plugin_id)
|
||||
.await;
|
||||
if enabled {
|
||||
self.analytics_events_client.track_plugin_enabled(metadata);
|
||||
} else {
|
||||
self.analytics_events_client.track_plugin_disabled(metadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
|
||||
ConfigRequirements {
|
||||
allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
|
||||
policies
|
||||
.into_iter()
|
||||
.map(codex_app_server_protocol::AskForApproval::from)
|
||||
.collect()
|
||||
}),
|
||||
allowed_approvals_reviewers: requirements.allowed_approvals_reviewers.map(|reviewers| {
|
||||
reviewers
|
||||
.into_iter()
|
||||
.map(codex_app_server_protocol::ApprovalsReviewer::from)
|
||||
.collect()
|
||||
}),
|
||||
allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
|
||||
modes
|
||||
.into_iter()
|
||||
.filter_map(map_sandbox_mode_requirement_to_api)
|
||||
.collect()
|
||||
}),
|
||||
allowed_web_search_modes: requirements.allowed_web_search_modes.map(|modes| {
|
||||
let mut normalized = modes
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect::<Vec<WebSearchMode>>();
|
||||
if !normalized.contains(&WebSearchMode::Disabled) {
|
||||
normalized.push(WebSearchMode::Disabled);
|
||||
}
|
||||
normalized
|
||||
}),
|
||||
feature_requirements: requirements
|
||||
.feature_requirements
|
||||
.map(|requirements| requirements.entries),
|
||||
hooks: requirements.hooks.map(map_hooks_requirements_to_api),
|
||||
enforce_residency: requirements
|
||||
.enforce_residency
|
||||
.map(map_residency_requirement_to_api),
|
||||
network: requirements.network.map(map_network_requirements_to_api),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hooks_requirements_to_api(hooks: ManagedHooksRequirementsToml) -> ManagedHooksRequirements {
|
||||
let ManagedHooksRequirementsToml {
|
||||
managed_dir,
|
||||
windows_managed_dir,
|
||||
hooks,
|
||||
} = hooks;
|
||||
let HookEventsToml {
|
||||
pre_tool_use,
|
||||
permission_request,
|
||||
post_tool_use,
|
||||
session_start,
|
||||
user_prompt_submit,
|
||||
stop,
|
||||
} = hooks;
|
||||
|
||||
ManagedHooksRequirements {
|
||||
managed_dir,
|
||||
windows_managed_dir,
|
||||
pre_tool_use: map_hook_matcher_groups_to_api(pre_tool_use),
|
||||
permission_request: map_hook_matcher_groups_to_api(permission_request),
|
||||
post_tool_use: map_hook_matcher_groups_to_api(post_tool_use),
|
||||
session_start: map_hook_matcher_groups_to_api(session_start),
|
||||
user_prompt_submit: map_hook_matcher_groups_to_api(user_prompt_submit),
|
||||
stop: map_hook_matcher_groups_to_api(stop),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hook_matcher_groups_to_api(
|
||||
groups: Vec<CoreMatcherGroup>,
|
||||
) -> Vec<ConfiguredHookMatcherGroup> {
|
||||
groups
|
||||
.into_iter()
|
||||
.map(map_hook_matcher_group_to_api)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn map_hook_matcher_group_to_api(group: CoreMatcherGroup) -> ConfiguredHookMatcherGroup {
|
||||
ConfiguredHookMatcherGroup {
|
||||
matcher: group.matcher,
|
||||
hooks: group
|
||||
.hooks
|
||||
.into_iter()
|
||||
.map(map_hook_handler_to_api)
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hook_handler_to_api(handler: CoreHookHandlerConfig) -> ConfiguredHookHandler {
|
||||
match handler {
|
||||
CoreHookHandlerConfig::Command {
|
||||
command,
|
||||
timeout_sec,
|
||||
r#async,
|
||||
status_message,
|
||||
} => ConfiguredHookHandler::Command {
|
||||
command,
|
||||
timeout_sec,
|
||||
r#async,
|
||||
status_message,
|
||||
},
|
||||
CoreHookHandlerConfig::Prompt {} => ConfiguredHookHandler::Prompt {},
|
||||
CoreHookHandlerConfig::Agent {} => ConfiguredHookHandler::Agent {},
|
||||
}
|
||||
}
|
||||
|
||||
fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
|
||||
match mode {
|
||||
CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
|
||||
CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
|
||||
CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
|
||||
CoreSandboxModeRequirement::ExternalSandbox => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_residency_requirement_to_api(
|
||||
residency: CoreResidencyRequirement,
|
||||
) -> codex_app_server_protocol::ResidencyRequirement {
|
||||
match residency {
|
||||
CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_requirements_to_api(
|
||||
network: codex_config::NetworkRequirementsToml,
|
||||
) -> NetworkRequirements {
|
||||
let allowed_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains);
|
||||
let denied_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::denied_domains);
|
||||
let allow_unix_sockets = network
|
||||
.unix_sockets
|
||||
.as_ref()
|
||||
.map(codex_config::NetworkUnixSocketPermissionsToml::allow_unix_sockets)
|
||||
.filter(|entries| !entries.is_empty());
|
||||
|
||||
NetworkRequirements {
|
||||
enabled: network.enabled,
|
||||
http_port: network.http_port,
|
||||
socks_port: network.socks_port,
|
||||
allow_upstream_proxy: network.allow_upstream_proxy,
|
||||
dangerously_allow_non_loopback_proxy: network.dangerously_allow_non_loopback_proxy,
|
||||
dangerously_allow_all_unix_sockets: network.dangerously_allow_all_unix_sockets,
|
||||
domains: network.domains.map(|domains| {
|
||||
domains
|
||||
.entries
|
||||
.into_iter()
|
||||
.map(|(pattern, permission)| {
|
||||
(pattern, map_network_domain_permission_to_api(permission))
|
||||
})
|
||||
.collect()
|
||||
}),
|
||||
managed_allowed_domains_only: network.managed_allowed_domains_only,
|
||||
allowed_domains,
|
||||
denied_domains,
|
||||
unix_sockets: network.unix_sockets.map(|unix_sockets| {
|
||||
unix_sockets
|
||||
.entries
|
||||
.into_iter()
|
||||
.map(|(path, permission)| {
|
||||
(path, map_network_unix_socket_permission_to_api(permission))
|
||||
})
|
||||
.collect()
|
||||
}),
|
||||
allow_unix_sockets,
|
||||
allow_local_binding: network.allow_local_binding,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_domain_permission_to_api(
|
||||
permission: codex_config::NetworkDomainPermissionToml,
|
||||
) -> NetworkDomainPermission {
|
||||
match permission {
|
||||
codex_config::NetworkDomainPermissionToml::Allow => NetworkDomainPermission::Allow,
|
||||
codex_config::NetworkDomainPermissionToml::Deny => NetworkDomainPermission::Deny,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_unix_socket_permission_to_api(
|
||||
permission: codex_config::NetworkUnixSocketPermissionToml,
|
||||
) -> NetworkUnixSocketPermission {
|
||||
match permission {
|
||||
codex_config::NetworkUnixSocketPermissionToml::Allow => NetworkUnixSocketPermission::Allow,
|
||||
codex_config::NetworkUnixSocketPermissionToml::None => NetworkUnixSocketPermission::None,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_error(err: ConfigManagerError) -> JSONRPCErrorError {
|
||||
if let Some(code) = err.write_error_code() {
|
||||
return config_write_error(code, err.to_string());
|
||||
}
|
||||
|
||||
internal_error(err.to_string())
|
||||
}
|
||||
|
||||
fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: message.into(),
|
||||
data: Some(json!({
|
||||
"config_write_error_code": code,
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config_manager::apply_runtime_feature_enablement;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_config::CloudRequirementsLoader;
|
||||
use codex_config::LoaderOverrides;
|
||||
use codex_config::NetworkDomainPermissionToml as CoreNetworkDomainPermissionToml;
|
||||
use codex_config::NetworkDomainPermissionsToml as CoreNetworkDomainPermissionsToml;
|
||||
use codex_config::NetworkRequirementsToml as CoreNetworkRequirementsToml;
|
||||
use codex_config::NetworkUnixSocketPermissionToml as CoreNetworkUnixSocketPermissionToml;
|
||||
use codex_config::NetworkUnixSocketPermissionsToml as CoreNetworkUnixSocketPermissionsToml;
|
||||
use codex_features::Feature;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_protocol::config_types::ApprovalsReviewer as CoreApprovalsReviewer;
|
||||
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tempfile::TempDir;
|
||||
use toml::Value as TomlValue;
|
||||
|
||||
#[derive(Default)]
|
||||
struct RecordingUserConfigReloader {
|
||||
call_count: AtomicUsize,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserConfigReloader for RecordingUserConfigReloader {
|
||||
async fn reload_user_config(&self) {
|
||||
self.call_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_converts_core_enums() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: Some(vec![
|
||||
CoreAskForApproval::Never,
|
||||
CoreAskForApproval::OnRequest,
|
||||
]),
|
||||
allowed_approvals_reviewers: Some(vec![
|
||||
CoreApprovalsReviewer::User,
|
||||
CoreApprovalsReviewer::AutoReview,
|
||||
]),
|
||||
allowed_sandbox_modes: Some(vec![
|
||||
CoreSandboxModeRequirement::ReadOnly,
|
||||
CoreSandboxModeRequirement::ExternalSandbox,
|
||||
]),
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: Some(vec![codex_config::WebSearchModeRequirement::Cached]),
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: Some(codex_config::FeatureRequirementsToml {
|
||||
entries: std::collections::BTreeMap::from([
|
||||
("apps".to_string(), false),
|
||||
("personality".to_string(), true),
|
||||
]),
|
||||
}),
|
||||
hooks: Some(ManagedHooksRequirementsToml {
|
||||
managed_dir: Some(PathBuf::from("/enterprise/hooks")),
|
||||
windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
|
||||
hooks: HookEventsToml {
|
||||
pre_tool_use: vec![CoreMatcherGroup {
|
||||
matcher: Some("^Bash$".to_string()),
|
||||
hooks: vec![CoreHookHandlerConfig::Command {
|
||||
command: "python3 /enterprise/hooks/pre.py".to_string(),
|
||||
timeout_sec: Some(10),
|
||||
r#async: false,
|
||||
status_message: Some("checking".to_string()),
|
||||
}],
|
||||
}],
|
||||
..Default::default()
|
||||
},
|
||||
}),
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: Some(CoreResidencyRequirement::Us),
|
||||
network: Some(CoreNetworkRequirementsToml {
|
||||
enabled: Some(true),
|
||||
http_port: Some(8080),
|
||||
socks_port: Some(1080),
|
||||
allow_upstream_proxy: Some(false),
|
||||
dangerously_allow_non_loopback_proxy: Some(false),
|
||||
dangerously_allow_all_unix_sockets: Some(true),
|
||||
domains: Some(CoreNetworkDomainPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([
|
||||
(
|
||||
"api.openai.com".to_string(),
|
||||
CoreNetworkDomainPermissionToml::Allow,
|
||||
),
|
||||
(
|
||||
"example.com".to_string(),
|
||||
CoreNetworkDomainPermissionToml::Deny,
|
||||
),
|
||||
]),
|
||||
}),
|
||||
managed_allowed_domains_only: Some(false),
|
||||
unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([(
|
||||
"/tmp/proxy.sock".to_string(),
|
||||
CoreNetworkUnixSocketPermissionToml::Allow,
|
||||
)]),
|
||||
}),
|
||||
allow_local_binding: Some(true),
|
||||
}),
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.allowed_approval_policies,
|
||||
Some(vec![
|
||||
codex_app_server_protocol::AskForApproval::Never,
|
||||
codex_app_server_protocol::AskForApproval::OnRequest,
|
||||
])
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_approvals_reviewers,
|
||||
Some(vec![
|
||||
codex_app_server_protocol::ApprovalsReviewer::User,
|
||||
codex_app_server_protocol::ApprovalsReviewer::AutoReview,
|
||||
])
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_sandbox_modes,
|
||||
Some(vec![SandboxMode::ReadOnly]),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_web_search_modes,
|
||||
Some(vec![WebSearchMode::Cached, WebSearchMode::Disabled]),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.feature_requirements,
|
||||
Some(std::collections::BTreeMap::from([
|
||||
("apps".to_string(), false),
|
||||
("personality".to_string(), true),
|
||||
])),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.hooks,
|
||||
Some(ManagedHooksRequirements {
|
||||
managed_dir: Some(PathBuf::from("/enterprise/hooks")),
|
||||
windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
|
||||
pre_tool_use: vec![ConfiguredHookMatcherGroup {
|
||||
matcher: Some("^Bash$".to_string()),
|
||||
hooks: vec![ConfiguredHookHandler::Command {
|
||||
command: "python3 /enterprise/hooks/pre.py".to_string(),
|
||||
timeout_sec: Some(10),
|
||||
r#async: false,
|
||||
status_message: Some("checking".to_string()),
|
||||
}],
|
||||
}],
|
||||
permission_request: Vec::new(),
|
||||
post_tool_use: Vec::new(),
|
||||
session_start: Vec::new(),
|
||||
user_prompt_submit: Vec::new(),
|
||||
stop: Vec::new(),
|
||||
}),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.enforce_residency,
|
||||
Some(codex_app_server_protocol::ResidencyRequirement::Us),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.network,
|
||||
Some(NetworkRequirements {
|
||||
enabled: Some(true),
|
||||
http_port: Some(8080),
|
||||
socks_port: Some(1080),
|
||||
allow_upstream_proxy: Some(false),
|
||||
dangerously_allow_non_loopback_proxy: Some(false),
|
||||
dangerously_allow_all_unix_sockets: Some(true),
|
||||
domains: Some(std::collections::BTreeMap::from([
|
||||
("api.openai.com".to_string(), NetworkDomainPermission::Allow,),
|
||||
("example.com".to_string(), NetworkDomainPermission::Deny),
|
||||
])),
|
||||
managed_allowed_domains_only: Some(false),
|
||||
allowed_domains: Some(vec!["api.openai.com".to_string()]),
|
||||
denied_domains: Some(vec!["example.com".to_string()]),
|
||||
unix_sockets: Some(std::collections::BTreeMap::from([(
|
||||
"/tmp/proxy.sock".to_string(),
|
||||
NetworkUnixSocketPermission::Allow,
|
||||
)])),
|
||||
allow_unix_sockets: Some(vec!["/tmp/proxy.sock".to_string()]),
|
||||
allow_local_binding: Some(true),
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_omits_unix_socket_none_entries_from_legacy_network_fields() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: None,
|
||||
allowed_approvals_reviewers: None,
|
||||
allowed_sandbox_modes: None,
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: None,
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: None,
|
||||
hooks: None,
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: None,
|
||||
network: Some(CoreNetworkRequirementsToml {
|
||||
enabled: None,
|
||||
http_port: None,
|
||||
socks_port: None,
|
||||
allow_upstream_proxy: None,
|
||||
dangerously_allow_non_loopback_proxy: None,
|
||||
dangerously_allow_all_unix_sockets: None,
|
||||
domains: None,
|
||||
managed_allowed_domains_only: None,
|
||||
unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([(
|
||||
"/tmp/ignored.sock".to_string(),
|
||||
CoreNetworkUnixSocketPermissionToml::None,
|
||||
)]),
|
||||
}),
|
||||
allow_local_binding: None,
|
||||
}),
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.network,
|
||||
Some(NetworkRequirements {
|
||||
enabled: None,
|
||||
http_port: None,
|
||||
socks_port: None,
|
||||
allow_upstream_proxy: None,
|
||||
dangerously_allow_non_loopback_proxy: None,
|
||||
dangerously_allow_all_unix_sockets: None,
|
||||
domains: None,
|
||||
managed_allowed_domains_only: None,
|
||||
allowed_domains: None,
|
||||
denied_domains: None,
|
||||
unix_sockets: Some(std::collections::BTreeMap::from([(
|
||||
"/tmp/ignored.sock".to_string(),
|
||||
NetworkUnixSocketPermission::None,
|
||||
)])),
|
||||
allow_unix_sockets: None,
|
||||
allow_local_binding: None,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_normalizes_allowed_web_search_modes() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: None,
|
||||
allowed_approvals_reviewers: None,
|
||||
allowed_sandbox_modes: None,
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: Some(Vec::new()),
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: None,
|
||||
hooks: None,
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: None,
|
||||
network: None,
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.allowed_web_search_modes,
|
||||
Some(vec![WebSearchMode::Disabled])
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_runtime_feature_enablement_keeps_cli_overrides_above_config_and_runtime() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
std::fs::write(
|
||||
codex_home.path().join("config.toml"),
|
||||
"[features]\napps = false\n",
|
||||
)
|
||||
.expect("write config");
|
||||
|
||||
let mut config = codex_core::config::ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.fallback_cwd(Some(codex_home.path().to_path_buf()))
|
||||
.cli_overrides(vec![(
|
||||
"features.apps".to_string(),
|
||||
TomlValue::Boolean(true),
|
||||
)])
|
||||
.build()
|
||||
.await
|
||||
.expect("load config");
|
||||
|
||||
apply_runtime_feature_enablement(
|
||||
&mut config,
|
||||
&BTreeMap::from([("apps".to_string(), false)]),
|
||||
);
|
||||
|
||||
assert!(config.features.enabled(Feature::Apps));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_runtime_feature_enablement_keeps_cloud_pins_above_cli_and_runtime() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
|
||||
let mut config = codex_core::config::ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.cli_overrides(vec![(
|
||||
"features.apps".to_string(),
|
||||
TomlValue::Boolean(true),
|
||||
)])
|
||||
.cloud_requirements(CloudRequirementsLoader::new(async {
|
||||
Ok(Some(ConfigRequirementsToml {
|
||||
feature_requirements: Some(codex_config::FeatureRequirementsToml {
|
||||
entries: BTreeMap::from([("apps".to_string(), false)]),
|
||||
}),
|
||||
..Default::default()
|
||||
}))
|
||||
}))
|
||||
.build()
|
||||
.await
|
||||
.expect("load config");
|
||||
|
||||
apply_runtime_feature_enablement(
|
||||
&mut config,
|
||||
&BTreeMap::from([("apps".to_string(), true)]),
|
||||
);
|
||||
|
||||
assert!(!config.features.enabled(Feature::Apps));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn batch_write_reloads_user_config_when_requested() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
let user_config_path = codex_home.path().join("config.toml");
|
||||
std::fs::write(&user_config_path, "").expect("write config");
|
||||
let reloader = Arc::new(RecordingUserConfigReloader::default());
|
||||
let analytics_config = Arc::new(
|
||||
codex_core::config::ConfigBuilder::default()
|
||||
.build()
|
||||
.await
|
||||
.expect("load analytics config"),
|
||||
);
|
||||
let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test"));
|
||||
let config_api = ConfigApi::new(
|
||||
ConfigManager::new(
|
||||
codex_home.path().to_path_buf(),
|
||||
Vec::new(),
|
||||
LoaderOverrides::default(),
|
||||
CloudRequirementsLoader::default(),
|
||||
Arg0DispatchPaths::default(),
|
||||
Arc::new(codex_config::NoopThreadConfigLoader),
|
||||
),
|
||||
reloader.clone(),
|
||||
AnalyticsEventsClient::new(
|
||||
auth_manager,
|
||||
analytics_config
|
||||
.chatgpt_base_url
|
||||
.trim_end_matches('/')
|
||||
.to_string(),
|
||||
analytics_config.analytics_enabled,
|
||||
),
|
||||
);
|
||||
|
||||
let response = config_api
|
||||
.batch_write(ConfigBatchWriteParams {
|
||||
edits: vec![codex_app_server_protocol::ConfigEdit {
|
||||
key_path: "model".to_string(),
|
||||
value: json!("gpt-5"),
|
||||
merge_strategy: codex_app_server_protocol::MergeStrategy::Replace,
|
||||
}],
|
||||
file_path: Some(user_config_path.display().to_string()),
|
||||
expected_version: None,
|
||||
reload_user_config: true,
|
||||
})
|
||||
.await
|
||||
.expect("batch write should succeed");
|
||||
|
||||
assert_eq!(
|
||||
response,
|
||||
ConfigWriteResponse {
|
||||
status: codex_app_server_protocol::WriteStatus::Ok,
|
||||
version: response.version.clone(),
|
||||
file_path: codex_utils_absolute_path::AbsolutePathBuf::try_from(
|
||||
user_config_path.clone()
|
||||
)
|
||||
.expect("absolute config path"),
|
||||
overridden_metadata: None,
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_to_string(user_config_path).unwrap(),
|
||||
"model = \"gpt-5\"\n"
|
||||
);
|
||||
assert_eq!(reloader.call_count.load(Ordering::Relaxed), 1);
|
||||
}
|
||||
}
|
||||
@@ -1,16 +1,8 @@
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use async_trait::async_trait;
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose::STANDARD;
|
||||
use codex_app_server_protocol::ClientResponsePayload;
|
||||
use codex_app_server_protocol::DeviceKeyAlgorithm;
|
||||
use codex_app_server_protocol::DeviceKeyCreateParams;
|
||||
use codex_app_server_protocol::DeviceKeyCreateResponse;
|
||||
@@ -36,22 +28,19 @@ use codex_device_key::RemoteControlClientEnrollmentAudience;
|
||||
use codex_device_key::RemoteControlClientEnrollmentSignPayload;
|
||||
use codex_state::DeviceKeyBindingRecord;
|
||||
use codex_state::StateRuntime;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct DeviceKeyRequestProcessor {
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
pub(crate) struct DeviceKeyApi {
|
||||
store: DeviceKeyStore,
|
||||
}
|
||||
|
||||
impl DeviceKeyRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
sqlite_home: PathBuf,
|
||||
default_provider: String,
|
||||
) -> Self {
|
||||
impl DeviceKeyApi {
|
||||
pub(crate) fn new(sqlite_home: PathBuf, default_provider: String) -> Self {
|
||||
Self {
|
||||
outgoing,
|
||||
store: DeviceKeyStore::new(Arc::new(StateDeviceKeyBindingStore::new(
|
||||
sqlite_home,
|
||||
default_provider,
|
||||
@@ -59,120 +48,56 @@ impl DeviceKeyRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn create(
|
||||
pub(crate) async fn create(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: DeviceKeyCreateParams,
|
||||
device_key_requests_allowed: bool,
|
||||
) {
|
||||
self.spawn_request(
|
||||
request_id,
|
||||
"device/key/create",
|
||||
device_key_requests_allowed,
|
||||
move |store| async move { create_device_key(store, params).await },
|
||||
);
|
||||
) -> Result<DeviceKeyCreateResponse, JSONRPCErrorError> {
|
||||
let info = self
|
||||
.store
|
||||
.create(DeviceKeyCreateRequest {
|
||||
protection_policy: protection_policy_from_params(params.protection_policy),
|
||||
binding: DeviceKeyBinding {
|
||||
account_user_id: params.account_user_id,
|
||||
client_id: params.client_id,
|
||||
},
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(create_response_from_info(info))
|
||||
}
|
||||
|
||||
pub(crate) fn public(
|
||||
pub(crate) async fn public(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: DeviceKeyPublicParams,
|
||||
device_key_requests_allowed: bool,
|
||||
) {
|
||||
self.spawn_request(
|
||||
request_id,
|
||||
"device/key/public",
|
||||
device_key_requests_allowed,
|
||||
move |store| async move { public_device_key(store, params).await },
|
||||
);
|
||||
) -> Result<DeviceKeyPublicResponse, JSONRPCErrorError> {
|
||||
let info = self
|
||||
.store
|
||||
.get_public(DeviceKeyGetPublicRequest {
|
||||
key_id: params.key_id,
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(public_response_from_info(info))
|
||||
}
|
||||
|
||||
pub(crate) fn sign(
|
||||
pub(crate) async fn sign(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: DeviceKeySignParams,
|
||||
device_key_requests_allowed: bool,
|
||||
) {
|
||||
self.spawn_request(
|
||||
request_id,
|
||||
"device/key/sign",
|
||||
device_key_requests_allowed,
|
||||
move |store| async move { sign_device_key(store, params).await },
|
||||
);
|
||||
) -> Result<DeviceKeySignResponse, JSONRPCErrorError> {
|
||||
let signature = self
|
||||
.store
|
||||
.sign(DeviceKeySignRequest {
|
||||
key_id: params.key_id,
|
||||
payload: payload_from_params(params.payload),
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(DeviceKeySignResponse {
|
||||
signature_der_base64: STANDARD.encode(signature.signature_der),
|
||||
signed_payload_base64: STANDARD.encode(signature.signed_payload),
|
||||
algorithm: algorithm_from_store(signature.algorithm),
|
||||
})
|
||||
}
|
||||
|
||||
fn spawn_request<R, F, Fut>(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
method: &'static str,
|
||||
device_key_requests_allowed: bool,
|
||||
run_request: F,
|
||||
) where
|
||||
R: Into<ClientResponsePayload> + Send + 'static,
|
||||
F: FnOnce(DeviceKeyStore) -> Fut + Send + 'static,
|
||||
Fut: Future<Output = Result<R, JSONRPCErrorError>> + Send + 'static,
|
||||
{
|
||||
let store = self.store.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
tokio::spawn(async move {
|
||||
let result = if !device_key_requests_allowed {
|
||||
Err(invalid_request(format!(
|
||||
"{method} is not available over remote transports"
|
||||
)))
|
||||
} else {
|
||||
run_request(store).await
|
||||
};
|
||||
outgoing.send_result(request_id, result).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_device_key(
|
||||
store: DeviceKeyStore,
|
||||
params: DeviceKeyCreateParams,
|
||||
) -> Result<DeviceKeyCreateResponse, JSONRPCErrorError> {
|
||||
let info = store
|
||||
.create(DeviceKeyCreateRequest {
|
||||
protection_policy: protection_policy_from_params(params.protection_policy),
|
||||
binding: DeviceKeyBinding {
|
||||
account_user_id: params.account_user_id,
|
||||
client_id: params.client_id,
|
||||
},
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(create_response_from_info(info))
|
||||
}
|
||||
|
||||
async fn public_device_key(
|
||||
store: DeviceKeyStore,
|
||||
params: DeviceKeyPublicParams,
|
||||
) -> Result<DeviceKeyPublicResponse, JSONRPCErrorError> {
|
||||
let info = store
|
||||
.get_public(DeviceKeyGetPublicRequest {
|
||||
key_id: params.key_id,
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(public_response_from_info(info))
|
||||
}
|
||||
|
||||
async fn sign_device_key(
|
||||
store: DeviceKeyStore,
|
||||
params: DeviceKeySignParams,
|
||||
) -> Result<DeviceKeySignResponse, JSONRPCErrorError> {
|
||||
let signature = store
|
||||
.sign(DeviceKeySignRequest {
|
||||
key_id: params.key_id,
|
||||
payload: payload_from_params(params.payload),
|
||||
})
|
||||
.await
|
||||
.map_err(map_device_key_error)?;
|
||||
Ok(DeviceKeySignResponse {
|
||||
signature_der_base64: STANDARD.encode(signature.signature_der),
|
||||
signed_payload_base64: STANDARD.encode(signature.signed_payload),
|
||||
algorithm: algorithm_from_store(signature.algorithm),
|
||||
})
|
||||
}
|
||||
|
||||
struct StateDeviceKeyBindingStore {
|
||||
@@ -1,22 +1,15 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::config::external_agent_config::ExternalAgentConfigDetectOptions;
|
||||
use crate::config::external_agent_config::ExternalAgentConfigMigrationItem as CoreMigrationItem;
|
||||
use crate::config::external_agent_config::ExternalAgentConfigMigrationItemType as CoreMigrationItemType;
|
||||
use crate::config::external_agent_config::ExternalAgentConfigService;
|
||||
use crate::config::external_agent_config::NamedMigration as CoreNamedMigration;
|
||||
use crate::config::external_agent_config::PendingPluginImport;
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_params;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use codex_app_server_protocol::CommandMigration;
|
||||
use codex_app_server_protocol::ExternalAgentConfigDetectParams;
|
||||
use codex_app_server_protocol::ExternalAgentConfigDetectResponse;
|
||||
use codex_app_server_protocol::ExternalAgentConfigImportCompletedNotification;
|
||||
use codex_app_server_protocol::ExternalAgentConfigImportParams;
|
||||
use codex_app_server_protocol::ExternalAgentConfigImportResponse;
|
||||
use codex_app_server_protocol::ExternalAgentConfigMigrationItem;
|
||||
use codex_app_server_protocol::ExternalAgentConfigMigrationItemType;
|
||||
use codex_app_server_protocol::HookMigration;
|
||||
@@ -24,55 +17,30 @@ use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::McpServerMigration;
|
||||
use codex_app_server_protocol::MigrationDetails;
|
||||
use codex_app_server_protocol::PluginsMigration;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_core::StartThreadOptions;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
use codex_app_server_protocol::SubagentMigration;
|
||||
use codex_external_agent_sessions::ExternalAgentSessionMigration as CoreSessionMigration;
|
||||
use codex_external_agent_sessions::ImportedExternalAgentSession;
|
||||
use codex_external_agent_sessions::PendingSessionImport;
|
||||
use codex_external_agent_sessions::prepare_validated_session_imports;
|
||||
use codex_external_agent_sessions::record_imported_session;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::InitialHistory;
|
||||
use codex_protocol::protocol::Op;
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use super::ConfigRequestProcessor;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ExternalAgentConfigRequestProcessor {
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
pub(crate) struct ExternalAgentConfigApi {
|
||||
codex_home: PathBuf,
|
||||
migration_service: ExternalAgentConfigService,
|
||||
session_import_permits: Arc<Semaphore>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
config_manager: ConfigManager,
|
||||
config_processor: ConfigRequestProcessor,
|
||||
arg0_paths: Arg0DispatchPaths,
|
||||
}
|
||||
|
||||
impl ExternalAgentConfigRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
config_manager: ConfigManager,
|
||||
config_processor: ConfigRequestProcessor,
|
||||
arg0_paths: Arg0DispatchPaths,
|
||||
codex_home: PathBuf,
|
||||
) -> Self {
|
||||
impl ExternalAgentConfigApi {
|
||||
pub(crate) fn new(codex_home: PathBuf) -> Self {
|
||||
Self {
|
||||
outgoing,
|
||||
migration_service: ExternalAgentConfigService::new(codex_home.clone()),
|
||||
codex_home,
|
||||
session_import_permits: Arc::new(Semaphore::new(1)),
|
||||
thread_manager,
|
||||
config_manager,
|
||||
config_processor,
|
||||
arg0_paths,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,7 +123,7 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
subagents: details
|
||||
.subagents
|
||||
.into_iter()
|
||||
.map(|subagent| codex_app_server_protocol::SubagentMigration {
|
||||
.map(|subagent| SubagentMigration {
|
||||
name: subagent.name,
|
||||
})
|
||||
.collect(),
|
||||
@@ -170,164 +138,7 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn import(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ExternalAgentConfigImportParams,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
let needs_runtime_refresh = migration_items_need_runtime_refresh(¶ms.migration_items);
|
||||
let has_migration_items = !params.migration_items.is_empty();
|
||||
let has_plugin_imports = params.migration_items.iter().any(|item| {
|
||||
matches!(
|
||||
item.item_type,
|
||||
ExternalAgentConfigMigrationItemType::Plugins
|
||||
)
|
||||
});
|
||||
let pending_session_imports = self.validate_pending_session_imports(¶ms)?;
|
||||
let pending_plugin_imports = self.import_external_agent_config(params).await?;
|
||||
if needs_runtime_refresh {
|
||||
self.config_processor.handle_config_mutation().await;
|
||||
}
|
||||
self.outgoing
|
||||
.send_response(request_id, ExternalAgentConfigImportResponse {})
|
||||
.await;
|
||||
|
||||
if !has_migration_items {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let has_background_imports =
|
||||
!pending_plugin_imports.is_empty() || !pending_session_imports.is_empty();
|
||||
if !has_background_imports {
|
||||
self.outgoing
|
||||
.send_server_notification(ServerNotification::ExternalAgentConfigImportCompleted(
|
||||
ExternalAgentConfigImportCompletedNotification {},
|
||||
))
|
||||
.await;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let session_import_permits = Arc::clone(&self.session_import_permits);
|
||||
let session_processor = self.clone();
|
||||
let plugin_processor = self.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let thread_manager = Arc::clone(&self.thread_manager);
|
||||
tokio::spawn(async move {
|
||||
let session_imports = async move {
|
||||
if !pending_session_imports.is_empty() {
|
||||
let Ok(_session_import_permit) = session_import_permits.acquire_owned().await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let pending_session_imports = session_processor
|
||||
.prepare_validated_session_imports(pending_session_imports);
|
||||
for pending_session_import in pending_session_imports {
|
||||
match session_processor
|
||||
.import_external_agent_session(pending_session_import.session)
|
||||
.await
|
||||
{
|
||||
Ok(imported_thread_id) => {
|
||||
session_processor.record_imported_session(
|
||||
&pending_session_import.source_path,
|
||||
imported_thread_id,
|
||||
);
|
||||
}
|
||||
Err(error) => {
|
||||
tracing::warn!(
|
||||
error = %error.message,
|
||||
path = %pending_session_import.source_path.display(),
|
||||
"external agent session import failed"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
let plugin_imports = async move {
|
||||
for pending_plugin_import in pending_plugin_imports {
|
||||
match plugin_processor
|
||||
.complete_pending_plugin_import(pending_plugin_import)
|
||||
.await
|
||||
{
|
||||
Ok(()) => {}
|
||||
Err(error) => {
|
||||
tracing::warn!(
|
||||
error = %error.message,
|
||||
"external agent config plugin import failed"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
tokio::join!(session_imports, plugin_imports);
|
||||
if has_plugin_imports {
|
||||
thread_manager.plugins_manager().clear_cache();
|
||||
thread_manager.skills_manager().clear_cache();
|
||||
}
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ExternalAgentConfigImportCompleted(
|
||||
ExternalAgentConfigImportCompletedNotification {},
|
||||
))
|
||||
.await;
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn import_external_agent_session(
|
||||
&self,
|
||||
session: ImportedExternalAgentSession,
|
||||
) -> Result<ThreadId, JSONRPCErrorError> {
|
||||
let ImportedExternalAgentSession {
|
||||
cwd,
|
||||
title,
|
||||
rollout_items,
|
||||
} = session;
|
||||
let config = self
|
||||
.config_manager
|
||||
.load_with_overrides(
|
||||
/*request_overrides*/ None,
|
||||
ConfigOverrides {
|
||||
cwd: Some(PathBuf::from(cwd.to_string_lossy().into_owned())),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!("failed to load imported session config: {err}"))
|
||||
})?;
|
||||
let environments = self
|
||||
.thread_manager
|
||||
.default_environment_selections(&config.cwd);
|
||||
let imported_thread = self
|
||||
.thread_manager
|
||||
.start_thread_with_options(StartThreadOptions {
|
||||
config,
|
||||
initial_history: InitialHistory::Forked(rollout_items),
|
||||
session_source: None,
|
||||
dynamic_tools: Vec::new(),
|
||||
persist_extended_history: false,
|
||||
metrics_service_name: None,
|
||||
parent_trace: None,
|
||||
environments,
|
||||
})
|
||||
.await
|
||||
.map_err(|err| internal_error(format!("failed to import session: {err}")))?;
|
||||
if let Some(title) = title
|
||||
&& let Some(name) = codex_core::util::normalize_thread_name(&title)
|
||||
{
|
||||
imported_thread
|
||||
.thread
|
||||
.submit(Op::SetThreadName { name })
|
||||
.await
|
||||
.map_err(|err| internal_error(format!("failed to name imported session: {err}")))?;
|
||||
}
|
||||
Ok(imported_thread.thread_id)
|
||||
}
|
||||
|
||||
fn validate_pending_session_imports(
|
||||
pub(crate) fn validate_pending_session_imports(
|
||||
&self,
|
||||
params: &ExternalAgentConfigImportParams,
|
||||
) -> Result<Vec<CoreSessionMigration>, JSONRPCErrorError> {
|
||||
@@ -365,14 +176,22 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
Ok(selected_sessions)
|
||||
}
|
||||
|
||||
fn prepare_validated_session_imports(
|
||||
pub(crate) fn prepare_validated_session_imports(
|
||||
&self,
|
||||
sessions: Vec<CoreSessionMigration>,
|
||||
) -> Vec<PendingSessionImport> {
|
||||
prepare_validated_session_imports(&self.codex_home, sessions)
|
||||
}
|
||||
|
||||
fn record_imported_session(&self, source_path: &std::path::Path, imported_thread_id: ThreadId) {
|
||||
pub(crate) fn session_import_permits(&self) -> Arc<Semaphore> {
|
||||
Arc::clone(&self.session_import_permits)
|
||||
}
|
||||
|
||||
pub(crate) fn record_imported_session(
|
||||
&self,
|
||||
source_path: &std::path::Path,
|
||||
imported_thread_id: ThreadId,
|
||||
) {
|
||||
if let Err(err) = record_imported_session(&self.codex_home, source_path, imported_thread_id)
|
||||
{
|
||||
tracing::warn!(
|
||||
@@ -383,7 +202,7 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
async fn import_external_agent_config(
|
||||
pub(crate) async fn import(
|
||||
&self,
|
||||
params: ExternalAgentConfigImportParams,
|
||||
) -> Result<Vec<PendingPluginImport>, JSONRPCErrorError> {
|
||||
@@ -478,7 +297,7 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
.map_err(|err| internal_error(err.to_string()))
|
||||
}
|
||||
|
||||
async fn complete_pending_plugin_import(
|
||||
pub(crate) async fn complete_pending_plugin_import(
|
||||
&self,
|
||||
pending_plugin_import: PendingPluginImport,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
@@ -493,27 +312,9 @@ impl ExternalAgentConfigRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
fn migration_items_need_runtime_refresh(items: &[ExternalAgentConfigMigrationItem]) -> bool {
|
||||
items.iter().any(|item| {
|
||||
matches!(
|
||||
item.item_type,
|
||||
ExternalAgentConfigMigrationItemType::Config
|
||||
| ExternalAgentConfigMigrationItemType::Skills
|
||||
| ExternalAgentConfigMigrationItemType::McpServerConfig
|
||||
| ExternalAgentConfigMigrationItemType::Hooks
|
||||
| ExternalAgentConfigMigrationItemType::Commands
|
||||
| ExternalAgentConfigMigrationItemType::Plugins
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn session_not_detected_error(path: &std::path::Path) -> JSONRPCErrorError {
|
||||
invalid_params(format!(
|
||||
"external agent session was not detected for import: {}",
|
||||
path.display()
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "external_agent_config_processor_tests.rs"]
|
||||
mod external_agent_config_processor_tests;
|
||||
@@ -1,7 +1,5 @@
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::fs_watch::FsWatchManager;
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use base64::Engine;
|
||||
use base64::engine::general_purpose::STANDARD;
|
||||
use codex_app_server_protocol::FsCopyParams;
|
||||
@@ -17,10 +15,6 @@ use codex_app_server_protocol::FsReadFileParams;
|
||||
use codex_app_server_protocol::FsReadFileResponse;
|
||||
use codex_app_server_protocol::FsRemoveParams;
|
||||
use codex_app_server_protocol::FsRemoveResponse;
|
||||
use codex_app_server_protocol::FsUnwatchParams;
|
||||
use codex_app_server_protocol::FsUnwatchResponse;
|
||||
use codex_app_server_protocol::FsWatchParams;
|
||||
use codex_app_server_protocol::FsWatchResponse;
|
||||
use codex_app_server_protocol::FsWriteFileParams;
|
||||
use codex_app_server_protocol::FsWriteFileResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
@@ -32,24 +26,13 @@ use std::io;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct FsRequestProcessor {
|
||||
pub(crate) struct FsApi {
|
||||
file_system: Arc<dyn ExecutorFileSystem>,
|
||||
fs_watch_manager: FsWatchManager,
|
||||
}
|
||||
|
||||
impl FsRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
file_system: Arc<dyn ExecutorFileSystem>,
|
||||
fs_watch_manager: FsWatchManager,
|
||||
) -> Self {
|
||||
Self {
|
||||
file_system,
|
||||
fs_watch_manager,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn connection_closed(&self, connection_id: ConnectionId) {
|
||||
self.fs_watch_manager.connection_closed(connection_id).await;
|
||||
impl FsApi {
|
||||
pub(crate) fn new(file_system: Arc<dyn ExecutorFileSystem>) -> Self {
|
||||
Self { file_system }
|
||||
}
|
||||
|
||||
pub(crate) async fn read_file(
|
||||
@@ -173,25 +156,9 @@ impl FsRequestProcessor {
|
||||
.map_err(map_fs_error)?;
|
||||
Ok(FsCopyResponse {})
|
||||
}
|
||||
|
||||
pub(crate) async fn watch(
|
||||
&self,
|
||||
connection_id: ConnectionId,
|
||||
params: FsWatchParams,
|
||||
) -> Result<FsWatchResponse, JSONRPCErrorError> {
|
||||
self.fs_watch_manager.watch(connection_id, params).await
|
||||
}
|
||||
|
||||
pub(crate) async fn unwatch(
|
||||
&self,
|
||||
connection_id: ConnectionId,
|
||||
params: FsUnwatchParams,
|
||||
) -> Result<FsUnwatchResponse, JSONRPCErrorError> {
|
||||
self.fs_watch_manager.unwatch(connection_id, params).await
|
||||
}
|
||||
}
|
||||
|
||||
fn map_fs_error(err: io::Error) -> JSONRPCErrorError {
|
||||
pub(crate) fn map_fs_error(err: io::Error) -> JSONRPCErrorError {
|
||||
if err.kind() == io::ErrorKind::InvalidInput {
|
||||
invalid_request(err.to_string())
|
||||
} else {
|
||||
@@ -804,7 +804,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn in_process_allows_device_key_requests_to_reach_device_key_processor() {
|
||||
async fn in_process_allows_device_key_requests_to_reach_device_key_api() {
|
||||
let client = start_test_client(SessionSource::Cli).await;
|
||||
const MALFORMED_KEY_ID_MESSAGE: &str = concat!(
|
||||
"invalid device key payload: keyId must be dk_hse_, dk_tpm_, or dk_osn_ ",
|
||||
|
||||
@@ -73,21 +73,25 @@ use tracing_subscriber::util::SubscriberInitExt;
|
||||
mod analytics_utils;
|
||||
mod app_server_tracing;
|
||||
mod bespoke_event_handling;
|
||||
mod codex_message_processor;
|
||||
mod command_exec;
|
||||
mod config;
|
||||
mod config_api;
|
||||
mod config_manager;
|
||||
mod config_manager_service;
|
||||
mod connection_rpc_gate;
|
||||
mod device_key_api;
|
||||
mod dynamic_tools;
|
||||
mod error_code;
|
||||
mod external_agent_config_api;
|
||||
mod filters;
|
||||
mod fs_api;
|
||||
mod fs_watch;
|
||||
mod fuzzy_file_search;
|
||||
pub mod in_process;
|
||||
mod message_processor;
|
||||
mod models;
|
||||
mod outgoing_message;
|
||||
mod request_processors;
|
||||
mod request_serialization;
|
||||
mod server_request_error;
|
||||
mod thread_state;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::atomic::Ordering;
|
||||
@@ -14,6 +15,7 @@ use codex_app_server_protocol::ServerRequestPayload;
|
||||
use codex_otel::span_w3c_trace_context;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::W3cTraceContext;
|
||||
use serde::Serialize;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
@@ -24,17 +26,22 @@ use tracing::warn;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::server_request_error::TURN_TRANSITION_PENDING_REQUEST_ERROR_REASON;
|
||||
pub(crate) use codex_app_server_transport::ConnectionId;
|
||||
pub(crate) use codex_app_server_transport::OutgoingError;
|
||||
pub(crate) use codex_app_server_transport::OutgoingMessage;
|
||||
pub(crate) use codex_app_server_transport::OutgoingResponse;
|
||||
pub(crate) use codex_app_server_transport::QueuedOutgoingMessage;
|
||||
|
||||
#[cfg(test)]
|
||||
use codex_protocol::account::PlanType;
|
||||
|
||||
pub(crate) type ClientRequestResult = std::result::Result<Result, JSONRPCErrorError>;
|
||||
|
||||
/// Stable identifier for a transport connection.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
pub(crate) struct ConnectionId(pub(crate) u64);
|
||||
|
||||
impl fmt::Display for ConnectionId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Stable identifier for a client request scoped to a transport connection.
|
||||
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
|
||||
pub(crate) struct ConnectionRequestId {
|
||||
@@ -89,6 +96,21 @@ pub(crate) enum OutgoingEnvelope {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct QueuedOutgoingMessage {
|
||||
pub(crate) message: OutgoingMessage,
|
||||
pub(crate) write_complete_tx: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
impl QueuedOutgoingMessage {
|
||||
pub(crate) fn new(message: OutgoingMessage) -> Self {
|
||||
Self {
|
||||
message,
|
||||
write_complete_tx: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends messages to the client and manages request callbacks.
|
||||
pub(crate) struct OutgoingMessageSender {
|
||||
next_server_request_id: AtomicI64,
|
||||
@@ -643,6 +665,30 @@ impl OutgoingMessageSender {
|
||||
}
|
||||
}
|
||||
|
||||
/// Outgoing message from the server to the client.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(untagged)]
|
||||
pub(crate) enum OutgoingMessage {
|
||||
Request(ServerRequest),
|
||||
/// AppServerNotification is specific to the case where this is run as an
|
||||
/// "app server" as opposed to an MCP server.
|
||||
AppServerNotification(ServerNotification),
|
||||
Response(OutgoingResponse),
|
||||
Error(OutgoingError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub(crate) struct OutgoingResponse {
|
||||
pub id: RequestId,
|
||||
pub result: Result,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub(crate) struct OutgoingError {
|
||||
pub error: JSONRPCErrorError,
|
||||
pub id: RequestId,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -1,493 +0,0 @@
|
||||
use crate::bespoke_event_handling::apply_bespoke_event_handling;
|
||||
use crate::bespoke_event_handling::maybe_emit_hook_prompt_item_completed;
|
||||
use crate::command_exec::CommandExecManager;
|
||||
use crate::command_exec::StartCommandExecParams;
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::error_code::INPUT_TOO_LARGE_ERROR_CODE;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::INVALID_PARAMS_ERROR_CODE;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::error_code::invalid_params;
|
||||
use crate::models::supported_models;
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use crate::outgoing_message::RequestContext;
|
||||
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
|
||||
use crate::thread_status::ThreadWatchManager;
|
||||
use crate::thread_status::resolve_thread_status;
|
||||
use chrono::DateTime;
|
||||
use chrono::Duration as ChronoDuration;
|
||||
use chrono::SecondsFormat;
|
||||
use chrono::Utc;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_analytics::AnalyticsJsonRpcError;
|
||||
use codex_analytics::InputError;
|
||||
use codex_analytics::TurnSteerRequestError;
|
||||
use codex_app_server_protocol::Account;
|
||||
use codex_app_server_protocol::AccountLoginCompletedNotification;
|
||||
use codex_app_server_protocol::AccountUpdatedNotification;
|
||||
use codex_app_server_protocol::AddCreditsNudgeCreditType;
|
||||
use codex_app_server_protocol::AddCreditsNudgeEmailStatus;
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppListUpdatedNotification;
|
||||
use codex_app_server_protocol::AppSummary;
|
||||
use codex_app_server_protocol::AppsListParams;
|
||||
use codex_app_server_protocol::AppsListResponse;
|
||||
use codex_app_server_protocol::AskForApproval;
|
||||
use codex_app_server_protocol::AuthMode;
|
||||
use codex_app_server_protocol::CancelLoginAccountParams;
|
||||
use codex_app_server_protocol::CancelLoginAccountResponse;
|
||||
use codex_app_server_protocol::CancelLoginAccountStatus;
|
||||
use codex_app_server_protocol::ClientInfo;
|
||||
use codex_app_server_protocol::ClientRequest;
|
||||
use codex_app_server_protocol::ClientResponsePayload;
|
||||
use codex_app_server_protocol::CodexErrorInfo;
|
||||
use codex_app_server_protocol::CollaborationModeListParams;
|
||||
use codex_app_server_protocol::CollaborationModeListResponse;
|
||||
use codex_app_server_protocol::CommandExecParams;
|
||||
use codex_app_server_protocol::CommandExecResizeParams;
|
||||
use codex_app_server_protocol::CommandExecTerminateParams;
|
||||
use codex_app_server_protocol::CommandExecWriteParams;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::ConversationGitInfo;
|
||||
use codex_app_server_protocol::ConversationSummary;
|
||||
use codex_app_server_protocol::DynamicToolSpec as ApiDynamicToolSpec;
|
||||
use codex_app_server_protocol::ExperimentalFeature as ApiExperimentalFeature;
|
||||
use codex_app_server_protocol::ExperimentalFeatureListParams;
|
||||
use codex_app_server_protocol::ExperimentalFeatureListResponse;
|
||||
use codex_app_server_protocol::ExperimentalFeatureStage as ApiExperimentalFeatureStage;
|
||||
use codex_app_server_protocol::FeedbackUploadParams;
|
||||
use codex_app_server_protocol::FeedbackUploadResponse;
|
||||
use codex_app_server_protocol::GetAccountParams;
|
||||
use codex_app_server_protocol::GetAccountRateLimitsResponse;
|
||||
use codex_app_server_protocol::GetAccountResponse;
|
||||
use codex_app_server_protocol::GetAuthStatusParams;
|
||||
use codex_app_server_protocol::GetAuthStatusResponse;
|
||||
use codex_app_server_protocol::GetConversationSummaryParams;
|
||||
use codex_app_server_protocol::GetConversationSummaryResponse;
|
||||
use codex_app_server_protocol::GitDiffToRemoteParams;
|
||||
use codex_app_server_protocol::GitDiffToRemoteResponse;
|
||||
use codex_app_server_protocol::GitInfo as ApiGitInfo;
|
||||
use codex_app_server_protocol::HookMetadata;
|
||||
use codex_app_server_protocol::HooksListParams;
|
||||
use codex_app_server_protocol::HooksListResponse;
|
||||
use codex_app_server_protocol::InitializeParams;
|
||||
use codex_app_server_protocol::InitializeResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ListMcpServerStatusParams;
|
||||
use codex_app_server_protocol::ListMcpServerStatusResponse;
|
||||
use codex_app_server_protocol::LoginAccountParams;
|
||||
use codex_app_server_protocol::LoginAccountResponse;
|
||||
use codex_app_server_protocol::LoginApiKeyParams;
|
||||
use codex_app_server_protocol::LogoutAccountResponse;
|
||||
use codex_app_server_protocol::MarketplaceAddParams;
|
||||
use codex_app_server_protocol::MarketplaceAddResponse;
|
||||
use codex_app_server_protocol::MarketplaceInterface;
|
||||
use codex_app_server_protocol::MarketplaceRemoveParams;
|
||||
use codex_app_server_protocol::MarketplaceRemoveResponse;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeErrorInfo;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeParams;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeResponse;
|
||||
use codex_app_server_protocol::McpResourceReadParams;
|
||||
use codex_app_server_protocol::McpResourceReadResponse;
|
||||
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
|
||||
use codex_app_server_protocol::McpServerOauthLoginParams;
|
||||
use codex_app_server_protocol::McpServerOauthLoginResponse;
|
||||
use codex_app_server_protocol::McpServerRefreshResponse;
|
||||
use codex_app_server_protocol::McpServerStatus;
|
||||
use codex_app_server_protocol::McpServerStatusDetail;
|
||||
use codex_app_server_protocol::McpServerToolCallParams;
|
||||
use codex_app_server_protocol::McpServerToolCallResponse;
|
||||
use codex_app_server_protocol::MemoryResetResponse;
|
||||
use codex_app_server_protocol::MockExperimentalMethodParams;
|
||||
use codex_app_server_protocol::MockExperimentalMethodResponse;
|
||||
use codex_app_server_protocol::ModelListParams;
|
||||
use codex_app_server_protocol::ModelListResponse;
|
||||
use codex_app_server_protocol::PermissionProfileModificationParams;
|
||||
use codex_app_server_protocol::PermissionProfileSelectionParams;
|
||||
use codex_app_server_protocol::PluginDetail;
|
||||
use codex_app_server_protocol::PluginInstallParams;
|
||||
use codex_app_server_protocol::PluginInstallResponse;
|
||||
use codex_app_server_protocol::PluginInterface;
|
||||
use codex_app_server_protocol::PluginListParams;
|
||||
use codex_app_server_protocol::PluginListResponse;
|
||||
use codex_app_server_protocol::PluginMarketplaceEntry;
|
||||
use codex_app_server_protocol::PluginReadParams;
|
||||
use codex_app_server_protocol::PluginReadResponse;
|
||||
use codex_app_server_protocol::PluginShareDeleteParams;
|
||||
use codex_app_server_protocol::PluginShareDeleteResponse;
|
||||
use codex_app_server_protocol::PluginShareListItem;
|
||||
use codex_app_server_protocol::PluginShareListParams;
|
||||
use codex_app_server_protocol::PluginShareListResponse;
|
||||
use codex_app_server_protocol::PluginShareSaveParams;
|
||||
use codex_app_server_protocol::PluginShareSaveResponse;
|
||||
use codex_app_server_protocol::PluginSkillReadParams;
|
||||
use codex_app_server_protocol::PluginSkillReadResponse;
|
||||
use codex_app_server_protocol::PluginSource;
|
||||
use codex_app_server_protocol::PluginSummary;
|
||||
use codex_app_server_protocol::PluginUninstallParams;
|
||||
use codex_app_server_protocol::PluginUninstallResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ReviewDelivery as ApiReviewDelivery;
|
||||
use codex_app_server_protocol::ReviewStartParams;
|
||||
use codex_app_server_protocol::ReviewStartResponse;
|
||||
use codex_app_server_protocol::ReviewTarget as ApiReviewTarget;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailParams;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailResponse;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestResolvedNotification;
|
||||
use codex_app_server_protocol::SkillSummary;
|
||||
use codex_app_server_protocol::SkillsConfigWriteParams;
|
||||
use codex_app_server_protocol::SkillsConfigWriteResponse;
|
||||
use codex_app_server_protocol::SkillsListParams;
|
||||
use codex_app_server_protocol::SkillsListResponse;
|
||||
use codex_app_server_protocol::SortDirection;
|
||||
use codex_app_server_protocol::Thread;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionParams;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionResponse;
|
||||
use codex_app_server_protocol::ThreadArchiveParams;
|
||||
use codex_app_server_protocol::ThreadArchiveResponse;
|
||||
use codex_app_server_protocol::ThreadArchivedNotification;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanParams;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanResponse;
|
||||
use codex_app_server_protocol::ThreadClosedNotification;
|
||||
use codex_app_server_protocol::ThreadCompactStartParams;
|
||||
use codex_app_server_protocol::ThreadCompactStartResponse;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadForkParams;
|
||||
use codex_app_server_protocol::ThreadForkResponse;
|
||||
use codex_app_server_protocol::ThreadGoal;
|
||||
use codex_app_server_protocol::ThreadGoalClearParams;
|
||||
use codex_app_server_protocol::ThreadGoalClearResponse;
|
||||
use codex_app_server_protocol::ThreadGoalClearedNotification;
|
||||
use codex_app_server_protocol::ThreadGoalGetParams;
|
||||
use codex_app_server_protocol::ThreadGoalGetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalSetParams;
|
||||
use codex_app_server_protocol::ThreadGoalSetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalStatus;
|
||||
use codex_app_server_protocol::ThreadGoalUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadHistoryBuilder;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadInjectItemsParams;
|
||||
use codex_app_server_protocol::ThreadInjectItemsResponse;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadListCwdFilter;
|
||||
use codex_app_server_protocol::ThreadListParams;
|
||||
use codex_app_server_protocol::ThreadListResponse;
|
||||
use codex_app_server_protocol::ThreadLoadedListParams;
|
||||
use codex_app_server_protocol::ThreadLoadedListResponse;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetParams;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetResponse;
|
||||
use codex_app_server_protocol::ThreadMetadataGitInfoUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateResponse;
|
||||
use codex_app_server_protocol::ThreadNameUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadReadParams;
|
||||
use codex_app_server_protocol::ThreadReadResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeListVoicesResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartTransport;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopResponse;
|
||||
use codex_app_server_protocol::ThreadResumeParams;
|
||||
use codex_app_server_protocol::ThreadResumeResponse;
|
||||
use codex_app_server_protocol::ThreadRollbackParams;
|
||||
use codex_app_server_protocol::ThreadSetNameParams;
|
||||
use codex_app_server_protocol::ThreadSetNameResponse;
|
||||
use codex_app_server_protocol::ThreadShellCommandParams;
|
||||
use codex_app_server_protocol::ThreadShellCommandResponse;
|
||||
use codex_app_server_protocol::ThreadSortKey;
|
||||
use codex_app_server_protocol::ThreadSourceKind;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::ThreadStartedNotification;
|
||||
use codex_app_server_protocol::ThreadStatus;
|
||||
use codex_app_server_protocol::ThreadTurnsListParams;
|
||||
use codex_app_server_protocol::ThreadTurnsListResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchiveParams;
|
||||
use codex_app_server_protocol::ThreadUnarchiveResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchivedNotification;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeParams;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeResponse;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeStatus;
|
||||
use codex_app_server_protocol::Turn;
|
||||
use codex_app_server_protocol::TurnEnvironmentParams;
|
||||
use codex_app_server_protocol::TurnError;
|
||||
use codex_app_server_protocol::TurnInterruptParams;
|
||||
use codex_app_server_protocol::TurnInterruptResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::TurnStartResponse;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::TurnSteerParams;
|
||||
use codex_app_server_protocol::TurnSteerResponse;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupCompletedNotification;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupMode;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartParams;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartResponse;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_backend_client::AddCreditsNudgeCreditType as BackendAddCreditsNudgeCreditType;
|
||||
use codex_backend_client::Client as BackendClient;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_chatgpt::workspace_settings;
|
||||
use codex_config::CloudRequirementsLoadError;
|
||||
use codex_config::CloudRequirementsLoadErrorCode;
|
||||
use codex_config::ConfigLayerStack;
|
||||
use codex_config::loader::project_trust_key;
|
||||
use codex_config::types::McpServerTransportConfig;
|
||||
use codex_core::CodexThread;
|
||||
use codex_core::CodexThreadTurnContextOverrides;
|
||||
use codex_core::ForkSnapshot;
|
||||
use codex_core::NewThread;
|
||||
use codex_core::RolloutRecorder;
|
||||
use codex_core::SessionMeta;
|
||||
use codex_core::StartThreadOptions;
|
||||
use codex_core::SteerInputError;
|
||||
use codex_core::ThreadConfigSnapshot;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
use codex_core::config::NetworkProxyAuditMetadata;
|
||||
use codex_core::config::edit::ConfigEdit;
|
||||
use codex_core::config::edit::ConfigEditsBuilder;
|
||||
use codex_core::exec::ExecCapturePolicy;
|
||||
use codex_core::exec::ExecExpiration;
|
||||
use codex_core::exec::ExecParams;
|
||||
use codex_core::exec_env::create_env;
|
||||
use codex_core::find_archived_thread_path_by_id_str;
|
||||
use codex_core::find_thread_name_by_id;
|
||||
use codex_core::find_thread_path_by_id_str;
|
||||
use codex_core::path_utils;
|
||||
use codex_core::read_head_for_summary;
|
||||
use codex_core::sandboxing::SandboxPermissions;
|
||||
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupMode as CoreWindowsSandboxSetupMode;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupRequest;
|
||||
use codex_core_plugins::OPENAI_CURATED_MARKETPLACE_NAME;
|
||||
use codex_core_plugins::PluginInstallError as CorePluginInstallError;
|
||||
use codex_core_plugins::PluginInstallRequest;
|
||||
use codex_core_plugins::PluginLoadOutcome;
|
||||
use codex_core_plugins::PluginReadRequest;
|
||||
use codex_core_plugins::PluginUninstallError as CorePluginUninstallError;
|
||||
use codex_core_plugins::loader::load_plugin_apps;
|
||||
use codex_core_plugins::loader::load_plugin_mcp_servers;
|
||||
use codex_core_plugins::loader::plugin_telemetry_metadata_from_root;
|
||||
use codex_core_plugins::manifest::PluginManifestInterface;
|
||||
use codex_core_plugins::marketplace::MarketplaceError;
|
||||
use codex_core_plugins::marketplace::MarketplacePluginSource;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddError;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddRequest;
|
||||
use codex_core_plugins::marketplace_add::add_marketplace as add_marketplace_to_codex_home;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveError;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveRequest as CoreMarketplaceRemoveRequest;
|
||||
use codex_core_plugins::marketplace_remove::remove_marketplace;
|
||||
use codex_core_plugins::remote::RemoteMarketplace;
|
||||
use codex_core_plugins::remote::RemotePluginCatalogError;
|
||||
use codex_core_plugins::remote::RemotePluginDetail as RemoteCatalogPluginDetail;
|
||||
use codex_core_plugins::remote::RemotePluginServiceConfig;
|
||||
use codex_core_plugins::remote::RemotePluginShareSummary as RemoteCatalogPluginShareSummary;
|
||||
use codex_core_plugins::remote::RemotePluginSummary as RemoteCatalogPluginSummary;
|
||||
use codex_exec_server::EnvironmentManager;
|
||||
use codex_exec_server::LOCAL_FS;
|
||||
use codex_features::FEATURES;
|
||||
use codex_features::Feature;
|
||||
use codex_features::Stage;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_feedback::FeedbackAttachmentPath;
|
||||
use codex_feedback::FeedbackUploadOptions;
|
||||
use codex_git_utils::git_diff_to_remote;
|
||||
use codex_git_utils::resolve_root_git_project_for_trust;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CLIENT_ID;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_login::ServerOptions as LoginServerOptions;
|
||||
use codex_login::ShutdownHandle;
|
||||
use codex_login::auth::login_with_chatgpt_auth_tokens;
|
||||
use codex_login::complete_device_code_login;
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_login::request_device_code;
|
||||
use codex_login::run_login_server;
|
||||
use codex_mcp::McpRuntimeEnvironment;
|
||||
use codex_mcp::McpServerStatusSnapshot;
|
||||
use codex_mcp::McpSnapshotDetail;
|
||||
use codex_mcp::collect_mcp_server_status_snapshot_with_detail;
|
||||
use codex_mcp::discover_supported_scopes;
|
||||
use codex_mcp::effective_mcp_servers;
|
||||
use codex_mcp::read_mcp_resource as read_mcp_resource_without_thread;
|
||||
use codex_mcp::resolve_oauth_scopes;
|
||||
use codex_memories_write::clear_memory_roots_contents;
|
||||
use codex_model_provider::ProviderAccountError;
|
||||
use codex_model_provider::create_model_provider;
|
||||
use codex_models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::config_types::CollaborationMode;
|
||||
use codex_protocol::config_types::ForcedLoginMethod;
|
||||
use codex_protocol::config_types::Personality;
|
||||
use codex_protocol::config_types::TrustLevel;
|
||||
use codex_protocol::config_types::WindowsSandboxLevel;
|
||||
use codex_protocol::dynamic_tools::DynamicToolSpec as CoreDynamicToolSpec;
|
||||
use codex_protocol::error::CodexErr;
|
||||
use codex_protocol::error::Result as CodexResult;
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::permissions::FileSystemSandboxPolicy;
|
||||
use codex_protocol::protocol::AgentStatus;
|
||||
use codex_protocol::protocol::ConversationAudioParams;
|
||||
use codex_protocol::protocol::ConversationStartParams;
|
||||
use codex_protocol::protocol::ConversationStartTransport;
|
||||
use codex_protocol::protocol::ConversationTextParams;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::GitInfo as CoreGitInfo;
|
||||
use codex_protocol::protocol::InitialHistory;
|
||||
use codex_protocol::protocol::McpAuthStatus as CoreMcpAuthStatus;
|
||||
use codex_protocol::protocol::McpServerRefreshConfig;
|
||||
use codex_protocol::protocol::Op;
|
||||
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
||||
use codex_protocol::protocol::RealtimeVoicesList;
|
||||
use codex_protocol::protocol::ResumedHistory;
|
||||
use codex_protocol::protocol::ReviewDelivery as CoreReviewDelivery;
|
||||
use codex_protocol::protocol::ReviewRequest;
|
||||
use codex_protocol::protocol::ReviewTarget as CoreReviewTarget;
|
||||
use codex_protocol::protocol::RolloutItem;
|
||||
use codex_protocol::protocol::SessionConfiguredEvent;
|
||||
use codex_protocol::protocol::SessionMetaLine;
|
||||
use codex_protocol::protocol::TurnEnvironmentSelection;
|
||||
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
|
||||
use codex_protocol::protocol::W3cTraceContext;
|
||||
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
|
||||
use codex_protocol::user_input::UserInput as CoreInputItem;
|
||||
use codex_rmcp_client::perform_oauth_login_return_url;
|
||||
use codex_rollout::EventPersistenceMode;
|
||||
use codex_rollout::is_persisted_rollout_item;
|
||||
use codex_rollout::state_db::StateDbHandle;
|
||||
use codex_rollout::state_db::get_state_db;
|
||||
use codex_rollout::state_db::reconcile_rollout;
|
||||
use codex_state::StateRuntime;
|
||||
use codex_state::ThreadMetadata;
|
||||
use codex_state::ThreadMetadataBuilder;
|
||||
use codex_state::log_db::LogDbLayer;
|
||||
use codex_thread_store::ArchiveThreadParams as StoreArchiveThreadParams;
|
||||
use codex_thread_store::ListThreadsParams as StoreListThreadsParams;
|
||||
use codex_thread_store::LocalThreadStore;
|
||||
use codex_thread_store::ReadThreadByRolloutPathParams as StoreReadThreadByRolloutPathParams;
|
||||
use codex_thread_store::ReadThreadParams as StoreReadThreadParams;
|
||||
use codex_thread_store::SortDirection as StoreSortDirection;
|
||||
use codex_thread_store::StoredThread;
|
||||
use codex_thread_store::ThreadMetadataPatch as StoreThreadMetadataPatch;
|
||||
use codex_thread_store::ThreadSortKey as StoreThreadSortKey;
|
||||
use codex_thread_store::ThreadStore;
|
||||
use codex_thread_store::ThreadStoreError;
|
||||
use codex_thread_store::UpdateThreadMetadataParams as StoreUpdateThreadMetadataParams;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_pty::DEFAULT_OUTPUT_BYTES_CAP;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Error as IoError;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::result::Result;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::sync::SemaphorePermit;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::watch;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tokio_util::task::TaskTracker;
|
||||
use toml::Value as TomlValue;
|
||||
use tracing::Instrument;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
use tracing::warn;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[cfg(test)]
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
|
||||
mod account_processor;
|
||||
mod apps_processor;
|
||||
mod catalog_processor;
|
||||
mod command_exec_processor;
|
||||
mod config_processor;
|
||||
mod device_key_processor;
|
||||
mod external_agent_config_processor;
|
||||
mod feedback_processor;
|
||||
mod fs_processor;
|
||||
mod git_processor;
|
||||
mod initialize_processor;
|
||||
mod marketplace_processor;
|
||||
mod mcp_processor;
|
||||
mod plugins;
|
||||
mod search;
|
||||
mod thread_processor;
|
||||
mod token_usage_replay;
|
||||
mod turn_processor;
|
||||
mod windows_sandbox_processor;
|
||||
|
||||
pub(crate) use account_processor::AccountRequestProcessor;
|
||||
pub(crate) use apps_processor::AppsRequestProcessor;
|
||||
pub(crate) use catalog_processor::CatalogRequestProcessor;
|
||||
pub(crate) use command_exec_processor::CommandExecRequestProcessor;
|
||||
pub(crate) use config_processor::ConfigRequestProcessor;
|
||||
pub(crate) use device_key_processor::DeviceKeyRequestProcessor;
|
||||
pub(crate) use external_agent_config_processor::ExternalAgentConfigRequestProcessor;
|
||||
pub(crate) use feedback_processor::FeedbackRequestProcessor;
|
||||
pub(crate) use fs_processor::FsRequestProcessor;
|
||||
pub(crate) use git_processor::GitRequestProcessor;
|
||||
pub(crate) use initialize_processor::InitializeRequestProcessor;
|
||||
pub(crate) use marketplace_processor::MarketplaceRequestProcessor;
|
||||
pub(crate) use mcp_processor::McpRequestProcessor;
|
||||
pub(crate) use plugins::PluginRequestProcessor;
|
||||
pub(crate) use search::SearchRequestProcessor;
|
||||
pub(crate) use thread_goal_processor::ThreadGoalRequestProcessor;
|
||||
pub(crate) use thread_processor::ThreadRequestProcessor;
|
||||
pub(crate) use turn_processor::TurnRequestProcessor;
|
||||
pub(crate) use windows_sandbox_processor::WindowsSandboxRequestProcessor;
|
||||
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::filters::compute_source_filters;
|
||||
use crate::filters::source_kind_matches;
|
||||
use crate::thread_state::ThreadListenerCommand;
|
||||
use crate::thread_state::ThreadState;
|
||||
use crate::thread_state::ThreadStateManager;
|
||||
use token_usage_replay::latest_token_usage_turn_id_from_rollout_items;
|
||||
use token_usage_replay::send_thread_token_usage_update_to_connection;
|
||||
|
||||
mod config_errors;
|
||||
mod request_errors;
|
||||
mod thread_goal_processor;
|
||||
mod thread_lifecycle;
|
||||
mod thread_summary;
|
||||
|
||||
use self::config_errors::*;
|
||||
use self::request_errors::*;
|
||||
use self::thread_goal_processor::api_thread_goal_from_state;
|
||||
use self::thread_lifecycle::*;
|
||||
use self::thread_summary::*;
|
||||
|
||||
pub(crate) use self::thread_summary::read_rollout_items_from_rollout;
|
||||
pub(crate) use self::thread_summary::read_summary_from_rollout;
|
||||
pub(crate) use self::thread_summary::summary_to_thread;
|
||||
|
||||
pub(crate) fn build_api_turns_from_rollout_items(items: &[RolloutItem]) -> Vec<Turn> {
|
||||
let mut builder = ThreadHistoryBuilder::new();
|
||||
for item in items {
|
||||
if is_persisted_rollout_item(item, EventPersistenceMode::Limited) {
|
||||
builder.handle_rollout_item(item);
|
||||
}
|
||||
}
|
||||
builder.finish()
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,350 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles `apps/*` JSON-RPC requests (the visible surface is `apps/list`).
///
/// Cheap to clone: every field is an `Arc` handle or a type that derives
/// `Clone`.
#[derive(Clone)]
pub(crate) struct AppsRequestProcessor {
    // Supplies the current auth; used to gate apps per auth backend.
    auth_manager: Arc<AuthManager>,
    // Source of the environment manager handed to the connector loaders.
    thread_manager: Arc<ThreadManager>,
    // Channel for request results and `AppListUpdated` notifications.
    outgoing: Arc<OutgoingMessageSender>,
    // Reloads the latest config on each request.
    config_manager: ConfigManager,
    // Caches the workspace-level "Codex plugins enabled" setting.
    workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
}
|
||||
|
||||
impl AppsRequestProcessor {
    /// Build a processor from the shared service handles it needs to answer
    /// `apps/*` requests.
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        outgoing: Arc<OutgoingMessageSender>,
        config_manager: ConfigManager,
        workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            outgoing,
            config_manager,
            workspace_settings_cache,
        }
    }

    /// Handle `apps/list`. `Ok(None)` means the response will be delivered
    /// asynchronously by the task spawned in `apps_list_inner`.
    pub(crate) async fn apps_list(
        &self,
        request_id: &ConnectionRequestId,
        params: AppsListParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.apps_list_inner(request_id, params)
            .await
            .map(|response| response.map(Into::into))
    }

    /// Gate-check `apps/list`, then hand the slow work to a background task.
    ///
    /// Returns an empty list immediately when apps are disabled for the
    /// current auth backend or by the workspace plugin setting; otherwise
    /// spawns `apps_list_task` and returns `Ok(None)` so the spawned task can
    /// send the result later via `outgoing`.
    async fn apps_list_inner(
        &self,
        request_id: &ConnectionRequestId,
        params: AppsListParams,
    ) -> Result<Option<AppsListResponse>, JSONRPCErrorError> {
        let mut config = self.load_latest_config(/*fallback_cwd*/ None).await?;

        // If the request is scoped to a thread, mirror that thread's Apps
        // feature flag into the config used for this request.
        if let Some(thread_id) = params.thread_id.as_deref() {
            let (_, thread) = self.load_thread(thread_id).await?;

            let _ = config
                .features
                .set_enabled(Feature::Apps, thread.enabled(Feature::Apps));
        }

        let auth = self.auth_manager.auth().await;
        if !config
            .features
            .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::uses_codex_backend))
        {
            return Ok(Some(AppsListResponse {
                data: Vec::new(),
                next_cursor: None,
            }));
        }

        if !self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await
        {
            return Ok(Some(AppsListResponse {
                data: Vec::new(),
                next_cursor: None,
            }));
        }

        let request = request_id.clone();
        let outgoing = Arc::clone(&self.outgoing);
        let environment_manager = self.thread_manager.environment_manager();
        tokio::spawn(async move {
            Self::apps_list_task(outgoing, request, params, config, environment_manager).await;
        });
        Ok(None)
    }

    /// Background task body: compute the apps list and send it as the
    /// response for `request_id`.
    async fn apps_list_task(
        outgoing: Arc<OutgoingMessageSender>,
        request_id: ConnectionRequestId,
        params: AppsListParams,
        config: Config,
        environment_manager: Arc<EnvironmentManager>,
    ) {
        let result = Self::apps_list_response(&outgoing, params, config, environment_manager).await;
        outgoing.send_result(request_id, result).await;
    }

    /// Assemble the paged apps list.
    ///
    /// Strategy: start from cached connector lists (if any), kick off two
    /// background loaders (accessible connectors and the full directory),
    /// and emit `AppListUpdated` notifications as fresher merges become
    /// available — deduplicated against the last notified list. Once both
    /// loaders have reported, paginate the final merge. Fails if either
    /// loader errors or the overall `APP_LIST_LOAD_TIMEOUT` elapses.
    async fn apps_list_response(
        outgoing: &Arc<OutgoingMessageSender>,
        params: AppsListParams,
        config: Config,
        environment_manager: Arc<EnvironmentManager>,
    ) -> Result<AppsListResponse, JSONRPCErrorError> {
        let AppsListParams {
            cursor,
            limit,
            thread_id: _,
            force_refetch,
        } = params;
        // Cursor is the stringified start index of the requested page.
        let start = match cursor {
            Some(cursor) => match cursor.parse::<usize>() {
                Ok(idx) => idx,
                Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
            },
            None => 0,
        };

        let (mut accessible_connectors, mut all_connectors) = tokio::join!(
            connectors::list_cached_accessible_connectors_from_mcp_tools(&config),
            connectors::list_cached_all_connectors(&config)
        );
        // Kept so force-refetch can keep showing the cached directory while
        // the fresh fetch is still in flight.
        let cached_all_connectors = all_connectors.clone();

        let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();

        let accessible_config = config.clone();
        let accessible_tx = tx.clone();
        tokio::spawn(async move {
            let result =
                connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
                    &accessible_config,
                    force_refetch,
                    &environment_manager,
                )
                .await
                .map(|status| status.connectors)
                .map_err(|err| format!("failed to load accessible apps: {err}"));
            let _ = accessible_tx.send(AppListLoadResult::Accessible(result));
        });

        let all_config = config.clone();
        tokio::spawn(async move {
            let result = connectors::list_all_connectors_with_options(&all_config, force_refetch)
                .await
                .map_err(|err| format!("failed to list apps: {err}"));
            let _ = tx.send(AppListLoadResult::Directory(result));
        });

        let app_list_deadline = tokio::time::Instant::now() + APP_LIST_LOAD_TIMEOUT;
        let mut accessible_loaded = false;
        let mut all_loaded = false;
        let mut last_notified_apps = None;

        // Seed an early notification from the caches before either loader
        // finishes (only fires when some merged connector is accessible).
        if accessible_connectors.is_some() || all_connectors.is_some() {
            let merged = connectors::with_app_enabled_state(
                merge_loaded_apps(all_connectors.as_deref(), accessible_connectors.as_deref()),
                &config,
            );
            if should_send_app_list_updated_notification(
                merged.as_slice(),
                accessible_loaded,
                all_loaded,
            ) {
                send_app_list_updated_notification(outgoing, merged.clone()).await;
                last_notified_apps = Some(merged);
            }
        }

        loop {
            let result = match tokio::time::timeout_at(app_list_deadline, rx.recv()).await {
                Ok(Some(result)) => result,
                // Both senders dropped without reporting — treat as failure.
                Ok(None) => {
                    return Err(internal_error("failed to load app lists"));
                }
                Err(_) => {
                    let timeout_seconds = APP_LIST_LOAD_TIMEOUT.as_secs();
                    return Err(internal_error(format!(
                        "timed out waiting for app lists after {timeout_seconds} seconds"
                    )));
                }
            };

            match result {
                AppListLoadResult::Accessible(Ok(connectors)) => {
                    accessible_connectors = Some(connectors);
                    accessible_loaded = true;
                }
                AppListLoadResult::Accessible(Err(err)) => {
                    return Err(internal_error(err));
                }
                AppListLoadResult::Directory(Ok(connectors)) => {
                    all_connectors = Some(connectors);
                    all_loaded = true;
                }
                AppListLoadResult::Directory(Err(err)) => {
                    return Err(internal_error(err));
                }
            }

            // During a force refetch, prefer cached/omitted data for the
            // interim notification so the UI isn't flashed with a partial
            // fresh list before both loaders settle.
            let showing_interim_force_refetch = force_refetch && !(accessible_loaded && all_loaded);
            let all_connectors_for_update =
                if showing_interim_force_refetch && cached_all_connectors.is_some() {
                    cached_all_connectors.as_deref()
                } else {
                    all_connectors.as_deref()
                };
            let accessible_connectors_for_update =
                if showing_interim_force_refetch && !accessible_loaded {
                    None
                } else {
                    accessible_connectors.as_deref()
                };
            let merged = connectors::with_app_enabled_state(
                merge_loaded_apps(all_connectors_for_update, accessible_connectors_for_update),
                &config,
            );
            // Only notify on change; comparing against the previous payload
            // suppresses duplicate notifications.
            if should_send_app_list_updated_notification(
                merged.as_slice(),
                accessible_loaded,
                all_loaded,
            ) && last_notified_apps.as_ref() != Some(&merged)
            {
                send_app_list_updated_notification(outgoing, merged.clone()).await;
                last_notified_apps = Some(merged.clone());
            }

            if accessible_loaded && all_loaded {
                return paginate_apps(merged.as_slice(), start, limit);
            }
        }
    }

    /// Parse `thread_id` and fetch the corresponding thread, mapping both
    /// failure modes to invalid-request errors.
    async fn load_thread(
        &self,
        thread_id: &str,
    ) -> Result<(ThreadId, Arc<CodexThread>), JSONRPCErrorError> {
        let thread_id = ThreadId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("invalid thread id: {err}"),
            data: None,
        })?;

        let thread = self
            .thread_manager
            .get_thread(thread_id)
            .await
            .map_err(|_| JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("thread not found: {thread_id}"),
                data: None,
            })?;

        Ok((thread_id, thread))
    }

    /// Reload the freshest config (optionally anchored at `fallback_cwd`),
    /// mapping load failures to an internal JSON-RPC error.
    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }

    /// Check the workspace-level setting that gates Codex plugins/apps.
    ///
    /// Fails open: if the setting cannot be fetched, logs a warning and
    /// returns `true` so plugins stay available.
    async fn workspace_codex_plugins_enabled(
        &self,
        config: &Config,
        auth: Option<&CodexAuth>,
    ) -> bool {
        match workspace_settings::codex_plugins_enabled_for_workspace(
            config,
            auth,
            Some(&self.workspace_settings_cache),
        )
        .await
        {
            Ok(enabled) => enabled,
            Err(err) => {
                warn!(
                    "failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
                );
                true
            }
        }
    }
}
|
||||
|
||||
// Upper bound on how long `apps_list_response` waits for both the accessible
// and directory app lists before failing the request.
const APP_LIST_LOAD_TIMEOUT: Duration = Duration::from_secs(90);
|
||||
|
||||
/// Message sent over the channel by the two background loader tasks in
/// `apps_list_response`; errors are pre-formatted display strings.
enum AppListLoadResult {
    /// Connectors the current user can access (derived from MCP tools).
    Accessible(Result<Vec<AppInfo>, String>),
    /// The full app directory listing.
    Directory(Result<Vec<AppInfo>, String>),
}
|
||||
|
||||
fn merge_loaded_apps(
|
||||
all_connectors: Option<&[AppInfo]>,
|
||||
accessible_connectors: Option<&[AppInfo]>,
|
||||
) -> Vec<AppInfo> {
|
||||
let all_connectors_loaded = all_connectors.is_some();
|
||||
let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
|
||||
}
|
||||
|
||||
fn should_send_app_list_updated_notification(
|
||||
connectors: &[AppInfo],
|
||||
accessible_loaded: bool,
|
||||
all_loaded: bool,
|
||||
) -> bool {
|
||||
connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
|
||||
}
|
||||
|
||||
fn paginate_apps(
|
||||
connectors: &[AppInfo],
|
||||
start: usize,
|
||||
limit: Option<u32>,
|
||||
) -> Result<AppsListResponse, JSONRPCErrorError> {
|
||||
let total = connectors.len();
|
||||
if start > total {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("cursor {start} exceeds total apps {total}"),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let data = connectors[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(AppsListResponse { data, next_cursor })
|
||||
}
|
||||
|
||||
async fn send_app_list_updated_notification(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
data: Vec<AppInfo>,
|
||||
) {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::AppListUpdated(
|
||||
AppListUpdatedNotification { data },
|
||||
))
|
||||
.await;
|
||||
}
|
||||
@@ -1,600 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles catalog-style list requests: skills, hooks, models, collaboration
/// modes, and experimental features.
///
/// Fields are `pub(super)` so sibling processor modules in this directory can
/// reuse the handles.
#[derive(Clone)]
pub(crate) struct CatalogRequestProcessor {
    // Supplies the current auth for workspace plugin gating.
    pub(super) auth_manager: Arc<AuthManager>,
    // Source of skills/plugins/environment managers and model lists.
    pub(super) thread_manager: Arc<ThreadManager>,
    // Startup config; used as the fallback cwd for list requests.
    pub(super) config: Arc<Config>,
    // Reloads per-request and per-cwd configuration.
    pub(super) config_manager: ConfigManager,
    // Caches the workspace-level "Codex plugins enabled" setting.
    pub(super) workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
}
|
||||
|
||||
/// Convert core skill metadata into the app-server protocol representation.
///
/// A skill is reported as disabled when the absolute path of its `SKILLS.md`
/// appears in `disabled_paths`; interface and dependency payloads are mapped
/// field-for-field when present.
fn skills_to_info(
    skills: &[codex_core::skills::SkillMetadata],
    disabled_paths: &HashSet<AbsolutePathBuf>,
) -> Vec<codex_app_server_protocol::SkillMetadata> {
    skills
        .iter()
        .map(|skill| {
            let enabled = !disabled_paths.contains(&skill.path_to_skills_md);
            codex_app_server_protocol::SkillMetadata {
                name: skill.name.clone(),
                description: skill.description.clone(),
                short_description: skill.short_description.clone(),
                interface: skill.interface.clone().map(|interface| {
                    codex_app_server_protocol::SkillInterface {
                        display_name: interface.display_name,
                        short_description: interface.short_description,
                        icon_small: interface.icon_small,
                        icon_large: interface.icon_large,
                        brand_color: interface.brand_color,
                        default_prompt: interface.default_prompt,
                    }
                }),
                dependencies: skill.dependencies.clone().map(|dependencies| {
                    codex_app_server_protocol::SkillDependencies {
                        tools: dependencies
                            .tools
                            .into_iter()
                            .map(|tool| codex_app_server_protocol::SkillToolDependency {
                                r#type: tool.r#type,
                                value: tool.value,
                                description: tool.description,
                                transport: tool.transport,
                                command: tool.command,
                                url: tool.url,
                            })
                            .collect(),
                    }
                }),
                path: skill.path_to_skills_md.clone(),
                scope: skill.scope.into(),
                enabled,
            }
        })
        .collect()
}
|
||||
|
||||
/// Convert hook list entries into the protocol's `HookMetadata`,
/// field-for-field (enum-ish fields go through their `Into` impls).
fn hooks_to_info(hooks: &[codex_hooks::HookListEntry]) -> Vec<HookMetadata> {
    hooks
        .iter()
        .map(|hook| HookMetadata {
            key: hook.key.clone(),
            event_name: hook.event_name.into(),
            handler_type: hook.handler_type.into(),
            matcher: hook.matcher.clone(),
            command: hook.command.clone(),
            timeout_sec: hook.timeout_sec,
            status_message: hook.status_message.clone(),
            source_path: hook.source_path.clone(),
            source: hook.source.into(),
            plugin_id: hook.plugin_id.clone(),
            display_order: hook.display_order,
            enabled: hook.enabled,
            is_managed: hook.is_managed,
        })
        .collect()
}
|
||||
|
||||
fn errors_to_info(
|
||||
errors: &[codex_core::skills::SkillError],
|
||||
) -> Vec<codex_app_server_protocol::SkillErrorInfo> {
|
||||
errors
|
||||
.iter()
|
||||
.map(|err| codex_app_server_protocol::SkillErrorInfo {
|
||||
path: err.path.to_path_buf(),
|
||||
message: err.message.clone(),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
impl CatalogRequestProcessor {
|
||||
    /// Build a processor from the shared service handles used by the
    /// catalog list endpoints.
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        config: Arc<Config>,
        config_manager: ConfigManager,
        workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            config,
            config_manager,
            workspace_settings_cache,
        }
    }
||||
|
||||
pub(crate) async fn skills_list(
|
||||
&self,
|
||||
params: SkillsListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.skills_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn hooks_list(
|
||||
&self,
|
||||
params: HooksListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.hooks_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn skills_config_write(
|
||||
&self,
|
||||
params: SkillsConfigWriteParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.skills_config_write_response_inner(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn model_list(
|
||||
&self,
|
||||
params: ModelListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
Self::list_models(self.thread_manager.clone(), params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn experimental_feature_list(
|
||||
&self,
|
||||
params: ExperimentalFeatureListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.experimental_feature_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn collaboration_mode_list(
|
||||
&self,
|
||||
params: CollaborationModeListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
Self::list_collaboration_modes(self.thread_manager.clone(), params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn mock_experimental_method(
|
||||
&self,
|
||||
params: MockExperimentalMethodParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.mock_experimental_method_inner(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
    /// Resolve `cwd` to an absolute path and load the config layer stack for
    /// it. Errors are stringified so callers can attach them to a per-cwd
    /// entry in the response.
    async fn resolve_cwd_config(
        &self,
        cwd: &Path,
    ) -> Result<(AbsolutePathBuf, ConfigLayerStack), String> {
        let cwd_abs =
            AbsolutePathBuf::relative_to_current_dir(cwd).map_err(|err| err.to_string())?;
        let config_layer_stack = self
            .config_manager
            .load_config_layers_for_cwd(cwd_abs.clone())
            .await
            .map_err(|err| err.to_string())?;

        Ok((cwd_abs, config_layer_stack))
    }
|
||||
|
||||
    /// Reload the freshest config (optionally anchored at `fallback_cwd`),
    /// mapping load failures to an internal JSON-RPC error.
    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }
|
||||
|
||||
    /// Check the workspace-level setting that gates Codex plugins/apps.
    ///
    /// Fails open: if the setting cannot be fetched, logs a warning and
    /// returns `true` so plugins stay available.
    async fn workspace_codex_plugins_enabled(
        &self,
        config: &Config,
        auth: Option<&CodexAuth>,
    ) -> bool {
        match workspace_settings::codex_plugins_enabled_for_workspace(
            config,
            auth,
            Some(&self.workspace_settings_cache),
        )
        .await
        {
            Ok(enabled) => enabled,
            Err(err) => {
                warn!(
                    "failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
                );
                true
            }
        }
    }
|
||||
|
||||
    /// Page through the supported model list.
    ///
    /// `cursor` is the stringified start index of the page; `limit` is
    /// clamped to at least 1 so a `limit: 0` request cannot produce a
    /// non-advancing page. A cursor past the end is an invalid request.
    async fn list_models(
        thread_manager: Arc<ThreadManager>,
        params: ModelListParams,
    ) -> Result<ModelListResponse, JSONRPCErrorError> {
        let ModelListParams {
            limit,
            cursor,
            include_hidden,
        } = params;
        let models = supported_models(thread_manager, include_hidden.unwrap_or(false)).await;
        let total = models.len();

        // Empty catalog: return an empty page without validating the cursor.
        if total == 0 {
            return Ok(ModelListResponse {
                data: Vec::new(),
                next_cursor: None,
            });
        }

        let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
        let effective_limit = effective_limit.min(total);
        let start = match cursor {
            Some(cursor) => cursor
                .parse::<usize>()
                .map_err(|_| invalid_request(format!("invalid cursor: {cursor}")))?,
            None => 0,
        };

        if start > total {
            return Err(invalid_request(format!(
                "cursor {start} exceeds total models {total}"
            )));
        }

        let end = start.saturating_add(effective_limit).min(total);
        let items = models[start..end].to_vec();
        // Expose a cursor only while more pages remain.
        let next_cursor = if end < total {
            Some(end.to_string())
        } else {
            None
        };
        Ok(ModelListResponse {
            data: items,
            next_cursor,
        })
    }
|
||||
|
||||
async fn list_collaboration_modes(
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
params: CollaborationModeListParams,
|
||||
) -> Result<CollaborationModeListResponse, JSONRPCErrorError> {
|
||||
let CollaborationModeListParams {} = params;
|
||||
let items = thread_manager
|
||||
.list_collaboration_modes()
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect();
|
||||
let response = CollaborationModeListResponse { data: items };
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
    /// Build the paged `experimentalFeature/list` response from the static
    /// `FEATURES` table, the current config, and the workspace plugin gate.
    async fn experimental_feature_list_response(
        &self,
        params: ExperimentalFeatureListParams,
    ) -> Result<ExperimentalFeatureListResponse, JSONRPCErrorError> {
        let ExperimentalFeatureListParams { cursor, limit } = params;
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let auth = self.auth_manager.auth().await;
        let workspace_codex_plugins_enabled = self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await;

        let data = FEATURES
            .iter()
            .map(|spec| {
                // Only Experimental-stage features carry user-facing copy;
                // all other stages expose just the stage itself.
                let (stage, display_name, description, announcement) = match spec.stage {
                    Stage::Experimental {
                        name,
                        menu_description,
                        announcement,
                    } => (
                        ApiExperimentalFeatureStage::Beta,
                        Some(name.to_string()),
                        Some(menu_description.to_string()),
                        Some(announcement.to_string()),
                    ),
                    Stage::UnderDevelopment => (
                        ApiExperimentalFeatureStage::UnderDevelopment,
                        None,
                        None,
                        None,
                    ),
                    Stage::Stable => (ApiExperimentalFeatureStage::Stable, None, None, None),
                    Stage::Deprecated => {
                        (ApiExperimentalFeatureStage::Deprecated, None, None, None)
                    }
                    Stage::Removed => (ApiExperimentalFeatureStage::Removed, None, None, None),
                };

                ApiExperimentalFeature {
                    name: spec.key.to_string(),
                    stage,
                    display_name,
                    description,
                    announcement,
                    // Apps/Plugins are additionally gated by the workspace
                    // setting even when enabled in config.
                    enabled: config.features.enabled(spec.id)
                        && (workspace_codex_plugins_enabled
                            || !matches!(spec.id, Feature::Apps | Feature::Plugins)),
                    default_enabled: spec.default_enabled,
                }
            })
            .collect::<Vec<_>>();

        let total = data.len();
        // Empty table: return an empty page without validating the cursor.
        if total == 0 {
            return Ok(ExperimentalFeatureListResponse {
                data: Vec::new(),
                next_cursor: None,
            });
        }

        // Clamp to 1 so limit=0 cannot return a non-advancing page.
        let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
        let effective_limit = effective_limit.min(total);
        let start = match cursor {
            Some(cursor) => match cursor.parse::<usize>() {
                Ok(idx) => idx,
                Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
            },
            None => 0,
        };

        if start > total {
            return Err(invalid_request(format!(
                "cursor {start} exceeds total feature flags {total}"
            )));
        }

        let end = start.saturating_add(effective_limit).min(total);
        let data = data[start..end].to_vec();
        // Expose a cursor only while more pages remain.
        let next_cursor = if end < total {
            Some(end.to_string())
        } else {
            None
        };

        Ok(ExperimentalFeatureListResponse { data, next_cursor })
    }
|
||||
|
||||
async fn mock_experimental_method_inner(
|
||||
&self,
|
||||
params: MockExperimentalMethodParams,
|
||||
) -> Result<MockExperimentalMethodResponse, JSONRPCErrorError> {
|
||||
let MockExperimentalMethodParams { value } = params;
|
||||
let response = MockExperimentalMethodResponse { echoed: value };
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
    /// Resolve skills for each requested cwd (defaulting to the startup cwd
    /// when `cwds` is empty).
    ///
    /// Per-cwd config failures become error entries in the response rather
    /// than failing the whole request; malformed (non-absolute) extra user
    /// roots DO fail the whole request as invalid params. Extra roots whose
    /// cwd is not also in `cwds` are ignored with a warning.
    async fn skills_list_response(
        &self,
        params: SkillsListParams,
    ) -> Result<SkillsListResponse, JSONRPCErrorError> {
        let SkillsListParams {
            cwds,
            force_reload,
            per_cwd_extra_user_roots,
        } = params;
        let cwds = if cwds.is_empty() {
            vec![self.config.cwd.to_path_buf()]
        } else {
            cwds
        };
        let cwd_set: HashSet<PathBuf> = cwds.iter().cloned().collect();

        // Validate and group the caller-supplied extra skill roots by cwd.
        let mut extra_roots_by_cwd: HashMap<PathBuf, Vec<AbsolutePathBuf>> = HashMap::new();
        for entry in per_cwd_extra_user_roots.unwrap_or_default() {
            if !cwd_set.contains(&entry.cwd) {
                warn!(
                    cwd = %entry.cwd.display(),
                    "ignoring per-cwd extra roots for cwd not present in skills/list cwds"
                );
                continue;
            }

            let mut valid_extra_roots = Vec::new();
            for root in entry.extra_user_roots {
                let root =
                    AbsolutePathBuf::from_absolute_path_checked(root.as_path()).map_err(|_| {
                        invalid_request(format!(
                            "skills/list perCwdExtraUserRoots extraUserRoots paths must be absolute: {}",
                            root.display()
                        ))
                    })?;
                valid_extra_roots.push(root);
            }
            extra_roots_by_cwd
                .entry(entry.cwd)
                .or_default()
                .extend(valid_extra_roots);
        }

        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let auth = self.auth_manager.auth().await;
        let workspace_codex_plugins_enabled = self
            .workspace_codex_plugins_enabled(&config, auth.as_ref())
            .await;
        let skills_manager = self.thread_manager.skills_manager();
        let plugins_manager = self.thread_manager.plugins_manager();
        // Filesystem of the default environment, if one exists; passed to the
        // skills loader for each cwd.
        let fs = self
            .thread_manager
            .environment_manager()
            .default_environment()
            .map(|environment| environment.get_filesystem());
        let mut data = Vec::new();
        for cwd in cwds {
            let (cwd_abs, config_layer_stack) = match self.resolve_cwd_config(&cwd).await {
                Ok(resolved) => resolved,
                // Record the failure for this cwd and keep processing others.
                Err(message) => {
                    let error_path = cwd.clone();
                    data.push(codex_app_server_protocol::SkillsListEntry {
                        cwd,
                        skills: Vec::new(),
                        errors: vec![codex_app_server_protocol::SkillErrorInfo {
                            path: error_path,
                            message,
                        }],
                    });
                    continue;
                }
            };
            let extra_roots = extra_roots_by_cwd
                .get(&cwd)
                .map_or(&[][..], std::vec::Vec::as_slice);
            // Plugin-provided skill roots only apply when the workspace
            // allows Codex plugins.
            let effective_skill_roots = if workspace_codex_plugins_enabled {
                let plugins_input = config.plugins_config_input();
                plugins_manager
                    .effective_skill_roots_for_layer_stack(&config_layer_stack, &plugins_input)
                    .await
            } else {
                Vec::new()
            };
            let skills_input = codex_core::skills::SkillsLoadInput::new(
                cwd_abs.clone(),
                effective_skill_roots,
                config_layer_stack,
                config.bundled_skills_enabled(),
            );
            let outcome = skills_manager
                .skills_for_cwd_with_extra_user_roots(
                    &skills_input,
                    force_reload,
                    extra_roots,
                    fs.clone(),
                )
                .await;
            let errors = errors_to_info(&outcome.errors);
            let skills = skills_to_info(&outcome.skills, &outcome.disabled_paths);
            data.push(codex_app_server_protocol::SkillsListEntry {
                cwd,
                skills,
                errors,
            });
        }
        Ok(SkillsListResponse { data })
    }
|
||||
|
||||
    /// Handle `hooks/list` by resolving hooks for each requested cwd
    /// (defaulting to the startup cwd when `cwds` is empty).
    ///
    /// Per-cwd config failures become error entries in the response instead
    /// of failing the whole request. Plugin-provided hooks are only included
    /// when both the Plugins and PluginHooks features are enabled and the
    /// workspace allows Codex plugins.
    async fn hooks_list_response(
        &self,
        params: HooksListParams,
    ) -> Result<HooksListResponse, JSONRPCErrorError> {
        let HooksListParams { cwds } = params;
        let cwds = if cwds.is_empty() {
            vec![self.config.cwd.to_path_buf()]
        } else {
            cwds
        };

        let auth = self.auth_manager.auth().await;
        let plugins_manager = self.thread_manager.plugins_manager();
        let mut data = Vec::new();
        for cwd in cwds {
            let config = match self
                .config_manager
                .load_for_cwd(
                    /*request_overrides*/ None,
                    ConfigOverrides::default(),
                    Some(cwd.clone()),
                )
                .await
            {
                Ok(config) => config,
                // Record the failure for this cwd and keep processing others.
                Err(err) => {
                    let error_path = cwd.clone();
                    data.push(codex_app_server_protocol::HooksListEntry {
                        cwd,
                        hooks: Vec::new(),
                        warnings: Vec::new(),
                        errors: vec![codex_app_server_protocol::HookErrorInfo {
                            path: error_path,
                            message: err.to_string(),
                        }],
                    });
                    continue;
                }
            };
            let workspace_codex_plugins_enabled = self
                .workspace_codex_plugins_enabled(&config, auth.as_ref())
                .await;
            let plugins_enabled =
                config.features.enabled(Feature::Plugins) && workspace_codex_plugins_enabled;
            let plugin_outcome = if plugins_enabled && config.features.enabled(Feature::PluginHooks)
            {
                let plugins_input = config.plugins_config_input();
                plugins_manager
                    .plugins_for_layer_stack(
                        &config.config_layer_stack,
                        &plugins_input,
                        /*plugin_hooks_feature_enabled*/ true,
                    )
                    .await
            } else {
                PluginLoadOutcome::default()
            };
            let hooks = codex_hooks::list_hooks(codex_hooks::HooksConfig {
                feature_enabled: config.features.enabled(Feature::CodexHooks),
                config_layer_stack: Some(config.config_layer_stack),
                plugin_hook_sources: plugin_outcome.effective_plugin_hook_sources(),
                plugin_hook_load_warnings: plugin_outcome.effective_plugin_hook_warnings(),
                ..Default::default()
            });
            data.push(codex_app_server_protocol::HooksListEntry {
                cwd,
                hooks: hooks_to_info(&hooks.hooks),
                warnings: hooks.warnings,
                errors: Vec::new(),
            });
        }
        Ok(HooksListResponse { data })
    }
|
||||
|
||||
/// Handle `skills/config/write`: toggle a skill's enabled state, addressed
/// either by its path or by its name (exactly one of the two must be set).
///
/// On success the plugin and skill caches are cleared so the next read
/// observes the new setting; the response echoes the requested `enabled`
/// value as the effective state.
async fn skills_config_write_response_inner(
    &self,
    params: SkillsConfigWriteParams,
) -> Result<SkillsConfigWriteResponse, JSONRPCErrorError> {
    let SkillsConfigWriteParams {
        path,
        name,
        enabled,
    } = params;
    // Exactly one addressing mode is allowed; a blank name counts as absent.
    let edit = match (path, name) {
        (Some(path), None) => ConfigEdit::SetSkillConfig {
            path: path.into_path_buf(),
            enabled,
        },
        (None, Some(name)) if !name.trim().is_empty() => {
            ConfigEdit::SetSkillConfigByName { name, enabled }
        }
        _ => {
            return Err(invalid_params(
                "skills/config/write requires exactly one of path or name",
            ));
        }
    };
    let edits = vec![edit];
    ConfigEditsBuilder::new(&self.config.codex_home)
        .with_edits(edits)
        .apply()
        .await
        .map(|()| {
            // Invalidate caches only after the edit was persisted.
            self.thread_manager.plugins_manager().clear_cache();
            self.thread_manager.skills_manager().clear_cache();
            SkillsConfigWriteResponse {
                effective_enabled: enabled,
            }
        })
        .map_err(|err| internal_error(format!("failed to update skill settings: {err}")))
}
|
||||
}
|
||||
@@ -1,321 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles `command/exec*` requests: one-off command execution plus
/// write/resize/terminate operations on running executions.
#[derive(Clone)]
pub(crate) struct CommandExecRequestProcessor {
    // Paths used to dispatch helper binaries (e.g. the Linux sandbox exe).
    arg0_paths: Arg0DispatchPaths,
    // Server configuration (cwd, permissions, features).
    config: Arc<Config>,
    // Channel for responses/notifications back to the client.
    outgoing: Arc<OutgoingMessageSender>,
    // Tracks live executions; created fresh (default) per processor.
    command_exec_manager: CommandExecManager,
}
|
||||
|
||||
impl CommandExecRequestProcessor {
    /// Build a processor with a fresh (default) `CommandExecManager`.
    pub(crate) fn new(
        arg0_paths: Arg0DispatchPaths,
        config: Arc<Config>,
        outgoing: Arc<OutgoingMessageSender>,
    ) -> Self {
        Self {
            arg0_paths,
            config,
            outgoing,
            command_exec_manager: CommandExecManager::default(),
        }
    }

    /// Entry point for a one-off `command/exec` request. Returns `None` as
    /// the payload: the actual response is delivered asynchronously by the
    /// exec manager once the command is started.
    pub(crate) async fn one_off_command_exec(
        &self,
        request_id: &ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.exec_one_off_command(request_id, params)
            .await
            .map(|()| None)
    }

    /// Forward a `command/exec/write` (stdin data) to the exec manager.
    pub(crate) async fn command_exec_write(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecWriteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .write(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    /// Forward a `command/exec/resize` (terminal size change) to the exec manager.
    pub(crate) async fn command_exec_resize(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecResizeParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .resize(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    /// Forward a `command/exec/terminate` to the exec manager.
    pub(crate) async fn command_exec_terminate(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecTerminateParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.command_exec_manager
            .terminate(request_id, params)
            .await
            .map(|response| Some(response.into()))
    }

    /// Notify the exec manager that a client connection went away so it can
    /// clean up executions tied to that connection.
    pub(crate) async fn connection_closed(&self, connection_id: ConnectionId) {
        self.command_exec_manager
            .connection_closed(connection_id)
            .await;
    }

    // Thin wrapper: clones the request id and delegates to the inner impl.
    async fn exec_one_off_command(
        &self,
        request_id: &ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<(), JSONRPCErrorError> {
        self.exec_one_off_command_inner(request_id.clone(), params)
            .await
    }

    /// Validate a `command/exec` request, resolve its sandbox/permission
    /// profile, and hand the prepared execution to the exec manager.
    ///
    /// # Errors
    /// Returns invalid-request/invalid-params errors for an empty command,
    /// mutually exclusive options (`sandboxPolicy` vs `permissionProfile`,
    /// caps vs their `disable*` flags, `size` without `tty`), a negative
    /// timeout, or a permission profile the configured policy rejects;
    /// internal errors for proxy startup or exec-request build failures.
    async fn exec_one_off_command_inner(
        &self,
        request_id: ConnectionRequestId,
        params: CommandExecParams,
    ) -> Result<(), JSONRPCErrorError> {
        tracing::debug!("ExecOneOffCommand params: {params:?}");

        let request = request_id.clone();

        if params.command.is_empty() {
            return Err(invalid_request("command must not be empty"));
        }

        let CommandExecParams {
            command,
            process_id,
            tty,
            stream_stdin,
            stream_stdout_stderr,
            output_bytes_cap,
            disable_output_cap,
            disable_timeout,
            timeout_ms,
            cwd,
            env: env_overrides,
            size,
            sandbox_policy,
            permission_profile,
        } = params;
        // The legacy `sandboxPolicy` and the newer `permissionProfile` are
        // alternative ways to specify the same thing; reject both at once.
        if sandbox_policy.is_some() && permission_profile.is_some() {
            return Err(invalid_request(
                "`permissionProfile` cannot be combined with `sandboxPolicy`",
            ));
        }

        if size.is_some() && !tty {
            return Err(invalid_params("command/exec size requires tty: true"));
        }

        if disable_output_cap && output_bytes_cap.is_some() {
            return Err(invalid_params(
                "command/exec cannot set both outputBytesCap and disableOutputCap",
            ));
        }

        if disable_timeout && timeout_ms.is_some() {
            return Err(invalid_params(
                "command/exec cannot set both timeoutMs and disableTimeout",
            ));
        }

        // A relative request cwd is resolved against the configured cwd;
        // absent means the configured cwd itself.
        let cwd = cwd.map_or_else(|| self.config.cwd.clone(), |cwd| self.config.cwd.join(cwd));
        // Base environment from policy, then apply per-request overrides:
        // Some(value) sets, None removes the variable.
        let mut env = create_env(
            &self.config.permissions.shell_environment_policy,
            /*thread_id*/ None,
        );
        if let Some(env_overrides) = env_overrides {
            for (key, value) in env_overrides {
                match value {
                    Some(value) => {
                        env.insert(key, value);
                    }
                    None => {
                        env.remove(&key);
                    }
                }
            }
        }
        // timeoutMs arrives signed on the wire; negative values are rejected.
        let timeout_ms = match timeout_ms {
            Some(timeout_ms) => match u64::try_from(timeout_ms) {
                Ok(timeout_ms) => Some(timeout_ms),
                Err(_) => {
                    return Err(invalid_params(format!(
                        "command/exec timeoutMs must be non-negative, got {timeout_ms}"
                    )));
                }
            },
            None => None,
        };
        let managed_network_requirements_enabled =
            self.config.managed_network_requirements_enabled();
        // Start a managed network proxy only if one is configured; a startup
        // failure aborts the whole request.
        let started_network_proxy = match self.config.permissions.network.as_ref() {
            Some(spec) => match spec
                .start_proxy(
                    self.config.permissions.permission_profile.get(),
                    /*policy_decider*/ None,
                    /*blocked_request_observer*/ None,
                    managed_network_requirements_enabled,
                    NetworkProxyAuditMetadata::default(),
                )
                .await
            {
                Ok(started) => Some(started),
                Err(err) => {
                    return Err(internal_error(format!(
                        "failed to start managed network proxy: {err}"
                    )));
                }
            },
            None => None,
        };
        let windows_sandbox_level = WindowsSandboxLevel::from_config(&self.config);
        // disableOutputCap wins; otherwise use the explicit cap or the default.
        let output_bytes_cap = if disable_output_cap {
            None
        } else {
            Some(output_bytes_cap.unwrap_or(DEFAULT_OUTPUT_BYTES_CAP))
        };
        // disableTimeout switches to a cancellation token with no deadline.
        let expiration = if disable_timeout {
            ExecExpiration::Cancellation(CancellationToken::new())
        } else {
            match timeout_ms {
                Some(timeout_ms) => timeout_ms.into(),
                None => ExecExpiration::DefaultTimeout,
            }
        };
        let capture_policy = if disable_output_cap {
            ExecCapturePolicy::FullBuffer
        } else {
            ExecCapturePolicy::ShellTool
        };
        // With an explicit permission profile the sandbox is anchored at the
        // request cwd; otherwise at the configured cwd.
        let sandbox_cwd = if permission_profile.is_some() {
            cwd.clone()
        } else {
            self.config.cwd.clone()
        };
        let exec_params = ExecParams {
            command,
            cwd: cwd.clone(),
            expiration,
            capture_policy,
            env,
            network: started_network_proxy
                .as_ref()
                .map(codex_core::config::StartedNetworkProxy::proxy),
            sandbox_permissions: SandboxPermissions::UseDefault,
            windows_sandbox_level,
            windows_sandbox_private_desktop: self
                .config
                .permissions
                .windows_sandbox_private_desktop,
            justification: None,
            arg0: None,
        };

        // Resolve the effective permission profile, in priority order:
        // request profile > legacy sandbox policy > configured profile.
        // Either request-supplied form must pass the configured `can_set`
        // check before it is honored.
        let effective_permission_profile = if let Some(permission_profile) = permission_profile {
            let permission_profile =
                codex_protocol::models::PermissionProfile::from(permission_profile);
            let (mut file_system_sandbox_policy, network_sandbox_policy) =
                permission_profile.to_runtime_permissions();
            let configured_file_system_sandbox_policy =
                self.config.permissions.file_system_sandbox_policy();
            // Configured deny-read restrictions are merged in so a request
            // profile cannot widen read access past the configured policy.
            Self::preserve_configured_deny_read_restrictions(
                &mut file_system_sandbox_policy,
                &configured_file_system_sandbox_policy,
            );
            let effective_permission_profile =
                codex_protocol::models::PermissionProfile::from_runtime_permissions_with_enforcement(
                    permission_profile.enforcement(),
                    &file_system_sandbox_policy,
                    network_sandbox_policy,
                );
            self.config
                .permissions
                .permission_profile
                .can_set(&effective_permission_profile)
                .map_err(|err| invalid_request(format!("invalid permission profile: {err}")))?;
            effective_permission_profile
        } else if let Some(policy) = sandbox_policy.map(|policy| policy.to_core()) {
            // Legacy path: translate the old sandbox policy into a
            // permission profile, validating it twice (legacy check first,
            // then the profile-level check).
            self.config
                .permissions
                .can_set_legacy_sandbox_policy(&policy, &sandbox_cwd)
                .map_err(|err| invalid_request(format!("invalid sandbox policy: {err}")))?;
            let file_system_sandbox_policy =
                codex_protocol::permissions::FileSystemSandboxPolicy::from_legacy_sandbox_policy_for_cwd(&policy, &sandbox_cwd);
            let network_sandbox_policy =
                codex_protocol::permissions::NetworkSandboxPolicy::from(&policy);
            let permission_profile =
                codex_protocol::models::PermissionProfile::from_runtime_permissions_with_enforcement(
                    codex_protocol::models::SandboxEnforcement::from_legacy_sandbox_policy(&policy),
                    &file_system_sandbox_policy,
                    network_sandbox_policy,
                );
            self.config
                .permissions
                .permission_profile
                .can_set(&permission_profile)
                .map_err(|err| invalid_request(format!("invalid sandbox policy: {err}")))?;
            permission_profile
        } else {
            self.config.permissions.permission_profile()
        };

        let codex_linux_sandbox_exe = self.arg0_paths.codex_linux_sandbox_exe.clone();
        let outgoing = self.outgoing.clone();
        let request_for_task = request.clone();
        let started_network_proxy_for_task = started_network_proxy;
        let use_legacy_landlock = self.config.features.use_legacy_landlock();
        // Translate the protocol terminal size, propagating conversion errors.
        let size = match size.map(crate::command_exec::terminal_size_from_protocol) {
            Some(Ok(size)) => Some(size),
            Some(Err(error)) => return Err(error),
            None => None,
        };

        let exec_request = codex_core::exec::build_exec_request(
            exec_params,
            &effective_permission_profile,
            &sandbox_cwd,
            &codex_linux_sandbox_exe,
            use_legacy_landlock,
        )
        .map_err(|err| internal_error(format!("exec failed: {err}")))?;
        // Hand off to the manager; output/exit are streamed back via
        // `outgoing` rather than returned here.
        self.command_exec_manager
            .start(StartCommandExecParams {
                outgoing,
                request_id: request_for_task,
                process_id,
                exec_request,
                started_network_proxy: started_network_proxy_for_task,
                tty,
                stream_stdin,
                stream_stdout_stderr,
                output_bytes_cap,
                size,
            })
            .await
    }

    /// Copy deny-read restrictions from the configured policy into a
    /// request-supplied policy so configured denials always survive.
    fn preserve_configured_deny_read_restrictions(
        file_system_sandbox_policy: &mut FileSystemSandboxPolicy,
        configured_file_system_sandbox_policy: &FileSystemSandboxPolicy,
    ) {
        file_system_sandbox_policy
            .preserve_deny_read_restrictions_from(configured_file_system_sandbox_policy);
    }
}
|
||||
|
||||
// Unit tests live in a sibling file; compiled into this module only for
// test builds.
#[cfg(test)]
#[path = "command_exec_processor_tests.rs"]
mod command_exec_processor_tests;
|
||||
@@ -1,38 +0,0 @@
|
||||
use super::*;
|
||||
use codex_protocol::permissions::FileSystemAccessMode;
|
||||
use codex_protocol::permissions::FileSystemPath;
|
||||
use codex_protocol::permissions::FileSystemSandboxEntry;
|
||||
use codex_protocol::permissions::FileSystemSandboxPolicy;
|
||||
use codex_utils_absolute_path::test_support::PathBufExt;
|
||||
use codex_utils_absolute_path::test_support::test_path_buf;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
/// A request-supplied policy must end up containing the configured policy's
/// deny entries (and its glob scan depth) alongside its own entries.
#[test]
fn command_profile_preserves_configured_deny_read_restrictions() {
    // Entry carried by the request-supplied policy: plain readable path.
    let read_access = FileSystemSandboxEntry {
        path: FileSystemPath::Path {
            path: test_path_buf("/tmp/project").abs(),
        },
        access: FileSystemAccessMode::Read,
    };
    // Entry carried by the configured policy: glob-based deny.
    let denied_glob = FileSystemSandboxEntry {
        path: FileSystemPath::GlobPattern {
            pattern: "/tmp/project/**/*.env".to_string(),
        },
        access: FileSystemAccessMode::None,
    };

    let mut request_policy = FileSystemSandboxPolicy::restricted(vec![read_access.clone()]);
    let configured_policy = {
        let mut policy = FileSystemSandboxPolicy::restricted(vec![denied_glob.clone()]);
        policy.glob_scan_max_depth = Some(2);
        policy
    };

    CommandExecRequestProcessor::preserve_configured_deny_read_restrictions(
        &mut request_policy,
        &configured_policy,
    );

    // The merged policy holds both entries and inherits the scan depth.
    let expected = {
        let mut policy = FileSystemSandboxPolicy::restricted(vec![read_access, denied_glob]);
        policy.glob_scan_max_depth = Some(2);
        policy
    };
    assert_eq!(request_policy, expected);
}
|
||||
@@ -1,37 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Walk the `std::error::Error` source chain hanging off an `io::Error` and
/// return the first `CloudRequirementsLoadError` found, if any.
fn cloud_requirements_load_error(err: &std::io::Error) -> Option<&CloudRequirementsLoadError> {
    // Seed the walk with the io::Error's inner error (if it wraps one),
    // then follow `source()` links until a match or the chain ends.
    let root: Option<&(dyn std::error::Error + 'static)> = err
        .get_ref()
        .map(|source| source as &(dyn std::error::Error + 'static));
    std::iter::successors(root, |source| source.source())
        .find_map(|source| source.downcast_ref::<CloudRequirementsLoadError>())
}
|
||||
|
||||
pub(super) fn config_load_error(err: &std::io::Error) -> JSONRPCErrorError {
|
||||
let data = cloud_requirements_load_error(err).map(|cloud_error| {
|
||||
let mut data = serde_json::json!({
|
||||
"reason": "cloudRequirements",
|
||||
"errorCode": format!("{:?}", cloud_error.code()),
|
||||
"detail": cloud_error.to_string(),
|
||||
});
|
||||
if let Some(status_code) = cloud_error.status_code() {
|
||||
data["statusCode"] = serde_json::json!(status_code);
|
||||
}
|
||||
if cloud_error.code() == CloudRequirementsLoadErrorCode::Auth {
|
||||
data["action"] = serde_json::json!("relogin");
|
||||
}
|
||||
data
|
||||
});
|
||||
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("failed to load configuration: {err}"),
|
||||
data,
|
||||
}
|
||||
}
|
||||
@@ -1,622 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::config_manager_service::ConfigManagerError;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use crate::transport::RemoteControlHandle;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_app_server_protocol::AppListUpdatedNotification;
|
||||
use codex_app_server_protocol::ClientResponsePayload;
|
||||
use codex_app_server_protocol::ConfigBatchWriteParams;
|
||||
use codex_app_server_protocol::ConfigReadParams;
|
||||
use codex_app_server_protocol::ConfigReadResponse;
|
||||
use codex_app_server_protocol::ConfigRequirements;
|
||||
use codex_app_server_protocol::ConfigRequirementsReadResponse;
|
||||
use codex_app_server_protocol::ConfigValueWriteParams;
|
||||
use codex_app_server_protocol::ConfigWriteErrorCode;
|
||||
use codex_app_server_protocol::ConfigWriteResponse;
|
||||
use codex_app_server_protocol::ConfiguredHookHandler;
|
||||
use codex_app_server_protocol::ConfiguredHookMatcherGroup;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ManagedHooksRequirements;
|
||||
use codex_app_server_protocol::ModelProviderCapabilitiesReadResponse;
|
||||
use codex_app_server_protocol::NetworkDomainPermission;
|
||||
use codex_app_server_protocol::NetworkRequirements;
|
||||
use codex_app_server_protocol::NetworkUnixSocketPermission;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_config::ConfigRequirementsToml;
|
||||
use codex_config::HookEventsToml;
|
||||
use codex_config::HookHandlerConfig as CoreHookHandlerConfig;
|
||||
use codex_config::ManagedHooksRequirementsToml;
|
||||
use codex_config::MatcherGroup as CoreMatcherGroup;
|
||||
use codex_config::ResidencyRequirement as CoreResidencyRequirement;
|
||||
use codex_config::SandboxModeRequirement as CoreSandboxModeRequirement;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_features::Feature;
|
||||
use codex_features::canonical_feature_for_key;
|
||||
use codex_features::feature_for_key;
|
||||
use codex_login::AuthManager;
|
||||
use codex_model_provider::create_model_provider;
|
||||
use codex_plugin::PluginId;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::protocol::Op;
|
||||
use serde_json::json;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Feature keys that `experimentalFeatureEnablement/set` accepts; any other
/// canonical feature key is rejected with an invalid-request error. These
/// keys are also the ones `config/read` reflects back under `features`.
const SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT: &[&str] = &[
    "apps",
    "memories",
    "plugins",
    "remote_control",
    "tool_search",
    "tool_suggest",
    "tool_call_mcp_elicitation",
];
|
||||
|
||||
/// Handles config-related requests (read, requirements read, value/batch
/// writes, experimental feature enablement, model provider capabilities).
#[derive(Clone)]
pub(crate) struct ConfigRequestProcessor {
    // Channel for responses and server notifications back to the client.
    outgoing: Arc<OutgoingMessageSender>,
    // Reads and writes the persisted configuration.
    config_manager: ConfigManager,
    // Source of the current auth state (used for apps enablement checks).
    auth_manager: Arc<AuthManager>,
    // Gives access to plugin/skill/environment managers and live threads.
    thread_manager: Arc<ThreadManager>,
    // Emits plugin enable/disable telemetry events.
    analytics_events_client: AnalyticsEventsClient,
    // Present only when remote control is available for this server.
    remote_control_handle: Option<RemoteControlHandle>,
}
|
||||
|
||||
impl ConfigRequestProcessor {
    /// Plain field-by-field constructor.
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        config_manager: ConfigManager,
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        analytics_events_client: AnalyticsEventsClient,
        remote_control_handle: Option<RemoteControlHandle>,
    ) -> Self {
        Self {
            outgoing,
            config_manager,
            auth_manager,
            thread_manager,
            analytics_events_client,
            remote_control_handle,
        }
    }

    /// Handle `config/read`, then overlay the resolved enabled/disabled
    /// state of each supported experimental feature into the response's
    /// `features` map so clients see effective values, not just raw config.
    pub(crate) async fn read(
        &self,
        params: ConfigReadParams,
    ) -> Result<ConfigReadResponse, JSONRPCErrorError> {
        let fallback_cwd = params.cwd.as_ref().map(PathBuf::from);
        let mut response = self.config_manager.read(params).await.map_err(map_error)?;
        let config = self.load_latest_config(fallback_cwd).await?;
        for feature_key in SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT {
            let Some(feature) = feature_for_key(feature_key) else {
                continue;
            };
            // Ensure `features` exists and is an object before inserting;
            // a non-object value is replaced wholesale.
            let features = response
                .config
                .additional
                .entry("features".to_string())
                .or_insert_with(|| json!({}));
            if !features.is_object() {
                *features = json!({});
            }
            if let Some(features) = features.as_object_mut() {
                features.insert(
                    (*feature_key).to_string(),
                    json!(config.features.enabled(feature)),
                );
            }
        }
        Ok(response)
    }

    /// Handle `configRequirements/read`: load the TOML-level requirements
    /// (if any) and map them into the API representation.
    pub(crate) async fn config_requirements_read(
        &self,
    ) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
        let requirements = self
            .config_manager
            .read_requirements()
            .await
            .map_err(map_error)?
            .map(map_requirements_toml_to_api);

        Ok(ConfigRequirementsReadResponse { requirements })
    }

    /// Handle a single config value write; runs the shared post-mutation
    /// hook (cache clearing, remote-control refresh) on success.
    pub(crate) async fn value_write(
        &self,
        params: ConfigValueWriteParams,
    ) -> Result<ClientResponsePayload, JSONRPCErrorError> {
        self.handle_config_mutation_result(self.write_value(params).await)
            .await
            .map(ClientResponsePayload::ConfigValueWrite)
    }

    /// Handle a batched config write; runs the shared post-mutation hook
    /// on success.
    pub(crate) async fn batch_write(
        &self,
        params: ConfigBatchWriteParams,
    ) -> Result<ClientResponsePayload, JSONRPCErrorError> {
        self.handle_config_mutation_result(self.batch_write_inner(params).await)
            .await
            .map(ClientResponsePayload::ConfigBatchWrite)
    }

    /// Handle `experimentalFeatureEnablement/set`. Sends the response
    /// itself (returning `Ok(None)`), because when "apps" was just enabled
    /// an apps-list refresh must be kicked off only AFTER the response has
    /// gone out.
    pub(crate) async fn experimental_feature_enablement_set(
        &self,
        request_id: ConnectionRequestId,
        params: ExperimentalFeatureEnablementSetParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        // Captured before `params` is consumed by the setter below.
        let should_refresh_apps_list = params.enablement.get("apps").copied() == Some(true);
        let response = self
            .handle_config_mutation_result(self.set_experimental_feature_enablement(params).await)
            .await?;
        self.outgoing
            .send_response_as(
                request_id,
                ClientResponsePayload::ExperimentalFeatureEnablementSet(response),
            )
            .await;
        if should_refresh_apps_list {
            self.refresh_apps_list_after_experimental_feature_enablement_set()
                .await;
        }
        Ok(None)
    }

    /// Handle `modelProviderCapabilities/read`: build the configured model
    /// provider (no auth) and report its capability flags.
    pub(crate) async fn model_provider_capabilities_read(
        &self,
    ) -> Result<ModelProviderCapabilitiesReadResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let provider = create_model_provider(config.model_provider, /*auth_manager*/ None);
        let capabilities = provider.capabilities();
        Ok(ModelProviderCapabilitiesReadResponse {
            namespace_tools: capabilities.namespace_tools,
            image_generation: capabilities.image_generation,
            web_search: capabilities.web_search,
        })
    }

    /// Shared post-mutation work: clear plugin/skill caches, and — when a
    /// remote-control handle exists — re-read config to sync the
    /// remote-control enabled flag. A config load failure here is only
    /// logged; the mutation itself already succeeded.
    pub(crate) async fn handle_config_mutation(&self) {
        self.thread_manager.plugins_manager().clear_cache();
        self.thread_manager.skills_manager().clear_cache();
        let Some(remote_control_handle) = &self.remote_control_handle else {
            return;
        };

        match self.load_latest_config(/*fallback_cwd*/ None).await {
            Ok(config) => {
                remote_control_handle.set_enabled(config.features.enabled(Feature::RemoteControl));
            }
            Err(error) => {
                tracing::warn!(
                    "failed to load config for remote control enablement refresh after config mutation: {}",
                    error.message
                );
            }
        }
    }

    // Run the post-mutation hook only when the mutation succeeded, passing
    // its success value through unchanged.
    async fn handle_config_mutation_result<T>(
        &self,
        result: std::result::Result<T, JSONRPCErrorError>,
    ) -> Result<T, JSONRPCErrorError> {
        let response = result?;
        self.handle_config_mutation().await;
        Ok(response)
    }

    /// After "apps" is newly enabled: if apps are actually enabled for the
    /// current auth, spawn a background task that force-refetches both the
    /// directory and accessible connector lists and pushes an
    /// `AppListUpdated` notification. All failures are logged, never
    /// surfaced to the client.
    async fn refresh_apps_list_after_experimental_feature_enablement_set(&self) {
        let config = match self.load_latest_config(/*fallback_cwd*/ None).await {
            Ok(config) => config,
            Err(error) => {
                tracing::warn!(
                    "failed to load config for apps list refresh after experimental feature enablement: {}",
                    error.message
                );
                return;
            }
        };
        let auth = self.auth_manager.auth().await;
        if !config.features.apps_enabled_for_auth(
            auth.as_ref()
                .is_some_and(codex_login::CodexAuth::uses_codex_backend),
        ) {
            return;
        }

        let outgoing = Arc::clone(&self.outgoing);
        let environment_manager = self.thread_manager.environment_manager();
        // Detached task: the refresh may be slow (network fetches) and must
        // not block the request path.
        tokio::spawn(async move {
            // Both fetches run concurrently.
            let (all_connectors_result, accessible_connectors_result) = tokio::join!(
                connectors::list_all_connectors_with_options(&config, /*force_refetch*/ true),
                connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
                    &config,
                    /*force_refetch*/ true,
                    &environment_manager,
                ),
            );
            let all_connectors = match all_connectors_result {
                Ok(connectors) => connectors,
                Err(err) => {
                    tracing::warn!(
                        "failed to force-refresh directory apps after experimental feature enablement: {err:#}"
                    );
                    return;
                }
            };
            let accessible_connectors = match accessible_connectors_result {
                Ok(status) => status.connectors,
                Err(err) => {
                    tracing::warn!(
                        "failed to force-refresh accessible apps after experimental feature enablement: {err:#}"
                    );
                    return;
                }
            };

            let data = connectors::with_app_enabled_state(
                connectors::merge_connectors_with_accessible(
                    all_connectors,
                    accessible_connectors,
                    /*all_connectors_loaded*/ true,
                ),
                &config,
            );
            outgoing
                .send_server_notification(ServerNotification::AppListUpdated(
                    AppListUpdatedNotification { data },
                ))
                .await;
        });
    }

    // Load the freshest resolved config, mapping failures to an internal
    // JSON-RPC error.
    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<codex_core::config::Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| {
                internal_error(format!(
                    "failed to resolve feature override precedence: {err}"
                ))
            })
    }

    // Single-key write. Plugin enable/disable candidates are collected from
    // the key/value BEFORE the write, and telemetry is emitted only after
    // the write succeeded.
    async fn write_value(
        &self,
        params: ConfigValueWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let pending_changes = codex_core_plugins::toggles::collect_plugin_enabled_candidates(
            [(&params.key_path, &params.value)].into_iter(),
        );
        let response = self
            .config_manager
            .write_value(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        Ok(response)
    }

    // Batched write: same plugin-toggle telemetry flow as `write_value`,
    // plus an optional user-config reload across live threads when the
    // request asked for it.
    async fn batch_write_inner(
        &self,
        params: ConfigBatchWriteParams,
    ) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
        let reload_user_config = params.reload_user_config;
        let pending_changes = codex_core_plugins::toggles::collect_plugin_enabled_candidates(
            params
                .edits
                .iter()
                .map(|edit| (&edit.key_path, &edit.value)),
        );
        let response = self
            .config_manager
            .batch_write(params)
            .await
            .map_err(map_error)?;
        self.emit_plugin_toggle_events(pending_changes).await;
        if reload_user_config {
            self.reload_user_config().await;
        }
        Ok(response)
    }

    /// Validate and apply runtime feature enablement overrides.
    ///
    /// # Errors
    /// Rejects any key outside `SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT`:
    /// known-but-unsupported canonical keys, non-canonical aliases (with a
    /// hint naming the canonical key), and unknown keys. An empty map is a
    /// no-op success.
    async fn set_experimental_feature_enablement(
        &self,
        params: ExperimentalFeatureEnablementSetParams,
    ) -> Result<ExperimentalFeatureEnablementSetResponse, JSONRPCErrorError> {
        let ExperimentalFeatureEnablementSetParams { enablement } = params;
        for key in enablement.keys() {
            if canonical_feature_for_key(key).is_some() {
                if SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.contains(&key.as_str()) {
                    continue;
                }

                return Err(invalid_request(format!(
                    "unsupported feature enablement `{key}`: currently supported features are {}",
                    SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.join(", ")
                )));
            }

            // Not a canonical key: suggest the canonical spelling when the
            // alias resolves to a known feature.
            let message = if let Some(feature) = feature_for_key(key) {
                format!(
                    "invalid feature enablement `{key}`: use canonical feature key `{}`",
                    feature.key()
                )
            } else {
                format!("invalid feature enablement `{key}`")
            };
            return Err(invalid_request(message));
        }

        if enablement.is_empty() {
            return Ok(ExperimentalFeatureEnablementSetResponse { enablement });
        }

        self.config_manager
            .extend_runtime_feature_enablement(
                enablement
                    .iter()
                    .map(|(name, enabled)| (name.clone(), *enabled)),
            )
            .map_err(|_| internal_error("failed to update feature enablement"))?;

        // Re-resolve config with the new overrides, then push the reload to
        // live threads.
        self.load_latest_config(/*fallback_cwd*/ None).await?;
        self.reload_user_config().await;

        Ok(ExperimentalFeatureEnablementSetResponse { enablement })
    }

    // Ask every live thread to reload user config; individual failures are
    // logged and skipped so one bad thread doesn't block the rest.
    async fn reload_user_config(&self) {
        let thread_ids = self.thread_manager.list_thread_ids().await;
        for thread_id in thread_ids {
            let Ok(thread) = self.thread_manager.get_thread(thread_id).await else {
                continue;
            };
            if let Err(err) = thread.submit(Op::ReloadUserConfig).await {
                tracing::warn!("failed to request user config reload: {err}");
            }
        }
    }

    // Emit enable/disable telemetry for each plugin whose toggle changed;
    // keys that don't parse as plugin ids are silently skipped.
    async fn emit_plugin_toggle_events(
        &self,
        pending_changes: std::collections::BTreeMap<String, bool>,
    ) {
        for (plugin_id, enabled) in pending_changes {
            let Ok(plugin_id) = PluginId::parse(&plugin_id) else {
                continue;
            };
            let metadata = codex_core_plugins::loader::installed_plugin_telemetry_metadata(
                self.config_manager.codex_home(),
                &plugin_id,
            )
            .await;
            if enabled {
                self.analytics_events_client.track_plugin_enabled(metadata);
            } else {
                self.analytics_events_client.track_plugin_disabled(metadata);
            }
        }
    }
}
|
||||
|
||||
/// Converts TOML-layer config requirements into their API (protocol) shape.
///
/// Each field is mapped independently; `None` fields stay `None` (no
/// requirement imposed). Notable transformations:
/// - sandbox modes go through `map_sandbox_mode_requirement_to_api`, which
///   drops entries it cannot represent (see that function);
/// - allowed web-search modes are normalized so `Disabled` is always present.
fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
    ConfigRequirements {
        allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
            policies
                .into_iter()
                .map(codex_app_server_protocol::AskForApproval::from)
                .collect()
        }),
        allowed_approvals_reviewers: requirements.allowed_approvals_reviewers.map(|reviewers| {
            reviewers
                .into_iter()
                .map(codex_app_server_protocol::ApprovalsReviewer::from)
                .collect()
        }),
        allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
            modes
                .into_iter()
                // filter_map drops modes with no API equivalent.
                .filter_map(map_sandbox_mode_requirement_to_api)
                .collect()
        }),
        allowed_web_search_modes: requirements.allowed_web_search_modes.map(|modes| {
            let mut normalized = modes
                .into_iter()
                .map(Into::into)
                .collect::<Vec<WebSearchMode>>();
            // Disabling web search must always remain a permitted choice.
            if !normalized.contains(&WebSearchMode::Disabled) {
                normalized.push(WebSearchMode::Disabled);
            }
            normalized
        }),
        feature_requirements: requirements
            .feature_requirements
            .map(|requirements| requirements.entries),
        hooks: requirements.hooks.map(map_hooks_requirements_to_api),
        enforce_residency: requirements
            .enforce_residency
            .map(map_residency_requirement_to_api),
        network: requirements.network.map(map_network_requirements_to_api),
    }
}
|
||||
|
||||
/// Converts managed-hooks requirements from TOML form to the API form.
///
/// The two-step destructuring (first the outer struct, then the per-event
/// `HookEventsToml`) is exhaustive on purpose: adding a field or event kind
/// upstream becomes a compile error here rather than a silent drop.
fn map_hooks_requirements_to_api(hooks: ManagedHooksRequirementsToml) -> ManagedHooksRequirements {
    let ManagedHooksRequirementsToml {
        managed_dir,
        windows_managed_dir,
        hooks,
    } = hooks;
    let HookEventsToml {
        pre_tool_use,
        permission_request,
        post_tool_use,
        session_start,
        user_prompt_submit,
        stop,
    } = hooks;

    ManagedHooksRequirements {
        managed_dir,
        windows_managed_dir,
        // Each hook event carries its own list of matcher groups.
        pre_tool_use: map_hook_matcher_groups_to_api(pre_tool_use),
        permission_request: map_hook_matcher_groups_to_api(permission_request),
        post_tool_use: map_hook_matcher_groups_to_api(post_tool_use),
        session_start: map_hook_matcher_groups_to_api(session_start),
        user_prompt_submit: map_hook_matcher_groups_to_api(user_prompt_submit),
        stop: map_hook_matcher_groups_to_api(stop),
    }
}
|
||||
|
||||
fn map_hook_matcher_groups_to_api(
|
||||
groups: Vec<CoreMatcherGroup>,
|
||||
) -> Vec<ConfiguredHookMatcherGroup> {
|
||||
groups
|
||||
.into_iter()
|
||||
.map(map_hook_matcher_group_to_api)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn map_hook_matcher_group_to_api(group: CoreMatcherGroup) -> ConfiguredHookMatcherGroup {
|
||||
ConfiguredHookMatcherGroup {
|
||||
matcher: group.matcher,
|
||||
hooks: group
|
||||
.hooks
|
||||
.into_iter()
|
||||
.map(map_hook_handler_to_api)
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a core hook handler config into its API equivalent.
///
/// The match is exhaustive and one-to-one: every core variant has a matching
/// API variant, and `Command` fields are carried over unchanged.
fn map_hook_handler_to_api(handler: CoreHookHandlerConfig) -> ConfiguredHookHandler {
    match handler {
        CoreHookHandlerConfig::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        } => ConfiguredHookHandler::Command {
            command,
            timeout_sec,
            r#async,
            status_message,
        },
        CoreHookHandlerConfig::Prompt {} => ConfiguredHookHandler::Prompt {},
        CoreHookHandlerConfig::Agent {} => ConfiguredHookHandler::Agent {},
    }
}
|
||||
|
||||
/// Maps a core sandbox-mode requirement to the API enum.
///
/// Returns `None` for `ExternalSandbox`, which has no API representation —
/// callers use `filter_map` so such entries are dropped from the allowed list.
fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
    match mode {
        CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
        CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
        CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
        CoreSandboxModeRequirement::ExternalSandbox => None,
    }
}
|
||||
|
||||
/// Maps a core residency requirement to the protocol enum.
///
/// Currently only `Us` exists; a new core variant will fail to compile here
/// until mapped.
fn map_residency_requirement_to_api(
    residency: CoreResidencyRequirement,
) -> codex_app_server_protocol::ResidencyRequirement {
    match residency {
        CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
    }
}
|
||||
|
||||
/// Converts TOML network requirements into the API `NetworkRequirements`.
///
/// Besides a field-for-field copy, this derives three convenience views from
/// the raw permission maps:
/// - `allowed_domains` / `denied_domains`: split out of `domains` by
///   permission kind (helpers return `Option`, so absent maps stay `None`);
/// - `allow_unix_sockets`: the allowed socket paths, with an empty list
///   collapsed to `None`.
fn map_network_requirements_to_api(
    network: codex_config::NetworkRequirementsToml,
) -> NetworkRequirements {
    let allowed_domains = network
        .domains
        .as_ref()
        .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains);
    let denied_domains = network
        .domains
        .as_ref()
        .and_then(codex_config::NetworkDomainPermissionsToml::denied_domains);
    let allow_unix_sockets = network
        .unix_sockets
        .as_ref()
        .map(codex_config::NetworkUnixSocketPermissionsToml::allow_unix_sockets)
        // An empty allow-list is treated the same as "not configured".
        .filter(|entries| !entries.is_empty());

    NetworkRequirements {
        enabled: network.enabled,
        http_port: network.http_port,
        socks_port: network.socks_port,
        allow_upstream_proxy: network.allow_upstream_proxy,
        dangerously_allow_non_loopback_proxy: network.dangerously_allow_non_loopback_proxy,
        dangerously_allow_all_unix_sockets: network.dangerously_allow_all_unix_sockets,
        // Raw per-domain permission map, translated entry by entry.
        domains: network.domains.map(|domains| {
            domains
                .entries
                .into_iter()
                .map(|(pattern, permission)| {
                    (pattern, map_network_domain_permission_to_api(permission))
                })
                .collect()
        }),
        managed_allowed_domains_only: network.managed_allowed_domains_only,
        allowed_domains,
        denied_domains,
        // Raw per-socket permission map, translated entry by entry.
        unix_sockets: network.unix_sockets.map(|unix_sockets| {
            unix_sockets
                .entries
                .into_iter()
                .map(|(path, permission)| {
                    (path, map_network_unix_socket_permission_to_api(permission))
                })
                .collect()
        }),
        allow_unix_sockets,
        allow_local_binding: network.allow_local_binding,
    }
}
|
||||
|
||||
/// One-to-one mapping of the TOML domain permission to the API enum.
fn map_network_domain_permission_to_api(
    permission: codex_config::NetworkDomainPermissionToml,
) -> NetworkDomainPermission {
    match permission {
        codex_config::NetworkDomainPermissionToml::Allow => NetworkDomainPermission::Allow,
        codex_config::NetworkDomainPermissionToml::Deny => NetworkDomainPermission::Deny,
    }
}
|
||||
|
||||
/// One-to-one mapping of the TOML unix-socket permission to the API enum.
fn map_network_unix_socket_permission_to_api(
    permission: codex_config::NetworkUnixSocketPermissionToml,
) -> NetworkUnixSocketPermission {
    match permission {
        codex_config::NetworkUnixSocketPermissionToml::Allow => NetworkUnixSocketPermission::Allow,
        codex_config::NetworkUnixSocketPermissionToml::None => NetworkUnixSocketPermission::None,
    }
}
|
||||
|
||||
fn map_error(err: ConfigManagerError) -> JSONRPCErrorError {
|
||||
if let Some(code) = err.write_error_code() {
|
||||
return config_write_error(code, err.to_string());
|
||||
}
|
||||
|
||||
internal_error(err.to_string())
|
||||
}
|
||||
|
||||
fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: message.into(),
|
||||
data: Some(json!({
|
||||
"config_write_error_code": code,
|
||||
})),
|
||||
}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
fn migration_item(
|
||||
item_type: ExternalAgentConfigMigrationItemType,
|
||||
) -> ExternalAgentConfigMigrationItem {
|
||||
ExternalAgentConfigMigrationItem {
|
||||
item_type,
|
||||
description: String::new(),
|
||||
cwd: None,
|
||||
details: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn migration_items_that_update_runtime_sources_trigger_refresh() {
    // Item types that mutate runtime configuration sources must request a
    // refresh...
    let refresh_triggering = [
        ExternalAgentConfigMigrationItemType::Config,
        ExternalAgentConfigMigrationItemType::Skills,
        ExternalAgentConfigMigrationItemType::McpServerConfig,
        ExternalAgentConfigMigrationItemType::Hooks,
        ExternalAgentConfigMigrationItemType::Commands,
        ExternalAgentConfigMigrationItemType::Plugins,
    ];
    for item_type in refresh_triggering {
        assert!(migration_items_need_runtime_refresh(&[migration_item(
            item_type
        )]));
    }
    // ...while session data alone does not.
    assert!(!migration_items_need_runtime_refresh(&[migration_item(
        ExternalAgentConfigMigrationItemType::Sessions,
    )]));
}
|
||||
@@ -1,242 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles feedback-related JSON-RPC requests (currently: feedback upload).
#[derive(Clone)]
pub(crate) struct FeedbackRequestProcessor {
    /// Used to attach the cached ChatGPT user id to feedback telemetry.
    auth_manager: Arc<AuthManager>,
    /// Source of live threads, rollout paths, and the session source.
    thread_manager: Arc<ThreadManager>,
    /// Global config; gates whether feedback may be sent at all.
    config: Arc<Config>,
    /// Feedback snapshot/upload entry point.
    feedback: CodexFeedback,
    /// Optional log layer; flushed before logs are gathered for an upload.
    log_db: Option<LogDbLayer>,
}
|
||||
|
||||
impl FeedbackRequestProcessor {
    /// Creates a processor from its injected dependencies.
    pub(crate) fn new(
        auth_manager: Arc<AuthManager>,
        thread_manager: Arc<ThreadManager>,
        config: Arc<Config>,
        feedback: CodexFeedback,
        log_db: Option<LogDbLayer>,
    ) -> Self {
        Self {
            auth_manager,
            thread_manager,
            config,
            feedback,
            log_db,
        }
    }

    /// JSON-RPC entry point for `feedback/upload`; wraps the typed response
    /// into a generic client response payload.
    pub(crate) async fn feedback_upload(
        &self,
        params: FeedbackUploadParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.upload_feedback_response(params)
            .await
            .map(|response| Some(response.into()))
    }

    /// Performs a feedback upload.
    ///
    /// Flow: validate (feature enabled, thread id parses) -> snapshot the
    /// feedback state -> optionally gather logs (sqlite feedback logs plus
    /// rollout-file attachments for the thread's agent subtree) -> run the
    /// blocking upload off the async runtime.
    ///
    /// Log gathering is best-effort throughout: listing/query failures are
    /// logged and degraded rather than failing the upload.
    async fn upload_feedback_response(
        &self,
        params: FeedbackUploadParams,
    ) -> Result<FeedbackUploadResponse, JSONRPCErrorError> {
        if !self.config.feedback_enabled {
            return Err(invalid_request(
                "sending feedback is disabled by configuration",
            ));
        }

        let FeedbackUploadParams {
            classification,
            reason,
            thread_id,
            include_logs,
            extra_log_files,
            tags,
        } = params;

        // A provided thread id must parse; absence is allowed (global feedback).
        let conversation_id = match thread_id.as_deref() {
            Some(thread_id) => match ThreadId::from_string(thread_id) {
                Ok(conversation_id) => Some(conversation_id),
                Err(err) => return Err(invalid_request(format!("invalid thread id: {err}"))),
            },
            None => None,
        };

        // Emit the user id into the feedback_tags log target so it rides along
        // with the uploaded logs.
        if let Some(chatgpt_user_id) = self
            .auth_manager
            .auth_cached()
            .and_then(|auth| auth.get_chatgpt_user_id())
        {
            tracing::info!(target: "feedback_tags", chatgpt_user_id);
        }
        let snapshot = self.feedback.snapshot(conversation_id);
        let thread_id = snapshot.thread_id.clone();
        // When logs are requested, collect the subtree of related thread ids
        // and any persisted feedback logs for them.
        let (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx) = if include_logs {
            // Flush buffered log records so the sqlite query below sees them.
            if let Some(log_db) = self.log_db.as_ref() {
                log_db.flush().await;
            }
            let state_db_ctx = get_state_db(&self.config).await;
            let feedback_thread_ids = match conversation_id {
                Some(conversation_id) => match self
                    .thread_manager
                    .list_agent_subtree_thread_ids(conversation_id)
                    .await
                {
                    Ok(thread_ids) => thread_ids,
                    Err(err) => {
                        // Fallback: reconstruct the subtree from the state DB's
                        // persisted spawn edges (both open and closed).
                        warn!(
                            "failed to list feedback subtree for thread_id={conversation_id}: {err}"
                        );
                        let mut thread_ids = vec![conversation_id];
                        if let Some(state_db_ctx) = state_db_ctx.as_ref() {
                            for status in [
                                codex_state::DirectionalThreadSpawnEdgeStatus::Open,
                                codex_state::DirectionalThreadSpawnEdgeStatus::Closed,
                            ] {
                                match state_db_ctx
                                    .list_thread_spawn_descendants_with_status(
                                        conversation_id,
                                        status,
                                    )
                                    .await
                                {
                                    Ok(descendant_ids) => thread_ids.extend(descendant_ids),
                                    Err(err) => warn!(
                                        "failed to list persisted feedback logs subtree for thread_id={conversation_id}: {err}"
                                    ),
                                }
                            }
                        }
                        thread_ids
                    }
                },
                None => Vec::new(),
            };
            let sqlite_feedback_logs = if let Some(state_db_ctx) = state_db_ctx.as_ref()
                && !feedback_thread_ids.is_empty()
            {
                // The query API takes &[&str]; keep the owned strings alive
                // alongside the borrowed slice.
                let thread_id_texts = feedback_thread_ids
                    .iter()
                    .map(ToString::to_string)
                    .collect::<Vec<_>>();
                let thread_id_refs = thread_id_texts
                    .iter()
                    .map(String::as_str)
                    .collect::<Vec<_>>();
                match state_db_ctx
                    .query_feedback_logs_for_threads(&thread_id_refs)
                    .await
                {
                    // Empty results are normalized to None (no override).
                    Ok(logs) if logs.is_empty() => None,
                    Ok(logs) => Some(logs),
                    Err(err) => {
                        let thread_ids = thread_id_texts.join(", ");
                        warn!(
                            "failed to query feedback logs from sqlite for thread_ids=[{thread_ids}]: {err}"
                        );
                        None
                    }
                }
            } else {
                None
            };
            (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx)
        } else {
            (Vec::new(), None, None)
        };

        // Gather rollout-file attachments, de-duplicated by path.
        let mut attachment_paths = Vec::new();
        let mut seen_attachment_paths = HashSet::new();
        if include_logs {
            for feedback_thread_id in &feedback_thread_ids {
                let Some(rollout_path) = self
                    .resolve_rollout_path(*feedback_thread_id, state_db_ctx.as_ref())
                    .await
                else {
                    continue;
                };
                if seen_attachment_paths.insert(rollout_path.clone()) {
                    attachment_paths.push(FeedbackAttachmentPath {
                        path: rollout_path,
                        attachment_filename_override: None,
                    });
                }
            }
            // Attach the guardian (auto-review) rollout under a distinctive
            // filename when one exists for the root conversation.
            if let Some(conversation_id) = conversation_id
                && let Ok(conversation) = self.thread_manager.get_thread(conversation_id).await
                && let Some(guardian_rollout_path) =
                    conversation.guardian_trunk_rollout_path().await
                && seen_attachment_paths.insert(guardian_rollout_path.clone())
            {
                attachment_paths.push(FeedbackAttachmentPath {
                    path: guardian_rollout_path,
                    attachment_filename_override: Some(auto_review_rollout_filename(
                        conversation_id,
                    )),
                });
            }
        }
        // Caller-supplied extra files are attached regardless of include_logs.
        if let Some(extra_log_files) = extra_log_files {
            for extra_log_file in extra_log_files {
                if seen_attachment_paths.insert(extra_log_file.clone()) {
                    attachment_paths.push(FeedbackAttachmentPath {
                        path: extra_log_file,
                        attachment_filename_override: None,
                    });
                }
            }
        }

        let session_source = self.thread_manager.session_source();

        // The upload itself is blocking I/O; run it off the async runtime.
        let upload_result = tokio::task::spawn_blocking(move || {
            snapshot.upload_feedback(FeedbackUploadOptions {
                classification: &classification,
                reason: reason.as_deref(),
                tags: tags.as_ref(),
                include_logs,
                extra_attachment_paths: &attachment_paths,
                session_source: Some(session_source),
                logs_override: sqlite_feedback_logs,
            })
        })
        .await;

        // A JoinError (panic/cancellation in the blocking task) is surfaced
        // the same way as an upload failure.
        let upload_result = match upload_result {
            Ok(result) => result,
            Err(join_err) => {
                return Err(internal_error(format!(
                    "failed to upload feedback: {join_err}"
                )));
            }
        };

        upload_result.map_err(|err| internal_error(format!("failed to upload feedback: {err}")))?;
        Ok(FeedbackUploadResponse { thread_id })
    }

    /// Resolves the rollout file path for a thread.
    ///
    /// Prefers the live thread's own rollout path; falls back to a state-DB
    /// lookup. Returns `None` (after logging) when neither source can
    /// resolve it.
    async fn resolve_rollout_path(
        &self,
        conversation_id: ThreadId,
        state_db_ctx: Option<&StateDbHandle>,
    ) -> Option<PathBuf> {
        if let Ok(conversation) = self.thread_manager.get_thread(conversation_id).await
            && let Some(rollout_path) = conversation.rollout_path()
        {
            return Some(rollout_path);
        }

        let state_db_ctx = state_db_ctx?;
        state_db_ctx
            .find_rollout_path_by_id(conversation_id, /*archived_only*/ None)
            .await
            .unwrap_or_else(|err| {
                warn!("failed to resolve rollout path for thread_id={conversation_id}: {err}");
                None
            })
    }
}
|
||||
|
||||
fn auto_review_rollout_filename(thread_id: ThreadId) -> String {
|
||||
format!("auto-review-rollout-{thread_id}.jsonl")
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles git-related JSON-RPC requests. Stateless unit struct.
#[derive(Clone)]
pub(crate) struct GitRequestProcessor;
|
||||
|
||||
impl GitRequestProcessor {
    /// Creates the (stateless) processor.
    pub(crate) fn new() -> Self {
        Self
    }

    /// JSON-RPC entry point: computes the diff of `params.cwd` against its
    /// remote and wraps it as a client response payload.
    pub(crate) async fn git_diff_to_remote(
        &self,
        params: GitDiffToRemoteParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.git_diff_to_origin(params.cwd)
            .await
            .map(|response| Some(response.into()))
    }

    /// Computes the diff-to-remote for `cwd`.
    ///
    /// Any failure (e.g. not a git repo, no remote) is reported as an
    /// invalid-request error naming the directory.
    async fn git_diff_to_origin(
        &self,
        cwd: PathBuf,
    ) -> Result<GitDiffToRemoteResponse, JSONRPCErrorError> {
        git_diff_to_remote(&cwd)
            .await
            .map(|value| GitDiffToRemoteResponse {
                sha: value.sha,
                diff: value.diff,
            })
            .ok_or_else(|| {
                invalid_request(format!(
                    "failed to compute git diff to remote for cwd: {cwd:?}"
                ))
            })
    }
}
|
||||
@@ -1,184 +0,0 @@
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use axum::http::HeaderValue;
|
||||
use codex_analytics::AppServerRpcTransport;
|
||||
use codex_login::default_client::SetOriginatorError;
|
||||
use codex_login::default_client::USER_AGENT_SUFFIX;
|
||||
use codex_login::default_client::get_codex_user_agent;
|
||||
use codex_login::default_client::set_default_client_residency_requirement;
|
||||
use codex_login::default_client::set_default_originator;
|
||||
|
||||
use super::*;
|
||||
use crate::message_processor::ConnectionSessionState;
|
||||
use crate::message_processor::InitializedConnectionSessionState;
|
||||
|
||||
/// Handles the JSON-RPC `initialize` handshake and the notifications that
/// follow it.
#[derive(Clone)]
pub(crate) struct InitializeRequestProcessor {
    /// Channel for responses and server-initiated notifications.
    outgoing: Arc<OutgoingMessageSender>,
    /// Analytics sink for initialize/request tracking.
    analytics_events_client: AnalyticsEventsClient,
    /// Global config (codex_home, residency enforcement, ...).
    config: Arc<Config>,
    /// Config warnings replayed to clients after initialize.
    config_warnings: Arc<Vec<ConfigWarningNotification>>,
    /// Transport kind recorded with initialize analytics.
    rpc_transport: AppServerRpcTransport,
}
|
||||
|
||||
impl InitializeRequestProcessor {
    /// Creates a processor; `config_warnings` is shared behind an `Arc` so
    /// clones of the processor replay the same warning list.
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        analytics_events_client: AnalyticsEventsClient,
        config: Arc<Config>,
        config_warnings: Vec<ConfigWarningNotification>,
        rpc_transport: AppServerRpcTransport,
    ) -> Self {
        Self {
            outgoing,
            analytics_events_client,
            config,
            config_warnings: Arc::new(config_warnings),
            rpc_transport,
        }
    }

    /// Handles `initialize` for one connection.
    ///
    /// Ordering is deliberate: validate clientInfo.name first, then win the
    /// per-session initialization race, and only the winner mutates
    /// process-global client metadata (originator, user-agent suffix,
    /// residency). The response is sent here directly; the returned bool is
    /// `true` when the connection was also marked outbound-ready.
    pub(crate) async fn initialize(
        &self,
        connection_id: ConnectionId,
        request_id: RequestId,
        params: InitializeParams,
        session: &ConnectionSessionState,
        // `Some(...)` means the caller wants initialize to immediately mark the
        // connection outbound-ready. Websocket JSON-RPC calls pass `None` so
        // lib.rs can deliver connection-scoped initialize notifications first.
        outbound_initialized: Option<&AtomicBool>,
    ) -> Result<bool, JSONRPCErrorError> {
        let connection_request_id = ConnectionRequestId {
            connection_id,
            request_id,
        };
        // Cheap early rejection; the authoritative check is the
        // `session.initialize(...)` call below.
        if session.initialized() {
            return Err(invalid_request("Already initialized"));
        }

        // TODO(maxj): Revisit capability scoping for `experimental_api_enabled`.
        // Current behavior is per-connection. Reviewer feedback notes this can
        // create odd cross-client behavior (for example dynamic tool calls on a
        // shared thread when another connected client did not opt into
        // experimental API). Proposed direction is instance-global first-write-wins
        // with initialize-time mismatch rejection.
        let analytics_initialize_params = params.clone();
        let (experimental_api_enabled, opt_out_notification_methods) = match params.capabilities {
            Some(capabilities) => (
                capabilities.experimental_api,
                capabilities
                    .opt_out_notification_methods
                    .unwrap_or_default(),
            ),
            None => (false, Vec::new()),
        };
        let ClientInfo {
            name,
            title: _title,
            version,
        } = params.client_info;
        // Validate before committing; set_default_originator validates while
        // mutating process-global metadata.
        if HeaderValue::from_str(&name).is_err() {
            return Err(invalid_request(format!(
                "Invalid clientInfo.name: '{name}'. Must be a valid HTTP header value."
            )));
        }
        let originator = name.clone();
        let user_agent_suffix = format!("{name}; {version}");
        let codex_home = self.config.codex_home.clone();
        // Atomically claim initialization; losing the race means another
        // request already initialized this session.
        if session
            .initialize(InitializedConnectionSessionState {
                experimental_api_enabled,
                opted_out_notification_methods: opt_out_notification_methods.into_iter().collect(),
                app_server_client_name: name.clone(),
                client_version: version,
            })
            .is_err()
        {
            return Err(invalid_request("Already initialized"));
        }

        // Only the request that wins session initialization may mutate
        // process-global client metadata.
        if let Err(error) = set_default_originator(originator.clone()) {
            match error {
                SetOriginatorError::InvalidHeaderValue => {
                    // Should be unreachable given the HeaderValue check above.
                    tracing::warn!(
                        client_info_name = %name,
                        "validated clientInfo.name was rejected while setting originator"
                    );
                }
                SetOriginatorError::AlreadyInitialized => {
                    // No-op. This is expected to happen if the originator is already set via env var.
                    // TODO(owen): Once we remove support for CODEX_INTERNAL_ORIGINATOR_OVERRIDE,
                    // this will be an unexpected state and we can return a JSON-RPC error indicating
                    // internal server error.
                }
            }
        }
        self.analytics_events_client.track_initialize(
            connection_id.0,
            analytics_initialize_params,
            originator,
            self.rpc_transport,
        );
        set_default_client_residency_requirement(self.config.enforce_residency.value());
        if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
            *suffix = Some(user_agent_suffix);
        }

        // Computed after the suffix update so it reflects this client.
        let user_agent = get_codex_user_agent();
        let response = InitializeResponse {
            user_agent,
            codex_home,
            platform_family: std::env::consts::FAMILY.to_string(),
            platform_os: std::env::consts::OS.to_string(),
        };

        self.outgoing
            .send_response(connection_request_id, response)
            .await;

        if let Some(outbound_initialized) = outbound_initialized {
            outbound_initialized.store(true, Ordering::Release);
            return Ok(true);
        }

        Ok(false)
    }

    /// Replays config warnings to a single connection.
    pub(crate) async fn send_initialize_notifications_to_connection(
        &self,
        connection_id: ConnectionId,
    ) {
        for notification in self.config_warnings.iter().cloned() {
            self.outgoing
                .send_server_notification_to_connections(
                    &[connection_id],
                    ServerNotification::ConfigWarning(notification),
                )
                .await;
        }
    }

    /// Broadcasts config warnings to all connections.
    pub(crate) async fn send_initialize_notifications(&self) {
        for notification in self.config_warnings.iter().cloned() {
            self.outgoing
                .send_server_notification(ServerNotification::ConfigWarning(notification))
                .await;
        }
    }

    /// Forwards a post-initialize request to analytics tracking.
    pub(crate) fn track_initialized_request(
        &self,
        connection_id: ConnectionId,
        request_id: RequestId,
        request: &ClientRequest,
    ) {
        self.analytics_events_client
            .track_request(connection_id.0, request_id, request);
    }
}
|
||||
@@ -1,141 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles marketplace JSON-RPC requests (add / remove / upgrade).
#[derive(Clone)]
pub(crate) struct MarketplaceRequestProcessor {
    /// Global config; supplies codex_home for marketplace storage.
    config: Arc<Config>,
    /// Used to reload the latest config before an upgrade.
    config_manager: ConfigManager,
    /// Provides access to the plugins manager performing upgrades.
    thread_manager: Arc<ThreadManager>,
}
|
||||
|
||||
impl MarketplaceRequestProcessor {
    /// Creates a processor from its injected dependencies.
    pub(crate) fn new(
        config: Arc<Config>,
        config_manager: ConfigManager,
        thread_manager: Arc<ThreadManager>,
    ) -> Self {
        Self {
            config,
            config_manager,
            thread_manager,
        }
    }

    /// JSON-RPC entry point for adding a marketplace.
    pub(crate) async fn marketplace_add(
        &self,
        params: MarketplaceAddParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_add_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    /// JSON-RPC entry point for removing a marketplace.
    pub(crate) async fn marketplace_remove(
        &self,
        params: MarketplaceRemoveParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_remove_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    /// JSON-RPC entry point for upgrading configured marketplaces.
    pub(crate) async fn marketplace_upgrade(
        &self,
        params: MarketplaceUpgradeParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.marketplace_upgrade_response_inner(params)
            .await
            .map(|response| Some(response.into()))
    }

    /// Removes a named marketplace from codex_home and maps the outcome/error
    /// to the protocol types.
    async fn marketplace_remove_inner(
        &self,
        params: MarketplaceRemoveParams,
    ) -> Result<MarketplaceRemoveResponse, JSONRPCErrorError> {
        remove_marketplace(
            self.config.codex_home.to_path_buf(),
            CoreMarketplaceRemoveRequest {
                marketplace_name: params.marketplace_name,
            },
        )
        .await
        .map(|outcome| MarketplaceRemoveResponse {
            marketplace_name: outcome.marketplace_name,
            installed_root: outcome.removed_installed_root,
        })
        .map_err(|err| match err {
            MarketplaceRemoveError::InvalidRequest(message) => invalid_request(message),
            MarketplaceRemoveError::Internal(message) => internal_error(message),
        })
    }

    /// Upgrades configured marketplaces (optionally a single named one).
    ///
    /// Reloads the latest config first, then runs the upgrade on a blocking
    /// thread since it performs filesystem/network work. Per-marketplace
    /// failures are returned in the response rather than failing the call.
    async fn marketplace_upgrade_response_inner(
        &self,
        params: MarketplaceUpgradeParams,
    ) -> Result<MarketplaceUpgradeResponse, JSONRPCErrorError> {
        let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
        let plugins_manager = self.thread_manager.plugins_manager();
        let MarketplaceUpgradeParams { marketplace_name } = params;
        let plugins_input = config.plugins_config_input();

        let outcome = tokio::task::spawn_blocking(move || {
            plugins_manager.upgrade_configured_marketplaces_for_config(
                &plugins_input,
                marketplace_name.as_deref(),
            )
        })
        .await
        // Outer error: the blocking task panicked or was cancelled.
        .map_err(|err| internal_error(format!("failed to upgrade marketplaces: {err}")))?
        // Inner error: the upgrade itself rejected the request.
        .map_err(invalid_request)?;

        Ok(MarketplaceUpgradeResponse {
            selected_marketplaces: outcome.selected_marketplaces,
            upgraded_roots: outcome.upgraded_roots,
            errors: outcome
                .errors
                .into_iter()
                .map(|err| MarketplaceUpgradeErrorInfo {
                    marketplace_name: err.marketplace_name,
                    message: err.message,
                })
                .collect(),
        })
    }

    /// Adds a marketplace to codex_home and maps the outcome/error to the
    /// protocol types.
    async fn marketplace_add_inner(
        &self,
        params: MarketplaceAddParams,
    ) -> Result<MarketplaceAddResponse, JSONRPCErrorError> {
        add_marketplace_to_codex_home(
            self.config.codex_home.to_path_buf(),
            MarketplaceAddRequest {
                source: params.source,
                ref_name: params.ref_name,
                sparse_paths: params.sparse_paths.unwrap_or_default(),
            },
        )
        .await
        .map(|outcome| MarketplaceAddResponse {
            marketplace_name: outcome.marketplace_name,
            installed_root: outcome.installed_root,
            already_added: outcome.already_added,
        })
        .map_err(|err| match err {
            MarketplaceAddError::InvalidRequest(message) => invalid_request(message),
            MarketplaceAddError::Internal(message) => internal_error(message),
        })
    }

    /// Reloads the freshest config, mapping failures to an internal JSON-RPC
    /// error.
    async fn load_latest_config(
        &self,
        fallback_cwd: Option<PathBuf>,
    ) -> Result<Config, JSONRPCErrorError> {
        self.config_manager
            .load_latest_config(fallback_cwd)
            .await
            .map_err(|err| JSONRPCErrorError {
                code: INTERNAL_ERROR_CODE,
                message: format!("failed to reload config: {err}"),
                data: None,
            })
    }
}
|
||||
@@ -1,513 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Metadata key under which MCP tool-call requests carry the originating
/// thread id.
const MCP_TOOL_THREAD_ID_META_KEY: &str = "threadId";
|
||||
|
||||
/// Handles MCP-server JSON-RPC requests (OAuth login, refresh, status,
/// resource reads, tool calls).
#[derive(Clone)]
pub(crate) struct McpRequestProcessor {
    // Auth state for MCP OAuth flows (usage not visible in this chunk).
    auth_manager: Arc<AuthManager>,
    /// Source of threads and the MCP manager.
    thread_manager: Arc<ThreadManager>,
    /// Channel used for streaming responses to the client.
    outgoing: Arc<OutgoingMessageSender>,
    /// Used to reload the latest config before refreshes.
    config_manager: ConfigManager,
}
|
||||
|
||||
impl McpRequestProcessor {
|
||||
/// Creates a processor from its injected dependencies.
pub(crate) fn new(
    auth_manager: Arc<AuthManager>,
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    config_manager: ConfigManager,
) -> Self {
    Self {
        auth_manager,
        thread_manager,
        outgoing,
        config_manager,
    }
}
|
||||
|
||||
pub(crate) async fn mcp_server_oauth_login(
|
||||
&self,
|
||||
params: McpServerOauthLoginParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.mcp_server_oauth_login_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn mcp_server_refresh(
|
||||
&self,
|
||||
params: Option<()>,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.mcp_server_refresh_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn mcp_server_status_list(
|
||||
&self,
|
||||
request_id: &ConnectionRequestId,
|
||||
params: ListMcpServerStatusParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.list_mcp_server_status(request_id, params)
|
||||
.await
|
||||
.map(|()| None)
|
||||
}
|
||||
|
||||
pub(crate) async fn mcp_resource_read(
|
||||
&self,
|
||||
request_id: &ConnectionRequestId,
|
||||
params: McpResourceReadParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.read_mcp_resource(request_id, params)
|
||||
.await
|
||||
.map(|()| None)
|
||||
}
|
||||
|
||||
pub(crate) async fn mcp_server_tool_call(
|
||||
&self,
|
||||
request_id: &ConnectionRequestId,
|
||||
params: McpServerToolCallParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.call_mcp_server_tool(request_id, params)
|
||||
.await
|
||||
.map(|()| None)
|
||||
}
|
||||
|
||||
/// Reloads the latest config and queues an MCP server refresh for every
/// thread. The params are currently unused.
async fn mcp_server_refresh_response(
    &self,
    _params: Option<()>,
) -> Result<McpServerRefreshResponse, JSONRPCErrorError> {
    let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
    Self::queue_mcp_server_refresh_for_config(&self.thread_manager, &config).await?;
    Ok(McpServerRefreshResponse {})
}
|
||||
|
||||
async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to reload config: {err}"),
|
||||
data: None,
|
||||
})
|
||||
}
|
||||
|
||||
/// Parses a thread-id string and fetches the corresponding live thread.
///
/// Both a malformed id and a missing thread are reported as invalid-request
/// errors; on success returns the parsed id together with the thread handle.
async fn load_thread(
    &self,
    thread_id: &str,
) -> Result<(ThreadId, Arc<CodexThread>), JSONRPCErrorError> {
    let thread_id = ThreadId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
        code: INVALID_REQUEST_ERROR_CODE,
        message: format!("invalid thread id: {err}"),
        data: None,
    })?;

    let thread = self
        .thread_manager
        .get_thread(thread_id)
        .await
        .map_err(|_| JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!("thread not found: {thread_id}"),
            data: None,
        })?;

    Ok((thread_id, thread))
}
|
||||
|
||||
pub(super) async fn queue_mcp_server_refresh_for_config(
|
||||
thread_manager: &Arc<ThreadManager>,
|
||||
config: &Config,
|
||||
) -> Result<(), JSONRPCErrorError> {
|
||||
let configured_servers = thread_manager
|
||||
.mcp_manager()
|
||||
.configured_servers(config)
|
||||
.await;
|
||||
let mcp_servers = match serde_json::to_value(configured_servers) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to serialize MCP servers: {err}"),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let mcp_oauth_credentials_store_mode =
|
||||
match serde_json::to_value(config.mcp_oauth_credentials_store_mode) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!(
|
||||
"failed to serialize MCP OAuth credentials store mode: {err}"
|
||||
),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let refresh_config = McpServerRefreshConfig {
|
||||
mcp_servers,
|
||||
mcp_oauth_credentials_store_mode,
|
||||
};
|
||||
|
||||
// Refresh requests are queued per thread; each thread rebuilds MCP connections on its next
|
||||
// active turn to avoid work for threads that never resume.
|
||||
thread_manager.refresh_mcp_servers(refresh_config).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles an MCP server OAuth login request: starts an OAuth flow for the
/// named streamable-HTTP MCP server and returns its authorization URL.
///
/// The flow's completion (success or failure) is reported later via a
/// `McpServerOauthLoginCompleted` server notification sent from a spawned
/// background task.
async fn mcp_server_oauth_login_response(
    &self,
    params: McpServerOauthLoginParams,
) -> Result<McpServerOauthLoginResponse, JSONRPCErrorError> {
    let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
    let McpServerOauthLoginParams {
        name,
        scopes,
        timeout_secs,
    } = params;

    // Resolve the named server from the effective runtime configuration.
    let configured_servers = self
        .thread_manager
        .mcp_manager()
        .configured_servers(&config)
        .await;
    let Some(server) = configured_servers.get(&name) else {
        return Err(invalid_request(format!(
            "No MCP server named '{name}' found."
        )));
    };

    // OAuth only applies to HTTP transports; other transports are rejected.
    let (url, http_headers, env_http_headers) = match &server.transport {
        McpServerTransportConfig::StreamableHttp {
            url,
            http_headers,
            env_http_headers,
            ..
        } => (url.clone(), http_headers.clone(), env_http_headers.clone()),
        _ => {
            return Err(invalid_request(
                "OAuth login is only supported for streamable HTTP servers.",
            ));
        }
    };

    // Only probe the server for supported scopes when neither the request
    // nor the server config pins an explicit scope list.
    let discovered_scopes = if scopes.is_none() && server.scopes.is_none() {
        discover_supported_scopes(&server.transport).await
    } else {
        None
    };
    let resolved_scopes =
        resolve_oauth_scopes(scopes, server.scopes.clone(), discovered_scopes);

    let handle = perform_oauth_login_return_url(
        &name,
        &url,
        config.mcp_oauth_credentials_store_mode,
        http_headers,
        env_http_headers,
        &resolved_scopes.scopes,
        server.oauth_resource.as_deref(),
        timeout_secs,
        config.mcp_oauth_callback_port,
        config.mcp_oauth_callback_url.as_deref(),
    )
    .await
    .map_err(|err| internal_error(format!("failed to login to MCP server '{name}': {err}")))?;
    let authorization_url = handle.authorization_url().to_string();
    let notification_name = name.clone();
    let outgoing = Arc::clone(&self.outgoing);

    // Wait for the login flow to finish in the background and notify clients
    // of the outcome.
    tokio::spawn(async move {
        let (success, error) = match handle.wait().await {
            Ok(()) => (true, None),
            Err(err) => (false, Some(err.to_string())),
        };

        let notification = ServerNotification::McpServerOauthLoginCompleted(
            McpServerOauthLoginCompletedNotification {
                name: notification_name,
                success,
                error,
            },
        );
        outgoing.send_server_notification(notification).await;
    });

    Ok(McpServerOauthLoginResponse { authorization_url })
}
|
||||
|
||||
/// Handles a list-MCP-server-status request by gathering everything the
/// snapshot needs (config, auth, runtime environment), then spawning a task
/// that computes the response and replies via `outgoing`. The JSON-RPC result
/// is therefore delivered asynchronously.
async fn list_mcp_server_status(
    &self,
    request_id: &ConnectionRequestId,
    params: ListMcpServerStatusParams,
) -> Result<(), JSONRPCErrorError> {
    let request = request_id.clone();

    let outgoing = Arc::clone(&self.outgoing);
    let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
    let mcp_config = config
        .to_mcp_config(self.thread_manager.plugins_manager().as_ref())
        .await;
    let auth = self.auth_manager.auth().await;
    let environment_manager = self.thread_manager.environment_manager();
    // Prefer the default environment; fall back to the local one.
    let runtime_environment = match environment_manager.default_environment() {
        Some(environment) => {
            // Status listing has no turn cwd. This fallback is used only
            // by executor-backed stdio MCPs whose config omits `cwd`.
            McpRuntimeEnvironment::new(environment, config.cwd.to_path_buf())
        }
        None => McpRuntimeEnvironment::new(
            environment_manager.local_environment(),
            config.cwd.to_path_buf(),
        ),
    };

    tokio::spawn(async move {
        Self::list_mcp_server_status_task(
            outgoing,
            request,
            params,
            config,
            mcp_config,
            auth,
            runtime_environment,
        )
        .await;
    });
    Ok(())
}
|
||||
|
||||
/// Background worker for `list_mcp_server_status`: computes the status
/// response and sends it as the JSON-RPC result for `request_id`.
async fn list_mcp_server_status_task(
    outgoing: Arc<OutgoingMessageSender>,
    request_id: ConnectionRequestId,
    params: ListMcpServerStatusParams,
    config: Config,
    mcp_config: codex_mcp::McpConfig,
    auth: Option<CodexAuth>,
    runtime_environment: McpRuntimeEnvironment,
) {
    let result = Self::list_mcp_server_status_response(
        request_id.request_id.to_string(),
        params,
        config,
        mcp_config,
        auth,
        runtime_environment,
    )
    .await;
    outgoing.send_result(request_id, result).await;
}
|
||||
|
||||
/// Computes the paginated list-MCP-server-status response.
///
/// Collects a status snapshot for the effective MCP config, unions server
/// names from the user config, the effective runtime config, and every
/// snapshot map, then pages through the sorted, deduplicated name list using
/// an index-based string cursor. A requested `limit` of 0 is clamped to 1.
async fn list_mcp_server_status_response(
    request_id: String,
    params: ListMcpServerStatusParams,
    config: Config,
    mcp_config: codex_mcp::McpConfig,
    auth: Option<CodexAuth>,
    runtime_environment: McpRuntimeEnvironment,
) -> Result<ListMcpServerStatusResponse, JSONRPCErrorError> {
    let detail = match params.detail.unwrap_or(McpServerStatusDetail::Full) {
        McpServerStatusDetail::Full => McpSnapshotDetail::Full,
        McpServerStatusDetail::ToolsAndAuthOnly => McpSnapshotDetail::ToolsAndAuthOnly,
    };

    let snapshot = collect_mcp_server_status_snapshot_with_detail(
        &mcp_config,
        auth.as_ref(),
        request_id,
        runtime_environment,
        detail,
    )
    .await;

    let effective_servers = effective_mcp_servers(&mcp_config, auth.as_ref());
    let McpServerStatusSnapshot {
        tools_by_server,
        resources,
        resource_templates,
        auth_statuses,
    } = snapshot;

    let mut server_names: Vec<String> = config
        .mcp_servers
        .keys()
        .cloned()
        // Include built-in/plugin MCP servers that are present in the
        // effective runtime config even when they are not user-declared in
        // `config.mcp_servers`.
        .chain(effective_servers.keys().cloned())
        .chain(auth_statuses.keys().cloned())
        .chain(resources.keys().cloned())
        .chain(resource_templates.keys().cloned())
        .collect();
    server_names.sort();
    server_names.dedup();

    let total = server_names.len();
    // Absent limit means "everything"; 0 is bumped to 1.
    let limit = params.limit.unwrap_or(total as u32).max(1) as usize;
    let effective_limit = limit.min(total);
    // The cursor is the index of the first entry of the requested page.
    let start = match params.cursor {
        Some(cursor) => match cursor.parse::<usize>() {
            Ok(idx) => idx,
            Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
        },
        None => 0,
    };

    if start > total {
        return Err(invalid_request(format!(
            "cursor {start} exceeds total MCP servers {total}"
        )));
    }

    let end = start.saturating_add(effective_limit).min(total);

    let data: Vec<McpServerStatus> = server_names[start..end]
        .iter()
        .map(|name| McpServerStatus {
            name: name.clone(),
            tools: tools_by_server.get(name).cloned().unwrap_or_default(),
            resources: resources.get(name).cloned().unwrap_or_default(),
            resource_templates: resource_templates.get(name).cloned().unwrap_or_default(),
            // Servers with no recorded auth status are reported as
            // `Unsupported`.
            auth_status: auth_statuses
                .get(name)
                .cloned()
                .unwrap_or(CoreMcpAuthStatus::Unsupported)
                .into(),
        })
        .collect();

    let next_cursor = if end < total {
        Some(end.to_string())
    } else {
        None
    };

    Ok(ListMcpServerStatusResponse { data, next_cursor })
}
|
||||
|
||||
/// Handles an MCP resource read.
///
/// When `thread_id` is provided, the read is routed through that thread's MCP
/// connections; otherwise a standalone read is performed against the latest
/// config. In both cases the read runs on a spawned task that sends the
/// JSON-RPC result via `outgoing`, so this function returns before the
/// response is delivered.
async fn read_mcp_resource(
    &self,
    request_id: &ConnectionRequestId,
    params: McpResourceReadParams,
) -> Result<(), JSONRPCErrorError> {
    let outgoing = Arc::clone(&self.outgoing);
    let McpResourceReadParams {
        thread_id,
        server,
        uri,
    } = params;

    if let Some(thread_id) = thread_id {
        let (_, thread) = self.load_thread(&thread_id).await?;
        let request_id = request_id.clone();

        tokio::spawn(async move {
            let result = thread.read_mcp_resource(&server, &uri).await;
            Self::send_mcp_resource_read_response(outgoing, request_id, result).await;
        });
        return Ok(());
    }

    // No thread: build a standalone MCP context from the latest config.
    let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
    let mcp_config = config
        .to_mcp_config(self.thread_manager.plugins_manager().as_ref())
        .await;
    let auth = self.auth_manager.auth().await;
    let runtime_environment = {
        let environment_manager = self.thread_manager.environment_manager();
        let environment = environment_manager
            .default_environment()
            .unwrap_or_else(|| environment_manager.local_environment());
        // Resource reads without a thread have no turn cwd. This fallback
        // is used only by executor-backed stdio MCPs whose config omits `cwd`.
        McpRuntimeEnvironment::new(environment, config.cwd.to_path_buf())
    };
    let request_id = request_id.clone();

    tokio::spawn(async move {
        let result = read_mcp_resource_without_thread(
            &mcp_config,
            auth.as_ref(),
            runtime_environment,
            &server,
            &uri,
        )
        .await
        .and_then(|result| serde_json::to_value(result).map_err(anyhow::Error::from));
        Self::send_mcp_resource_read_response(outgoing, request_id, result).await;
    });
    Ok(())
}
|
||||
|
||||
async fn send_mcp_resource_read_response(
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
request_id: ConnectionRequestId,
|
||||
result: anyhow::Result<serde_json::Value>,
|
||||
) {
|
||||
let result = result
|
||||
.map_err(|error| internal_error(format!("{error:#}")))
|
||||
.and_then(|result| {
|
||||
serde_json::from_value::<McpResourceReadResponse>(result).map_err(|error| {
|
||||
internal_error(format!(
|
||||
"failed to deserialize MCP resource read response: {error}"
|
||||
))
|
||||
})
|
||||
});
|
||||
outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
/// Handles an MCP server tool call: resolves the target thread, stamps the
/// thread id into the tool-call `_meta`, and runs the call on a spawned task
/// that sends the JSON-RPC result via `outgoing`.
async fn call_mcp_server_tool(
    &self,
    request_id: &ConnectionRequestId,
    params: McpServerToolCallParams,
) -> Result<(), JSONRPCErrorError> {
    let outgoing = Arc::clone(&self.outgoing);
    let thread_id = params.thread_id.clone();
    let (_, thread) = self.load_thread(&thread_id).await?;
    let meta = with_mcp_tool_call_thread_id_meta(params.meta, &thread_id);
    let request_id = request_id.clone();

    tokio::spawn(async move {
        let result = thread
            .call_mcp_tool(&params.server, &params.tool, params.arguments, meta)
            .await
            .map(McpServerToolCallResponse::from)
            .map_err(|error| internal_error(format!("{error:#}")));
        outgoing.send_result(request_id, result).await;
    });
    Ok(())
}
|
||||
}
|
||||
|
||||
fn with_mcp_tool_call_thread_id_meta(
|
||||
meta: Option<serde_json::Value>,
|
||||
thread_id: &str,
|
||||
) -> Option<serde_json::Value> {
|
||||
match meta {
|
||||
Some(serde_json::Value::Object(mut map)) => {
|
||||
map.insert(
|
||||
MCP_TOOL_THREAD_ID_META_KEY.to_string(),
|
||||
serde_json::Value::String(thread_id.to_string()),
|
||||
);
|
||||
Some(serde_json::Value::Object(map))
|
||||
}
|
||||
None => {
|
||||
let mut map = serde_json::Map::new();
|
||||
map.insert(
|
||||
MCP_TOOL_THREAD_ID_META_KEY.to_string(),
|
||||
serde_json::Value::String(thread_id.to_string()),
|
||||
);
|
||||
Some(serde_json::Value::Object(map))
|
||||
}
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
pub(super) fn environment_selection_error_message(err: CodexErr) -> String {
|
||||
match err {
|
||||
CodexErr::InvalidRequest(message) => message,
|
||||
err => err.to_string(),
|
||||
}
|
||||
}
|
||||
@@ -1,150 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::fuzzy_file_search::FuzzyFileSearchSession;
|
||||
use crate::fuzzy_file_search::run_fuzzy_file_search;
|
||||
use crate::fuzzy_file_search::start_fuzzy_file_search_session;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use codex_app_server_protocol::FuzzyFileSearchParams;
|
||||
use codex_app_server_protocol::FuzzyFileSearchResponse;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionStartParams;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionStartResponse;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionStopParams;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionStopResponse;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionUpdateParams;
|
||||
use codex_app_server_protocol::FuzzyFileSearchSessionUpdateResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
/// Handles fuzzy-file-search JSON-RPC requests, tracking in-flight one-shot
/// searches (for cancellation) and long-lived search sessions.
#[derive(Clone)]
pub(crate) struct SearchRequestProcessor {
    // Channel for sending results/notifications back to clients.
    outgoing: Arc<OutgoingMessageSender>,
    // One-shot searches keyed by cancellation token; setting the flag
    // cancels the corresponding in-flight search.
    pending_fuzzy_searches: Arc<Mutex<HashMap<String, Arc<AtomicBool>>>>,
    // Long-lived search sessions keyed by client-supplied session id.
    fuzzy_search_sessions: Arc<Mutex<HashMap<String, FuzzyFileSearchSession>>>,
}
|
||||
|
||||
impl SearchRequestProcessor {
    /// Creates a processor with empty search/session tables.
    pub(crate) fn new(outgoing: Arc<OutgoingMessageSender>) -> Self {
        Self {
            outgoing,
            pending_fuzzy_searches: Arc::new(Mutex::new(HashMap::new())),
            fuzzy_search_sessions: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Runs a one-shot fuzzy file search over `roots`.
    ///
    /// When `cancellation_token` is supplied, any in-flight search registered
    /// under the same token is cancelled first, and this search is registered
    /// under the token so a later call can cancel it. An empty query returns
    /// no results without searching.
    pub(crate) async fn fuzzy_file_search(
        &self,
        params: FuzzyFileSearchParams,
    ) -> Result<FuzzyFileSearchResponse, JSONRPCErrorError> {
        let FuzzyFileSearchParams {
            query,
            roots,
            cancellation_token,
        } = params;

        let cancel_flag = match cancellation_token.clone() {
            Some(token) => {
                let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
                // if a cancellation_token is provided and a pending_request exists for
                // that token, cancel it
                if let Some(existing) = pending_fuzzy_searches.get(&token) {
                    existing.store(true, Ordering::Relaxed);
                }
                let flag = Arc::new(AtomicBool::new(false));
                pending_fuzzy_searches.insert(token.clone(), flag.clone());
                flag
            }
            None => Arc::new(AtomicBool::new(false)),
        };

        let results = match query.as_str() {
            "" => vec![],
            _ => run_fuzzy_file_search(query, roots, cancel_flag.clone()).await,
        };

        // Deregister only if our flag is still the one registered for the
        // token; a newer search may have replaced it while this one ran.
        if let Some(token) = cancellation_token {
            let mut pending_fuzzy_searches = self.pending_fuzzy_searches.lock().await;
            if let Some(current_flag) = pending_fuzzy_searches.get(&token)
                && Arc::ptr_eq(current_flag, &cancel_flag)
            {
                pending_fuzzy_searches.remove(&token);
            }
        }

        Ok(FuzzyFileSearchResponse { files: results })
    }

    /// Starts a long-lived fuzzy search session over `roots` keyed by the
    /// client-supplied `session_id`. Inserting under an id that is already in
    /// use replaces the previous session.
    pub(crate) async fn fuzzy_file_search_session_start_response(
        &self,
        params: FuzzyFileSearchSessionStartParams,
    ) -> Result<FuzzyFileSearchSessionStartResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionStartParams { session_id, roots } = params;
        if session_id.is_empty() {
            return Err(invalid_request("sessionId must not be empty"));
        }

        let session =
            start_fuzzy_file_search_session(session_id.clone(), roots, self.outgoing.clone())
                .map_err(|err| {
                    internal_error(format!("failed to start fuzzy file search session: {err}"))
                })?;
        self.fuzzy_search_sessions
            .lock()
            .await
            .insert(session_id, session);
        Ok(FuzzyFileSearchSessionStartResponse {})
    }

    /// Updates the query of an existing session; errors when no session with
    /// `session_id` exists.
    pub(crate) async fn fuzzy_file_search_session_update_response(
        &self,
        params: FuzzyFileSearchSessionUpdateParams,
    ) -> Result<FuzzyFileSearchSessionUpdateResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionUpdateParams { session_id, query } = params;
        // Scope the lock so it is released before building the error.
        let found = {
            let sessions = self.fuzzy_search_sessions.lock().await;
            if let Some(session) = sessions.get(&session_id) {
                session.update_query(query);
                true
            } else {
                false
            }
        };
        if !found {
            return Err(invalid_request(format!(
                "fuzzy file search session not found: {session_id}"
            )));
        }

        Ok(FuzzyFileSearchSessionUpdateResponse {})
    }

    /// Stops and drops a session. Unknown ids are ignored, so stop is
    /// idempotent.
    pub(crate) async fn fuzzy_file_search_session_stop(
        &self,
        params: FuzzyFileSearchSessionStopParams,
    ) -> Result<FuzzyFileSearchSessionStopResponse, JSONRPCErrorError> {
        let FuzzyFileSearchSessionStopParams { session_id } = params;
        self.fuzzy_search_sessions.lock().await.remove(&session_id);

        Ok(FuzzyFileSearchSessionStopResponse {})
    }
}
|
||||
|
||||
fn invalid_request(message: impl Into<String>) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: message.into(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn internal_error(message: impl Into<String>) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: message.into(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
@@ -1,761 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// How long a thread must remain both subscriber-free and inactive before it
/// is unloaded.
pub(super) const THREAD_UNLOADING_DELAY: Duration = Duration::from_secs(30 * 60);

/// Shared handles a per-thread listener task needs; cloned into each task.
#[derive(Clone)]
pub(super) struct ListenerTaskContext {
    pub(super) thread_manager: Arc<ThreadManager>,
    pub(super) thread_state_manager: ThreadStateManager,
    pub(super) outgoing: Arc<OutgoingMessageSender>,
    // Threads currently being torn down; new attaches are rejected while a
    // thread id is present here.
    pub(super) pending_thread_unloads: Arc<Mutex<HashSet<ThreadId>>>,
    pub(super) analytics_events_client: AnalyticsEventsClient,
    pub(super) thread_watch_manager: ThreadWatchManager,
    pub(super) thread_list_state_permit: Arc<Semaphore>,
    pub(super) fallback_model_provider: String,
    pub(super) codex_home: PathBuf,
}
|
||||
|
||||
/// Tracks when a thread last lost its subscribers and last went inactive, to
/// decide when it may be unloaded. Each `(bool, Instant)` pair holds the
/// current value and the time it was last observed to change.
struct UnloadingState {
    // Idle duration required before the thread becomes unloadable.
    delay: Duration,
    has_subscribers_rx: watch::Receiver<bool>,
    has_subscribers: (bool, Instant),
    thread_status_rx: watch::Receiver<ThreadStatus>,
    is_active: (bool, Instant),
}
|
||||
|
||||
impl UnloadingState {
    /// Subscribes to the thread's connection and status watch channels,
    /// seeding the cached values from their current state. Returns `None`
    /// when either subscription is unavailable.
    async fn new(
        listener_task_context: &ListenerTaskContext,
        thread_id: ThreadId,
        delay: Duration,
    ) -> Option<Self> {
        let has_subscribers_rx = listener_task_context
            .thread_state_manager
            .subscribe_to_has_connections(thread_id)
            .await?;
        let thread_status_rx = listener_task_context
            .thread_watch_manager
            .subscribe(thread_id)
            .await?;
        let has_subscribers = (*has_subscribers_rx.borrow(), Instant::now());
        let is_active = (
            matches!(*thread_status_rx.borrow(), ThreadStatus::Active { .. }),
            Instant::now(),
        );
        Some(Self {
            delay,
            has_subscribers_rx,
            has_subscribers,
            thread_status_rx,
            is_active,
        })
    }

    /// Earliest instant at which the thread may be unloaded: `delay` after
    /// the later of "lost all subscribers" and "became inactive". `None`
    /// while the thread still has subscribers or is active.
    fn unloading_target(&self) -> Option<Instant> {
        match (self.has_subscribers, self.is_active) {
            ((false, has_no_subscribers_since), (false, is_inactive_since)) => {
                Some(std::cmp::max(has_no_subscribers_since, is_inactive_since) + self.delay)
            }
            _ => None,
        }
    }

    /// Refreshes the cached `(value, since)` pairs from the watch channels,
    /// resetting a timestamp only when its observed value actually changed.
    fn sync_receiver_values(&mut self) {
        let has_subscribers = *self.has_subscribers_rx.borrow();
        if self.has_subscribers.0 != has_subscribers {
            self.has_subscribers = (has_subscribers, Instant::now());
        }

        let is_active = matches!(*self.thread_status_rx.borrow(), ThreadStatus::Active { .. });
        if self.is_active.0 != is_active {
            self.is_active = (is_active, Instant::now());
        }
    }

    /// True when the unloading deadline has already passed.
    fn should_unload_now(&mut self) -> bool {
        self.sync_receiver_values();
        self.unloading_target()
            .is_some_and(|target| target <= Instant::now())
    }

    /// Called when activity is observed out-of-band while the cached state
    /// says inactive: pushes the inactive-since timestamp forward so the
    /// unload delay effectively restarts.
    fn note_thread_activity_observed(&mut self) {
        if !self.is_active.0 {
            self.is_active = (false, Instant::now());
        }
    }

    /// Waits until the unloading deadline elapses (returns `true`) or a watch
    /// channel closes (returns `false`), re-evaluating the deadline whenever
    /// either channel reports a change.
    async fn wait_for_unloading_trigger(&mut self) -> bool {
        loop {
            self.sync_receiver_values();
            let unloading_target = self.unloading_target();
            // Deadline already passed: trigger immediately.
            if let Some(target) = unloading_target
                && target <= Instant::now()
            {
                return true;
            }
            // Sleep until the deadline, or forever when no deadline is set.
            let unloading_sleep = async {
                if let Some(target) = unloading_target {
                    tokio::time::sleep_until(target.into()).await;
                } else {
                    futures::future::pending::<()>().await;
                }
            };
            tokio::select! {
                _ = unloading_sleep => return true,
                changed = self.has_subscribers_rx.changed() => {
                    if changed.is_err() {
                        return false;
                    }
                    self.sync_receiver_values();
                },
                changed = self.thread_status_rx.changed() => {
                    if changed.is_err() {
                        return false;
                    }
                    self.sync_receiver_values();
                },
            }
        }
    }
}
|
||||
|
||||
/// Outcome of asking a thread to shut down within the timeout window.
pub(super) enum ThreadShutdownResult {
    // Shutdown completed cleanly.
    Complete,
    // The shutdown request could not be submitted to the thread.
    SubmitFailed,
    // The thread did not finish shutting down before the timeout elapsed.
    TimedOut,
}
|
||||
|
||||
/// Outcome of attaching a connection as a listener of a thread.
pub(super) enum EnsureConversationListenerResult {
    // The connection was subscribed and a listener task is running.
    Attached,
    // The connection was no longer open, so no subscription was made.
    ConnectionClosed,
}
|
||||
|
||||
/// Subscribes `connection_id` to `conversation_id`'s events and ensures a
/// listener task is running for that thread.
///
/// Attaching is rejected with an invalid-request error when the thread is
/// unknown or currently pending unload. If starting the listener task fails
/// after the subscription was made, the subscription is rolled back.
#[expect(
    clippy::await_holding_invalid_type,
    reason = "listener subscription must be serialized against pending unloads"
)]
pub(super) async fn ensure_conversation_listener(
    listener_task_context: ListenerTaskContext,
    conversation_id: ThreadId,
    connection_id: ConnectionId,
    raw_events_enabled: bool,
) -> Result<EnsureConversationListenerResult, JSONRPCErrorError> {
    let conversation = match listener_task_context
        .thread_manager
        .get_thread(conversation_id)
        .await
    {
        Ok(conv) => conv,
        Err(_) => {
            return Err(JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!("thread not found: {conversation_id}"),
                data: None,
            });
        }
    };
    // Hold the pending-unloads lock across the subscription so a concurrent
    // unload cannot race the attach.
    let thread_state = {
        let pending_thread_unloads = listener_task_context.pending_thread_unloads.lock().await;
        if pending_thread_unloads.contains(&conversation_id) {
            return Err(JSONRPCErrorError {
                code: INVALID_REQUEST_ERROR_CODE,
                message: format!(
                    "thread {conversation_id} is closing; retry after the thread is closed"
                ),
                data: None,
            });
        }
        let Some(thread_state) = listener_task_context
            .thread_state_manager
            .try_ensure_connection_subscribed(conversation_id, connection_id, raw_events_enabled)
            .await
        else {
            return Ok(EnsureConversationListenerResult::ConnectionClosed);
        };
        thread_state
    };
    if let Err(error) = ensure_listener_task_running(
        listener_task_context.clone(),
        conversation_id,
        conversation,
        thread_state,
    )
    .await
    {
        // Roll back the subscription made above; best-effort.
        let _ = listener_task_context
            .thread_state_manager
            .unsubscribe_connection_from_thread(conversation_id, connection_id)
            .await;
        return Err(error);
    }
    Ok(EnsureConversationListenerResult::Attached)
}
|
||||
|
||||
pub(super) fn log_listener_attach_result(
|
||||
result: Result<EnsureConversationListenerResult, JSONRPCErrorError>,
|
||||
thread_id: ThreadId,
|
||||
connection_id: ConnectionId,
|
||||
thread_kind: &'static str,
|
||||
) {
|
||||
match result {
|
||||
Ok(EnsureConversationListenerResult::Attached) => {}
|
||||
Ok(EnsureConversationListenerResult::ConnectionClosed) => {
|
||||
tracing::debug!(
|
||||
thread_id = %thread_id,
|
||||
connection_id = ?connection_id,
|
||||
"skipping auto-attach for closed connection"
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
"failed to attach listener for {thread_kind} {thread_id}: {message}",
|
||||
message = err.message
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures exactly one listener task is running for `conversation_id`.
///
/// If a listener already matches the conversation this is a no-op. Otherwise
/// it registers a new listener (superseding any previous one via the cancel
/// channel) and spawns a task that multiplexes, via `tokio::select!`:
/// listener commands, conversation events (forwarded to subscribed
/// connections), and the idle-unload trigger. The task clears its listener
/// registration on exit unless a newer listener has replaced it.
pub(super) async fn ensure_listener_task_running(
    listener_task_context: ListenerTaskContext,
    conversation_id: ThreadId,
    conversation: Arc<CodexThread>,
    thread_state: Arc<Mutex<ThreadState>>,
) -> Result<(), JSONRPCErrorError> {
    let (cancel_tx, mut cancel_rx) = oneshot::channel();
    // `UnloadingState::new` fails only when the thread's watch channels are
    // gone, which is treated as "thread is closing".
    let Some(mut unloading_state) = UnloadingState::new(
        &listener_task_context,
        conversation_id,
        THREAD_UNLOADING_DELAY,
    )
    .await
    else {
        return Err(JSONRPCErrorError {
            code: INVALID_REQUEST_ERROR_CODE,
            message: format!(
                "thread {conversation_id} is closing; retry after the thread is closed"
            ),
            data: None,
        });
    };
    let (mut listener_command_rx, listener_generation) = {
        let mut thread_state = thread_state.lock().await;
        // An up-to-date listener already exists; nothing to do.
        if thread_state.listener_matches(&conversation) {
            return Ok(());
        }
        thread_state.set_listener(cancel_tx, &conversation)
    };
    let ListenerTaskContext {
        outgoing,
        thread_manager,
        thread_state_manager,
        pending_thread_unloads,
        analytics_events_client,
        thread_watch_manager,
        thread_list_state_permit,
        fallback_model_provider,
        codex_home,
    } = listener_task_context;
    let outgoing_for_task = Arc::clone(&outgoing);
    tokio::spawn(async move {
        loop {
            tokio::select! {
                biased;
                _ = &mut cancel_rx => {
                    // Listener was superseded or the thread is being torn down.
                    break;
                }
                listener_command = listener_command_rx.recv() => {
                    let Some(listener_command) = listener_command else {
                        break;
                    };
                    handle_thread_listener_command(
                        conversation_id,
                        &conversation,
                        codex_home.as_path(),
                        &thread_state_manager,
                        &thread_state,
                        &thread_watch_manager,
                        &outgoing_for_task,
                        &pending_thread_unloads,
                        listener_command,
                    )
                    .await;
                }
                event = conversation.next_event() => {
                    let event = match event {
                        Ok(event) => event,
                        Err(err) => {
                            tracing::warn!("thread.next_event() failed with: {err}");
                            break;
                        }
                    };

                    // Track the event before emitting any typed translations
                    // so thread-local state such as raw event opt-in stays
                    // synchronized with the conversation.
                    let raw_events_enabled = {
                        let mut thread_state = thread_state.lock().await;
                        thread_state.track_current_turn_event(&event.id, &event.msg);
                        thread_state.experimental_raw_events
                    };
                    let subscribed_connection_ids = thread_state_manager
                        .subscribed_connection_ids(conversation_id)
                        .await;
                    let thread_outgoing = ThreadScopedOutgoingMessageSender::new(
                        outgoing_for_task.clone(),
                        subscribed_connection_ids,
                        conversation_id,
                    );

                    // Raw response items are suppressed for connections that
                    // did not opt into raw events; only hook-prompt completion
                    // may still be emitted for them.
                    if let EventMsg::RawResponseItem(raw_response_item_event) = &event.msg
                        && !raw_events_enabled
                    {
                        maybe_emit_hook_prompt_item_completed(
                            conversation_id,
                            &event.id,
                            &raw_response_item_event.item,
                            &thread_outgoing,
                        )
                        .await;
                        continue;
                    }

                    apply_bespoke_event_handling(
                        event.clone(),
                        conversation_id,
                        conversation.clone(),
                        thread_manager.clone(),
                        Some(analytics_events_client.clone()),
                        thread_outgoing,
                        thread_state.clone(),
                        thread_watch_manager.clone(),
                        thread_list_state_permit.clone(),
                        fallback_model_provider.clone(),
                        codex_home.as_path(),
                    )
                    .await;
                }
                unloading_watchers_open = unloading_state.wait_for_unloading_trigger() => {
                    // `false` means a watch channel closed; stop listening.
                    if !unloading_watchers_open {
                        break;
                    }
                    if !unloading_state.should_unload_now() {
                        continue;
                    }
                    // The agent can still be running even when the watch
                    // channel says inactive; treat that as fresh activity.
                    if matches!(conversation.agent_status().await, AgentStatus::Running) {
                        unloading_state.note_thread_activity_observed();
                        continue;
                    }
                    {
                        let mut pending_thread_unloads = pending_thread_unloads.lock().await;
                        if pending_thread_unloads.contains(&conversation_id) {
                            continue;
                        }
                        // Re-check under the lock before committing to unload.
                        if !unloading_state.should_unload_now() {
                            continue;
                        }
                        pending_thread_unloads.insert(conversation_id);
                    }
                    unload_thread_without_subscribers(
                        thread_manager.clone(),
                        outgoing_for_task.clone(),
                        pending_thread_unloads.clone(),
                        thread_state_manager.clone(),
                        thread_watch_manager.clone(),
                        conversation_id,
                        conversation.clone(),
                    )
                    .await;
                    break;
                }
            }
        }

        // Only clear the registration if no newer listener replaced this one.
        let mut thread_state = thread_state.lock().await;
        if thread_state.listener_generation == listener_generation {
            thread_state.clear_listener();
        }
    });
    Ok(())
}
|
||||
|
||||
pub(super) async fn wait_for_thread_shutdown(thread: &Arc<CodexThread>) -> ThreadShutdownResult {
|
||||
match tokio::time::timeout(Duration::from_secs(10), thread.shutdown_and_wait()).await {
|
||||
Ok(Ok(())) => ThreadShutdownResult::Complete,
|
||||
Ok(Err(_)) => ThreadShutdownResult::SubmitFailed,
|
||||
Err(_) => ThreadShutdownResult::TimedOut,
|
||||
}
|
||||
}
|
||||
|
||||
/// Shuts down and unloads a thread that has no subscribers and is idle.
///
/// Cancels pending server->client requests for the thread and drops its
/// state, then finalizes teardown on a spawned task. Every outcome removes
/// the thread from `pending_thread_unloads` so future attaches can proceed;
/// a `ThreadClosed` notification is sent only on a clean removal.
pub(super) async fn unload_thread_without_subscribers(
    thread_manager: Arc<ThreadManager>,
    outgoing: Arc<OutgoingMessageSender>,
    pending_thread_unloads: Arc<Mutex<HashSet<ThreadId>>>,
    thread_state_manager: ThreadStateManager,
    thread_watch_manager: ThreadWatchManager,
    thread_id: ThreadId,
    thread: Arc<CodexThread>,
) {
    info!("thread {thread_id} has no subscribers and is idle; shutting down");

    // Any pending app-server -> client requests for this thread can no longer be
    // answered; cancel their callbacks before shutdown/unload.
    outgoing
        .cancel_requests_for_thread(thread_id, /*error*/ None)
        .await;
    thread_state_manager.remove_thread_state(thread_id).await;

    tokio::spawn(async move {
        match wait_for_thread_shutdown(&thread).await {
            ThreadShutdownResult::Complete => {
                // A concurrent removal may have beaten us; still clean up the
                // watch entry and pending-unload marker, but skip the
                // notification.
                if thread_manager.remove_thread(&thread_id).await.is_none() {
                    info!("thread {thread_id} was already removed before teardown finalized");
                    thread_watch_manager
                        .remove_thread(&thread_id.to_string())
                        .await;
                    pending_thread_unloads.lock().await.remove(&thread_id);
                    return;
                }
                thread_watch_manager
                    .remove_thread(&thread_id.to_string())
                    .await;
                let notification = ThreadClosedNotification {
                    thread_id: thread_id.to_string(),
                };
                outgoing
                    .send_server_notification(ServerNotification::ThreadClosed(notification))
                    .await;
                pending_thread_unloads.lock().await.remove(&thread_id);
            }
            ThreadShutdownResult::SubmitFailed => {
                pending_thread_unloads.lock().await.remove(&thread_id);
                warn!("failed to submit Shutdown to thread {thread_id}");
            }
            ThreadShutdownResult::TimedOut => {
                // Leave the thread loaded; it may still be doing work.
                pending_thread_unloads.lock().await.remove(&thread_id);
                warn!("thread {thread_id} shutdown timed out; leaving thread loaded");
            }
        }
    });
}
|
||||
|
||||
/// Dispatches a single `ThreadListenerCommand` received on a thread's
/// listener channel to the matching handler or outgoing notification.
///
/// Pure delegation: each arm either forwards to a dedicated handler
/// (resume responses, server-request resolution) or emits the corresponding
/// server notification for the thread identified by `conversation_id`.
#[allow(clippy::too_many_arguments)]
pub(super) async fn handle_thread_listener_command(
    conversation_id: ThreadId,
    conversation: &Arc<CodexThread>,
    codex_home: &Path,
    thread_state_manager: &ThreadStateManager,
    thread_state: &Arc<Mutex<ThreadState>>,
    thread_watch_manager: &ThreadWatchManager,
    outgoing: &Arc<OutgoingMessageSender>,
    pending_thread_unloads: &Arc<Mutex<HashSet<ThreadId>>>,
    listener_command: ThreadListenerCommand,
) {
    match listener_command {
        ThreadListenerCommand::SendThreadResumeResponse(resume_request) => {
            handle_pending_thread_resume_request(
                conversation_id,
                conversation,
                codex_home,
                thread_state_manager,
                thread_state,
                thread_watch_manager,
                outgoing,
                pending_thread_unloads,
                *resume_request,
            )
            .await;
        }
        ThreadListenerCommand::EmitThreadGoalUpdated { goal } => {
            // Goal updates from the listener carry no turn association.
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalUpdated(
                    ThreadGoalUpdatedNotification {
                        thread_id: conversation_id.to_string(),
                        turn_id: None,
                        goal,
                    },
                ))
                .await;
        }
        ThreadListenerCommand::EmitThreadGoalCleared => {
            outgoing
                .send_server_notification(ServerNotification::ThreadGoalCleared(
                    ThreadGoalClearedNotification {
                        thread_id: conversation_id.to_string(),
                    },
                ))
                .await;
        }
        ThreadListenerCommand::EmitThreadGoalSnapshot { state_db } => {
            send_thread_goal_snapshot_notification(outgoing, conversation_id, &state_db).await;
        }
        ThreadListenerCommand::ResolveServerRequest {
            request_id,
            completion_tx,
        } => {
            resolve_pending_server_request(
                conversation_id,
                thread_state_manager,
                outgoing,
                request_id,
            )
            .await;
            // Signal the requester that resolution finished; a dropped
            // receiver is fine, hence the ignored result.
            let _ = completion_tx.send(());
        }
    }
}
|
||||
|
||||
/// Completes a `thread/resume` request against an already-running thread.
///
/// Composes the resume response from the pending request's snapshots (thread
/// summary, config, history), attaches the requesting connection to the
/// thread, then replays in-flight server requests and (optionally) goal
/// state to that connection. The subscription step is serialized against
/// pending unloads by holding the `pending_thread_unloads` lock across the
/// subscription await — that is intentional, see the `#[expect]` below.
#[allow(clippy::too_many_arguments)]
#[expect(
    clippy::await_holding_invalid_type,
    reason = "running-thread resume subscription must be serialized against pending unloads"
)]
pub(super) async fn handle_pending_thread_resume_request(
    conversation_id: ThreadId,
    conversation: &Arc<CodexThread>,
    _codex_home: &Path,
    thread_state_manager: &ThreadStateManager,
    thread_state: &Arc<Mutex<ThreadState>>,
    thread_watch_manager: &ThreadWatchManager,
    outgoing: &Arc<OutgoingMessageSender>,
    pending_thread_unloads: &Arc<Mutex<HashSet<ThreadId>>>,
    pending: crate::thread_state::PendingThreadResumeRequest,
) {
    // Snapshot the active turn under a short-lived lock; everything below
    // works off this snapshot.
    let active_turn = {
        let state = thread_state.lock().await;
        state.active_turn_snapshot()
    };
    tracing::debug!(
        thread_id = %conversation_id,
        request_id = ?pending.request_id,
        active_turn_present = active_turn.is_some(),
        active_turn_id = ?active_turn.as_ref().map(|turn| turn.id.as_str()),
        active_turn_status = ?active_turn.as_ref().map(|turn| &turn.status),
        "composing running thread resume response"
    );
    // Live if either the agent reports Running or the snapshotted turn is
    // still InProgress.
    let has_live_in_progress_turn =
        matches!(conversation.agent_status().await, AgentStatus::Running)
            || active_turn
                .as_ref()
                .is_some_and(|turn| matches!(turn.status, TurnStatus::InProgress));

    let request_id = pending.request_id;
    let connection_id = request_id.connection_id;
    let mut thread = pending.thread_summary;
    if pending.include_turns {
        populate_thread_turns_from_history(
            &mut thread,
            &pending.history_items,
            active_turn.as_ref(),
        );
    }

    let thread_status = thread_watch_manager
        .loaded_status_for_thread(&thread.id)
        .await;

    set_thread_status_and_interrupt_stale_turns(
        &mut thread,
        thread_status,
        has_live_in_progress_turn,
    );

    // Subscription must not race an in-flight unload of this thread; the
    // lock is held across try_add_connection_to_thread on purpose.
    {
        let pending_thread_unloads = pending_thread_unloads.lock().await;
        if pending_thread_unloads.contains(&conversation_id) {
            // Don't hold the lock while sending the error response.
            drop(pending_thread_unloads);
            outgoing
                .send_error(
                    request_id,
                    invalid_request(format!(
                        "thread {conversation_id} is closing; retry thread/resume after the thread is closed"
                    )),
                )
                .await;
            return;
        }
        if !thread_state_manager
            .try_add_connection_to_thread(conversation_id, connection_id)
            .await
        {
            tracing::debug!(
                thread_id = %conversation_id,
                connection_id = ?connection_id,
                "skipping running thread resume for closed connection"
            );
            return;
        }
    }

    if pending.emit_thread_goal_update
        && let Err(err) = conversation.apply_goal_resume_runtime_effects().await
    {
        tracing::warn!("failed to apply goal resume runtime effects: {err}");
    }

    let ThreadConfigSnapshot {
        model,
        model_provider_id,
        service_tier,
        approval_policy,
        approvals_reviewer,
        permission_profile,
        active_permission_profile,
        cwd,
        reasoning_effort,
        ..
    } = pending.config_snapshot;
    let instruction_sources = pending.instruction_sources;
    let sandbox = thread_response_sandbox_policy(&permission_profile, cwd.as_path());
    let active_permission_profile =
        thread_response_active_permission_profile(active_permission_profile);

    let response = ThreadResumeResponse {
        thread,
        model,
        model_provider: model_provider_id,
        service_tier,
        cwd,
        instruction_sources,
        approval_policy: approval_policy.into(),
        approvals_reviewer: approvals_reviewer.into(),
        sandbox,
        permission_profile: Some(permission_profile.into()),
        active_permission_profile,
        reasoning_effort,
    };
    // Clone taken before the response is moved into send_response.
    let token_usage_thread = pending.include_turns.then(|| response.thread.clone());
    outgoing.send_response(request_id, response).await;
    // Match cold resume: metadata-only resume should attach the listener without
    // paying the cost of turn reconstruction for historical usage replay.
    if let Some(token_usage_thread) = token_usage_thread {
        let token_usage_turn_id = latest_token_usage_turn_id_from_rollout_items(
            &pending.history_items,
            token_usage_thread.turns.as_slice(),
        );
        // Rejoining a loaded thread has the same UI contract as a cold resume, but
        // uses the live conversation state instead of reconstructing a new session.
        send_thread_token_usage_update_to_connection(
            outgoing,
            connection_id,
            conversation_id,
            &token_usage_thread,
            conversation.as_ref(),
            token_usage_turn_id,
        )
        .await;
    }
    if pending.emit_thread_goal_update {
        if let Some(state_db) = pending.thread_goal_state_db {
            send_thread_goal_snapshot_notification(outgoing, conversation_id, &state_db).await;
        } else {
            tracing::warn!(
                thread_id = %conversation_id,
                "state db unavailable when reading thread goal for running thread resume"
            );
        }
    }
    outgoing
        .replay_requests_to_connection_for_thread(connection_id, conversation_id)
        .await;
    // App-server owns resume response and snapshot ordering, so wait until
    // replay completes before letting core start goal continuation.
    if pending.emit_thread_goal_update
        && let Err(err) = conversation.continue_active_goal_if_idle().await
    {
        tracing::warn!("failed to continue active goal after running-thread resume: {err}");
    }
}
|
||||
|
||||
pub(super) async fn send_thread_goal_snapshot_notification(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
thread_id: ThreadId,
|
||||
state_db: &StateDbHandle,
|
||||
) {
|
||||
match state_db.get_thread_goal(thread_id).await {
|
||||
Ok(Some(goal)) => {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ThreadGoalUpdated(
|
||||
ThreadGoalUpdatedNotification {
|
||||
thread_id: thread_id.to_string(),
|
||||
turn_id: None,
|
||||
goal: api_thread_goal_from_state(goal),
|
||||
},
|
||||
))
|
||||
.await;
|
||||
}
|
||||
Ok(None) => {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ThreadGoalCleared(
|
||||
ThreadGoalClearedNotification {
|
||||
thread_id: thread_id.to_string(),
|
||||
},
|
||||
))
|
||||
.await;
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
thread_id = %thread_id,
|
||||
"failed to read thread goal for resume snapshot: {err}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn populate_thread_turns_from_history(
|
||||
thread: &mut Thread,
|
||||
items: &[RolloutItem],
|
||||
active_turn: Option<&Turn>,
|
||||
) {
|
||||
let mut turns = build_api_turns_from_rollout_items(items);
|
||||
if let Some(active_turn) = active_turn {
|
||||
merge_turn_history_with_active_turn(&mut turns, active_turn.clone());
|
||||
}
|
||||
thread.turns = turns;
|
||||
}
|
||||
|
||||
pub(super) async fn resolve_pending_server_request(
|
||||
conversation_id: ThreadId,
|
||||
thread_state_manager: &ThreadStateManager,
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
request_id: RequestId,
|
||||
) {
|
||||
let thread_id = conversation_id.to_string();
|
||||
let subscribed_connection_ids = thread_state_manager
|
||||
.subscribed_connection_ids(conversation_id)
|
||||
.await;
|
||||
let outgoing = ThreadScopedOutgoingMessageSender::new(
|
||||
outgoing.clone(),
|
||||
subscribed_connection_ids,
|
||||
conversation_id,
|
||||
);
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::ServerRequestResolved(
|
||||
ServerRequestResolvedNotification {
|
||||
thread_id,
|
||||
request_id,
|
||||
},
|
||||
))
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(super) fn merge_turn_history_with_active_turn(turns: &mut Vec<Turn>, active_turn: Turn) {
|
||||
turns.retain(|turn| turn.id != active_turn.id);
|
||||
turns.push(active_turn);
|
||||
}
|
||||
|
||||
pub(super) fn set_thread_status_and_interrupt_stale_turns(
|
||||
thread: &mut Thread,
|
||||
loaded_status: ThreadStatus,
|
||||
has_live_in_progress_turn: bool,
|
||||
) {
|
||||
let status = resolve_thread_status(loaded_status, has_live_in_progress_turn);
|
||||
if !matches!(status, ThreadStatus::Active { .. }) {
|
||||
for turn in &mut thread.turns {
|
||||
if matches!(turn.status, TurnStatus::InProgress) {
|
||||
turn.status = TurnStatus::Interrupted;
|
||||
}
|
||||
}
|
||||
}
|
||||
thread.status = status;
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,305 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
pub(super) async fn open_state_db_for_direct_thread_lookup(
|
||||
config: &Config,
|
||||
) -> Option<StateDbHandle> {
|
||||
StateRuntime::init(config.sqlite_home.clone(), config.model_provider_id.clone())
|
||||
.await
|
||||
.ok()
|
||||
}
|
||||
|
||||
/// Builds a `ConversationSummary` from the head of the rollout file at
/// `path`.
///
/// Errors if the rollout is empty or does not start with session metadata.
/// Prefers the rich summary produced by `extract_conversation_summary`
/// (which includes a user-message preview); when no user message is found
/// in the head, falls back to a preview-less summary built directly from
/// the session metadata.
pub(crate) async fn read_summary_from_rollout(
    path: &Path,
    fallback_provider: &str,
) -> std::io::Result<ConversationSummary> {
    let head = read_head_for_summary(path).await?;

    let Some(first) = head.first() else {
        return Err(IoError::other(format!(
            "rollout at {} is empty",
            path.display()
        )));
    };

    let session_meta_line =
        serde_json::from_value::<SessionMetaLine>(first.clone()).map_err(|_| {
            IoError::other(format!(
                "rollout at {} does not start with session metadata",
                path.display()
            ))
        })?;
    let SessionMetaLine {
        meta: session_meta,
        git,
    } = session_meta_line;
    // Overlay agent nickname/role onto the recorded session source before
    // either summary path reads it.
    let mut session_meta = session_meta;
    session_meta.source = with_thread_spawn_agent_metadata(
        session_meta.source.clone(),
        session_meta.agent_nickname.clone(),
        session_meta.agent_role.clone(),
    );

    // Empty timestamp strings are treated as "unknown".
    let created_at = if session_meta.timestamp.is_empty() {
        None
    } else {
        Some(session_meta.timestamp.as_str())
    };
    let updated_at = read_updated_at(path, created_at).await;
    // Preferred path: summary with a preview extracted from the first real
    // user message in the head.
    if let Some(summary) = extract_conversation_summary(
        path.to_path_buf(),
        &head,
        &session_meta,
        git.as_ref(),
        fallback_provider,
        updated_at.clone(),
    ) {
        return Ok(summary);
    }

    // Fallback: no user message found — build a preview-less summary.
    let timestamp = if session_meta.timestamp.is_empty() {
        None
    } else {
        Some(session_meta.timestamp.clone())
    };
    let model_provider = session_meta
        .model_provider
        .clone()
        .unwrap_or_else(|| fallback_provider.to_string());
    let git_info = git.as_ref().map(map_git_info);
    let updated_at = updated_at.or_else(|| timestamp.clone());

    Ok(ConversationSummary {
        conversation_id: session_meta.id,
        timestamp,
        updated_at,
        path: path.to_path_buf(),
        preview: String::new(),
        model_provider,
        cwd: session_meta.cwd,
        cli_version: session_meta.cli_version,
        source: session_meta.source,
        git_info,
    })
}
|
||||
|
||||
pub(crate) async fn read_rollout_items_from_rollout(
|
||||
path: &Path,
|
||||
) -> std::io::Result<Vec<RolloutItem>> {
|
||||
let items = match RolloutRecorder::get_rollout_history(path).await? {
|
||||
InitialHistory::New | InitialHistory::Cleared => Vec::new(),
|
||||
InitialHistory::Forked(items) => items,
|
||||
InitialHistory::Resumed(resumed) => resumed.history,
|
||||
};
|
||||
|
||||
Ok(items)
|
||||
}
|
||||
|
||||
fn extract_conversation_summary(
|
||||
path: PathBuf,
|
||||
head: &[serde_json::Value],
|
||||
session_meta: &SessionMeta,
|
||||
git: Option<&CoreGitInfo>,
|
||||
fallback_provider: &str,
|
||||
updated_at: Option<String>,
|
||||
) -> Option<ConversationSummary> {
|
||||
let preview = head
|
||||
.iter()
|
||||
.filter_map(|value| serde_json::from_value::<ResponseItem>(value.clone()).ok())
|
||||
.find_map(|item| match codex_core::parse_turn_item(&item) {
|
||||
Some(TurnItem::UserMessage(user)) => Some(user.message()),
|
||||
_ => None,
|
||||
})?;
|
||||
|
||||
let preview = match preview.find(USER_MESSAGE_BEGIN) {
|
||||
Some(idx) => preview[idx + USER_MESSAGE_BEGIN.len()..].trim(),
|
||||
None => preview.as_str(),
|
||||
};
|
||||
|
||||
let timestamp = if session_meta.timestamp.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(session_meta.timestamp.clone())
|
||||
};
|
||||
let conversation_id = session_meta.id;
|
||||
let model_provider = session_meta
|
||||
.model_provider
|
||||
.clone()
|
||||
.unwrap_or_else(|| fallback_provider.to_string());
|
||||
let git_info = git.map(map_git_info);
|
||||
let updated_at = updated_at.or_else(|| timestamp.clone());
|
||||
|
||||
Some(ConversationSummary {
|
||||
conversation_id,
|
||||
timestamp,
|
||||
updated_at,
|
||||
path,
|
||||
preview: preview.to_string(),
|
||||
model_provider,
|
||||
cwd: session_meta.cwd.clone(),
|
||||
cli_version: session_meta.cli_version.clone(),
|
||||
source: session_meta.source.clone(),
|
||||
git_info,
|
||||
})
|
||||
}
|
||||
|
||||
fn map_git_info(git_info: &CoreGitInfo) -> ConversationGitInfo {
|
||||
ConversationGitInfo {
|
||||
sha: git_info.commit_hash.as_ref().map(|sha| sha.0.clone()),
|
||||
branch: git_info.branch.clone(),
|
||||
origin_url: git_info.repository_url.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Overlays an agent nickname/role onto a `ThreadSpawn` sub-agent source.
///
/// Provided values win over the values already stored in the source; any
/// other source variant — or a call with neither nickname nor role — is
/// returned unchanged.
pub(super) fn with_thread_spawn_agent_metadata(
    source: codex_protocol::protocol::SessionSource,
    agent_nickname: Option<String>,
    agent_role: Option<String>,
) -> codex_protocol::protocol::SessionSource {
    use codex_protocol::protocol::SessionSource;
    use codex_protocol::protocol::SubAgentSource;

    // Nothing to overlay: hand the source back untouched.
    if agent_nickname.is_none() && agent_role.is_none() {
        return source;
    }

    match source {
        SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
            parent_thread_id,
            depth,
            agent_path,
            agent_nickname: existing_agent_nickname,
            agent_role: existing_agent_role,
        }) => SessionSource::SubAgent(SubAgentSource::ThreadSpawn {
            parent_thread_id,
            depth,
            agent_path,
            agent_nickname: agent_nickname.or(existing_agent_nickname),
            agent_role: agent_role.or(existing_agent_role),
        }),
        other => other,
    }
}
|
||||
|
||||
pub(super) fn thread_response_active_permission_profile(
|
||||
active_permission_profile: Option<codex_protocol::models::ActivePermissionProfile>,
|
||||
) -> Option<codex_app_server_protocol::ActivePermissionProfile> {
|
||||
active_permission_profile.map(Into::into)
|
||||
}
|
||||
|
||||
pub(super) fn apply_permission_profile_selection_to_config_overrides(
|
||||
overrides: &mut ConfigOverrides,
|
||||
permissions: Option<PermissionProfileSelectionParams>,
|
||||
) {
|
||||
let Some(PermissionProfileSelectionParams::Profile { id, modifications }) = permissions else {
|
||||
return;
|
||||
};
|
||||
overrides.default_permissions = Some(id);
|
||||
overrides
|
||||
.additional_writable_roots
|
||||
.extend(modifications.unwrap_or_default().into_iter().map(
|
||||
|modification| match modification {
|
||||
PermissionProfileModificationParams::AdditionalWritableRoot { path } => {
|
||||
path.to_path_buf()
|
||||
}
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
pub(super) fn thread_response_sandbox_policy(
|
||||
permission_profile: &codex_protocol::models::PermissionProfile,
|
||||
cwd: &Path,
|
||||
) -> codex_app_server_protocol::SandboxPolicy {
|
||||
let file_system_policy = permission_profile.file_system_sandbox_policy();
|
||||
let sandbox_policy = codex_sandboxing::compatibility_sandbox_policy_for_permission_profile(
|
||||
permission_profile,
|
||||
&file_system_policy,
|
||||
permission_profile.network_sandbox_policy(),
|
||||
cwd,
|
||||
);
|
||||
sandbox_policy.into()
|
||||
}
|
||||
|
||||
fn parse_datetime(timestamp: Option<&str>) -> Option<DateTime<Utc>> {
|
||||
timestamp.and_then(|ts| {
|
||||
chrono::DateTime::parse_from_rfc3339(ts)
|
||||
.ok()
|
||||
.map(|dt| dt.with_timezone(&chrono::Utc))
|
||||
})
|
||||
}
|
||||
|
||||
async fn read_updated_at(path: &Path, created_at: Option<&str>) -> Option<String> {
|
||||
let updated_at = tokio::fs::metadata(path)
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|meta| meta.modified().ok())
|
||||
.map(|modified| {
|
||||
let updated_at: DateTime<Utc> = modified.into();
|
||||
updated_at.to_rfc3339_opts(SecondsFormat::Millis, true)
|
||||
});
|
||||
updated_at.or_else(|| created_at.map(str::to_string))
|
||||
}
|
||||
|
||||
pub(super) fn thread_started_notification(mut thread: Thread) -> ThreadStartedNotification {
|
||||
thread.turns.clear();
|
||||
ThreadStartedNotification { thread }
|
||||
}
|
||||
|
||||
/// Converts a rollout `ConversationSummary` into an API `Thread` in the
/// `NotLoaded` state (no turns, no name).
///
/// Timestamps are parsed from the summary's RFC 3339 strings; unparseable or
/// missing values fall back to the created time, and ultimately to epoch 0.
/// The summary's cwd is normalized for the native platform; if that fails,
/// `fallback_cwd` is used instead.
pub(crate) fn summary_to_thread(
    summary: ConversationSummary,
    fallback_cwd: &AbsolutePathBuf,
) -> Thread {
    let ConversationSummary {
        conversation_id,
        path,
        preview,
        timestamp,
        updated_at,
        model_provider,
        cwd,
        cli_version,
        source,
        git_info,
    } = summary;

    let created_at = parse_datetime(timestamp.as_deref());
    // Unknown update time degrades to the creation time.
    let updated_at = parse_datetime(updated_at.as_deref()).or(created_at);
    let git_info = git_info.map(|info| ApiGitInfo {
        sha: info.sha,
        branch: info.branch,
        origin_url: info.origin_url,
    });
    let cwd =
        AbsolutePathBuf::relative_to_current_dir(path_utils::normalize_for_native_workdir(cwd))
            .unwrap_or_else(|err| {
                warn!(
                    path = %path.display(),
                    "failed to normalize thread cwd while summarizing thread: {err}"
                );
                fallback_cwd.clone()
            });

    Thread {
        id: conversation_id.to_string(),
        forked_from_id: None,
        preview,
        ephemeral: false,
        model_provider,
        // Missing timestamps are surfaced as epoch 0.
        created_at: created_at.map(|dt| dt.timestamp()).unwrap_or(0),
        updated_at: updated_at.map(|dt| dt.timestamp()).unwrap_or(0),
        status: ThreadStatus::NotLoaded,
        path: Some(path),
        cwd,
        cli_version,
        agent_nickname: source.get_nickname(),
        agent_role: source.get_agent_role(),
        source: source.into(),
        git_info,
        name: None,
        turns: Vec::new(),
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "thread_summary_tests.rs"]
|
||||
mod thread_summary_tests;
|
||||
@@ -1,68 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
use anyhow::Result;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// The preview must come from the first user message containing the
/// `USER_MESSAGE_BEGIN` marker's payload, skipping injected AGENTS.md
/// instruction messages and any prior-context prefix.
#[test]
fn extract_conversation_summary_prefers_plain_user_messages() -> Result<()> {
    let conversation_id = ThreadId::from_string("3f941c35-29b3-493b-b0a4-e25800d9aeb0")?;
    let timestamp = Some("2025-09-05T16:53:11.850Z".to_string());
    let path = PathBuf::from("rollout.jsonl");

    // Head fixture: session meta, an injected instructions message, then the
    // real user message with a prior-context prefix before the marker.
    let head = vec![
        json!({
            "id": conversation_id.to_string(),
            "timestamp": timestamp,
            "cwd": "/",
            "originator": "codex",
            "cli_version": "0.0.0",
            "model_provider": "test-provider"
        }),
        json!({
            "type": "message",
            "role": "user",
            "content": [{
                "type": "input_text",
                "text": "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\n<AGENTS.md contents>\n</INSTRUCTIONS>".to_string(),
            }],
        }),
        json!({
            "type": "message",
            "role": "user",
            "content": [{
                "type": "input_text",
                "text": format!("<prior context> {USER_MESSAGE_BEGIN}Count to 5"),
            }],
        }),
    ];

    let session_meta = serde_json::from_value::<SessionMeta>(head[0].clone())?;

    let summary = extract_conversation_summary(
        path.clone(),
        &head,
        &session_meta,
        /*git*/ None,
        "test-provider",
        timestamp.clone(),
    )
    .expect("summary");

    // Preview is only the text after the marker, trimmed.
    let expected = ConversationSummary {
        conversation_id,
        timestamp: timestamp.clone(),
        updated_at: timestamp,
        path,
        preview: "Count to 5".to_string(),
        model_provider: "test-provider".to_string(),
        cwd: PathBuf::from("/"),
        cli_version: "0.0.0".to_string(),
        source: codex_protocol::protocol::SessionSource::VSCode,
        git_info: None,
    };

    assert_eq!(summary, expected);
    Ok(())
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,103 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Handles Windows-sandbox setup requests from app-server clients.
#[derive(Clone)]
pub(crate) struct WindowsSandboxRequestProcessor {
    // Channel back to the requesting client(s).
    outgoing: Arc<OutgoingMessageSender>,
    // Base server config; supplies the default cwd when the request omits one.
    config: Arc<Config>,
    // Used to re-derive a cwd-specific config for the setup run.
    config_manager: ConfigManager,
}

impl WindowsSandboxRequestProcessor {
    pub(crate) fn new(
        outgoing: Arc<OutgoingMessageSender>,
        config: Arc<Config>,
        config_manager: ConfigManager,
    ) -> Self {
        Self {
            outgoing,
            config,
            config_manager,
        }
    }

    /// Entry point for the `windowsSandboxSetupStart` request. Always yields
    /// `None` as the payload because the response is sent eagerly by the
    /// inner handler before setup actually runs.
    pub(crate) async fn windows_sandbox_setup_start(
        &self,
        request_id: &ConnectionRequestId,
        params: WindowsSandboxSetupStartParams,
    ) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
        self.windows_sandbox_setup_start_inner(request_id, params)
            .await
            .map(|()| None)
    }

    /// Acknowledges the request immediately (`started: true`), then runs the
    /// sandbox setup on a background task and reports the outcome to the
    /// requesting connection via a `WindowsSandboxSetupCompleted`
    /// notification.
    async fn windows_sandbox_setup_start_inner(
        &self,
        request_id: &ConnectionRequestId,
        params: WindowsSandboxSetupStartParams,
    ) -> Result<(), JSONRPCErrorError> {
        // Respond before doing any work; completion is delivered async below.
        self.outgoing
            .send_response(
                request_id.clone(),
                WindowsSandboxSetupStartResponse { started: true },
            )
            .await;

        let mode = match params.mode {
            WindowsSandboxSetupMode::Elevated => CoreWindowsSandboxSetupMode::Elevated,
            WindowsSandboxSetupMode::Unelevated => CoreWindowsSandboxSetupMode::Unelevated,
        };
        let config = Arc::clone(&self.config);
        let config_manager = self.config_manager.clone();
        // Request cwd wins; otherwise fall back to the server config's cwd.
        let command_cwd = params
            .cwd
            .map(PathBuf::from)
            .unwrap_or_else(|| config.cwd.to_path_buf());
        let outgoing = Arc::clone(&self.outgoing);
        let connection_id = request_id.connection_id;

        tokio::spawn(async move {
            // Re-derive config for the requested cwd so sandbox policy and
            // profile reflect that directory, not the server's startup cwd.
            let derived_config = config_manager
                .load_for_cwd(
                    /*request_overrides*/ None,
                    ConfigOverrides {
                        cwd: Some(command_cwd.clone()),
                        ..Default::default()
                    },
                    Some(command_cwd.clone()),
                )
                .await;
            let setup_result = match derived_config {
                Ok(config) => {
                    let setup_request = WindowsSandboxSetupRequest {
                        mode,
                        policy: config
                            .permissions
                            .legacy_sandbox_policy(config.cwd.as_path()),
                        policy_cwd: config.cwd.to_path_buf(),
                        command_cwd,
                        env_map: std::env::vars().collect(),
                        codex_home: config.codex_home.to_path_buf(),
                        active_profile: config.active_profile.clone(),
                    };
                    codex_core::windows_sandbox::run_windows_sandbox_setup(setup_request).await
                }
                // Config derivation failure is reported as a setup failure.
                Err(err) => Err(err.into()),
            };
            let notification = WindowsSandboxSetupCompletedNotification {
                mode: match mode {
                    CoreWindowsSandboxSetupMode::Elevated => WindowsSandboxSetupMode::Elevated,
                    CoreWindowsSandboxSetupMode::Unelevated => WindowsSandboxSetupMode::Unelevated,
                },
                success: setup_result.is_ok(),
                error: setup_result.err().map(|err| err.to_string()),
            };
            // Only the requesting connection is notified of completion.
            outgoing
                .send_server_notification_to_connections(
                    &[connection_id],
                    ServerNotification::WindowsSandboxSetupCompleted(notification),
                )
                .await;
        });
        Ok(())
    }
}
|
||||
@@ -61,6 +61,7 @@ pub(crate) enum ThreadListenerCommand {
|
||||
/// Accumulated per-turn bookkeeping for a thread listener.
#[derive(Default, Clone)]
pub(crate) struct TurnSummary {
    // Turn start time; presumably a unix timestamp — TODO confirm units.
    pub(crate) started_at: Option<i64>,
    // Identifiers of file changes already reported as started — presumably
    // used to dedupe start events; verify against the emitting code.
    pub(crate) file_change_started: HashSet<String>,
    // Identifiers of command executions already reported as started.
    pub(crate) command_execution_started: HashSet<String>,
    // Most recent error observed during the turn, if any.
    pub(crate) last_error: Option<TurnError>,
}
|
||||
|
||||
@@ -1,232 +0,0 @@
|
||||
use crate::message_processor::ConnectionSessionState;
|
||||
use crate::outgoing_message::OutgoingEnvelope;
|
||||
use codex_app_server_protocol::ExperimentalApi;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
|
||||
pub use codex_app_server_transport::AppServerTransport;
|
||||
pub(crate) use codex_app_server_transport::CHANNEL_CAPACITY;
|
||||
pub(crate) use codex_app_server_transport::ConnectionId;
|
||||
pub(crate) use codex_app_server_transport::ConnectionOrigin;
|
||||
pub(crate) use codex_app_server_transport::OutgoingMessage;
|
||||
pub(crate) use codex_app_server_transport::QueuedOutgoingMessage;
|
||||
pub(crate) use codex_app_server_transport::RemoteControlHandle;
|
||||
pub(crate) use codex_app_server_transport::TransportEvent;
|
||||
pub use codex_app_server_transport::app_server_control_socket_path;
|
||||
pub use codex_app_server_transport::auth;
|
||||
pub(crate) use codex_app_server_transport::start_control_socket_acceptor;
|
||||
pub(crate) use codex_app_server_transport::start_remote_control;
|
||||
pub(crate) use codex_app_server_transport::start_stdio_connection;
|
||||
pub(crate) use codex_app_server_transport::start_websocket_acceptor;
|
||||
|
||||
/// Per-connection state tracked for an inbound app-server connection.
pub(crate) struct ConnectionState {
    // Whether the outbound side of this connection has completed initialize.
    pub(crate) outbound_initialized: Arc<AtomicBool>,
    // Whether the connection opted into the experimental API surface.
    pub(crate) outbound_experimental_api_enabled: Arc<AtomicBool>,
    // Notification method names this connection asked not to receive.
    pub(crate) outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
    // Session-scoped state, keyed off the connection's origin.
    pub(crate) session: Arc<ConnectionSessionState>,
}

impl ConnectionState {
    /// Creates connection state sharing the given outbound flags and opt-out
    /// set, with a fresh session for `origin`.
    pub(crate) fn new(
        origin: ConnectionOrigin,
        outbound_initialized: Arc<AtomicBool>,
        outbound_experimental_api_enabled: Arc<AtomicBool>,
        outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
    ) -> Self {
        Self {
            outbound_initialized,
            outbound_experimental_api_enabled,
            outbound_opted_out_notification_methods,
            session: Arc::new(ConnectionSessionState::new(origin)),
        }
    }
}
|
||||
|
||||
/// Writer-side state for one outbound connection.
pub(crate) struct OutboundConnectionState {
    // Whether the connection has completed initialize.
    pub(crate) initialized: Arc<AtomicBool>,
    // Whether the connection opted into the experimental API surface.
    pub(crate) experimental_api_enabled: Arc<AtomicBool>,
    // Notification method names this connection asked not to receive.
    pub(crate) opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
    // Bounded queue of messages waiting to be written to the transport.
    pub(crate) writer: mpsc::Sender<QueuedOutgoingMessage>,
    // Present only for connections that may be force-disconnected
    // (see can_disconnect / request_disconnect).
    disconnect_sender: Option<CancellationToken>,
}

impl OutboundConnectionState {
    pub(crate) fn new(
        writer: mpsc::Sender<QueuedOutgoingMessage>,
        initialized: Arc<AtomicBool>,
        experimental_api_enabled: Arc<AtomicBool>,
        opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
        disconnect_sender: Option<CancellationToken>,
    ) -> Self {
        Self {
            initialized,
            experimental_api_enabled,
            opted_out_notification_methods,
            writer,
            disconnect_sender,
        }
    }

    /// True when this connection supports being force-disconnected.
    fn can_disconnect(&self) -> bool {
        self.disconnect_sender.is_some()
    }

    /// Cancels the connection's transport task, if disconnection is
    /// supported; a no-op otherwise.
    pub(crate) fn request_disconnect(&self) {
        if let Some(disconnect_sender) = &self.disconnect_sender {
            disconnect_sender.cancel();
        }
    }
}
|
||||
|
||||
fn should_skip_notification_for_connection(
|
||||
connection_state: &OutboundConnectionState,
|
||||
message: &OutgoingMessage,
|
||||
) -> bool {
|
||||
let Ok(opted_out_notification_methods) = connection_state.opted_out_notification_methods.read()
|
||||
else {
|
||||
warn!("failed to read outbound opted-out notifications");
|
||||
return false;
|
||||
};
|
||||
match message {
|
||||
OutgoingMessage::AppServerNotification(notification) => {
|
||||
if notification.experimental_reason().is_some()
|
||||
&& !connection_state
|
||||
.experimental_api_enabled
|
||||
.load(Ordering::Acquire)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
let method = notification.to_string();
|
||||
opted_out_notification_methods.contains(method.as_str())
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn disconnect_connection(
|
||||
connections: &mut HashMap<ConnectionId, OutboundConnectionState>,
|
||||
connection_id: ConnectionId,
|
||||
) -> bool {
|
||||
if let Some(connection_state) = connections.remove(&connection_id) {
|
||||
connection_state.request_disconnect();
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Queues `message` for delivery to one connection, applying per-connection
/// filters first.
///
/// Returns `true` only when the connection was disconnected (removed from
/// `connections`) as a result of this call; `false` when the message was
/// queued, filtered out, or the connection was already gone.
async fn send_message_to_connection(
    connections: &mut HashMap<ConnectionId, OutboundConnectionState>,
    connection_id: ConnectionId,
    message: OutgoingMessage,
    write_complete_tx: Option<tokio::sync::oneshot::Sender<()>>,
) -> bool {
    let Some(connection_state) = connections.get(&connection_id) else {
        warn!("dropping message for disconnected connection: {connection_id:?}");
        return false;
    };
    // Rewrite the message for this connection (e.g. strip experimental
    // fields), then drop it entirely if the connection opted out of it.
    let message = filter_outgoing_message_for_connection(connection_state, message);
    if should_skip_notification_for_connection(connection_state, &message) {
        return false;
    }

    let writer = connection_state.writer.clone();
    let queued_message = QueuedOutgoingMessage {
        message,
        write_complete_tx,
    };
    if connection_state.can_disconnect() {
        // Disconnectable connections must not block the router: use a
        // non-blocking send and drop the connection if its queue is full
        // or its writer task has gone away.
        match writer.try_send(queued_message) {
            Ok(()) => false,
            Err(mpsc::error::TrySendError::Full(_)) => {
                warn!(
                    "disconnecting slow connection after outbound queue filled: {connection_id:?}"
                );
                disconnect_connection(connections, connection_id)
            }
            Err(mpsc::error::TrySendError::Closed(_)) => {
                disconnect_connection(connections, connection_id)
            }
        }
    } else if writer.send(queued_message).await.is_err() {
        // Non-disconnectable connections (no cancellation token, e.g. stdio)
        // apply backpressure instead: await queue capacity, and only
        // disconnect when the channel is closed.
        disconnect_connection(connections, connection_id)
    } else {
        false
    }
}
|
||||
|
||||
fn filter_outgoing_message_for_connection(
|
||||
connection_state: &OutboundConnectionState,
|
||||
message: OutgoingMessage,
|
||||
) -> OutgoingMessage {
|
||||
let experimental_api_enabled = connection_state
|
||||
.experimental_api_enabled
|
||||
.load(Ordering::Acquire);
|
||||
match message {
|
||||
OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id,
|
||||
mut params,
|
||||
}) => {
|
||||
if !experimental_api_enabled {
|
||||
params.strip_experimental_fields();
|
||||
}
|
||||
OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id,
|
||||
params,
|
||||
})
|
||||
}
|
||||
_ => message,
|
||||
}
|
||||
}
|
||||
|
||||
/// Routes one outgoing envelope either to a single connection or to every
/// eligible connection (broadcast).
pub(crate) async fn route_outgoing_envelope(
    connections: &mut HashMap<ConnectionId, OutboundConnectionState>,
    envelope: OutgoingEnvelope,
) {
    match envelope {
        OutgoingEnvelope::ToConnection {
            connection_id,
            message,
            write_complete_tx,
        } => {
            // The "was disconnected" result is intentionally ignored for
            // point-to-point sends.
            let _ =
                send_message_to_connection(connections, connection_id, message, write_complete_tx)
                    .await;
        }
        OutgoingEnvelope::Broadcast { message } => {
            // Snapshot eligible targets first: only initialized connections
            // that have not filtered this notification out. Collecting ids
            // also ends the shared borrow of `connections`, so the send loop
            // below may disconnect entries while iterating.
            let target_connections: Vec<ConnectionId> = connections
                .iter()
                .filter_map(|(connection_id, connection_state)| {
                    if connection_state.initialized.load(Ordering::Acquire)
                        && !should_skip_notification_for_connection(connection_state, &message)
                    {
                        Some(*connection_id)
                    } else {
                        None
                    }
                })
                .collect();

            for connection_id in target_connections {
                let _ = send_message_to_connection(
                    connections,
                    connection_id,
                    message.clone(),
                    /*write_complete_tx*/ None,
                )
                .await;
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "transport_tests.rs"]
|
||||
mod tests;
|
||||
@@ -86,7 +86,7 @@ pub enum AppServerWebsocketCapabilityTokenSource {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct WebsocketAuthPolicy {
|
||||
pub(crate) struct WebsocketAuthPolicy {
|
||||
pub(crate) mode: Option<WebsocketAuthMode>,
|
||||
}
|
||||
|
||||
@@ -219,7 +219,7 @@ impl AppServerWebsocketAuthArgs {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn policy_from_settings(
|
||||
pub(crate) fn policy_from_settings(
|
||||
settings: &AppServerWebsocketAuthSettings,
|
||||
) -> io::Result<WebsocketAuthPolicy> {
|
||||
let mode = match settings.config.as_ref() {
|
||||
1210
codex-rs/app-server/src/transport/mod.rs
Normal file
1210
codex-rs/app-server/src/transport/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -36,14 +36,14 @@ pub(super) struct QueuedServerEnvelope {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RemoteControlHandle {
|
||||
pub(crate) struct RemoteControlHandle {
|
||||
enabled_tx: Arc<watch::Sender<bool>>,
|
||||
status_tx: Arc<watch::Sender<RemoteControlStatusChangedNotification>>,
|
||||
state_db_available: bool,
|
||||
}
|
||||
|
||||
impl RemoteControlHandle {
|
||||
pub fn set_enabled(&self, enabled: bool) {
|
||||
pub(crate) fn set_enabled(&self, enabled: bool) {
|
||||
let requested_enabled = enabled;
|
||||
let enabled = enabled && self.state_db_available;
|
||||
if requested_enabled && !self.state_db_available {
|
||||
@@ -56,12 +56,14 @@ impl RemoteControlHandle {
|
||||
});
|
||||
}
|
||||
|
||||
pub fn status_receiver(&self) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
pub(crate) fn status_receiver(
|
||||
&self,
|
||||
) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
self.status_tx.subscribe()
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_remote_control(
|
||||
pub(crate) async fn start_remote_control(
|
||||
remote_control_url: String,
|
||||
state_db: Option<Arc<StateRuntime>>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
@@ -21,7 +21,7 @@ use tracing::debug;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
|
||||
pub async fn start_stdio_connection(
|
||||
pub(crate) async fn start_stdio_connection(
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
stdio_handles: &mut Vec<JoinHandle<()>>,
|
||||
initialize_client_name_tx: oneshot::Sender<String>,
|
||||
@@ -20,7 +20,7 @@ use tracing::warn;
|
||||
#[cfg(unix)]
|
||||
const CONTROL_SOCKET_MODE: u32 = 0o600;
|
||||
|
||||
pub async fn start_control_socket_acceptor(
|
||||
pub(crate) async fn start_control_socket_acceptor(
|
||||
socket_path: AbsolutePathBuf,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
shutdown_token: CancellationToken,
|
||||
@@ -128,7 +128,7 @@ async fn websocket_upgrade_handler(
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub async fn start_websocket_acceptor(
|
||||
pub(crate) async fn start_websocket_acceptor(
|
||||
bind_address: SocketAddr,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
shutdown_token: CancellationToken,
|
||||
@@ -1,532 +0,0 @@
|
||||
use super::*;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ThreadGoal;
|
||||
use codex_app_server_protocol::ThreadGoalStatus;
|
||||
use codex_app_server_protocol::ThreadGoalUpdatedNotification;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
/// Test helper: parses `path` as an `AbsolutePathBuf`, panicking on failure.
fn absolute_path(path: &str) -> AbsolutePathBuf {
    AbsolutePathBuf::from_absolute_path(path).expect("absolute path")
}
|
||||
|
||||
/// Test helper: builds a `ThreadGoalUpdated` notification fixture, used by
/// the experimental-capability tests below as a representative experimental
/// notification.
fn thread_goal_updated_notification() -> ServerNotification {
    ServerNotification::ThreadGoalUpdated(ThreadGoalUpdatedNotification {
        thread_id: "thread-1".to_string(),
        turn_id: None,
        goal: ThreadGoal {
            thread_id: "thread-1".to_string(),
            objective: "ship goal mode".to_string(),
            status: ThreadGoalStatus::Active,
            token_budget: None,
            tokens_used: 0,
            time_used_seconds: 0,
            created_at: 1,
            updated_at: 1,
        },
    })
}
|
||||
|
||||
/// A point-to-point notification whose method is in the connection's opt-out
/// set must be dropped before reaching the writer queue.
#[tokio::test]
async fn to_connection_notification_respects_opt_out_filters() {
    let connection_id = ConnectionId(7);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);
    let initialized = Arc::new(AtomicBool::new(true));
    let opted_out_notification_methods =
        Arc::new(RwLock::new(HashSet::from(["configWarning".to_string()])));

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            initialized,
            Arc::new(AtomicBool::new(true)),
            opted_out_notification_methods,
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                ConfigWarningNotification {
                    summary: "task_started".to_string(),
                    details: None,
                    path: None,
                    range: None,
                },
            )),
            write_complete_tx: None,
        },
    )
    .await;

    assert!(
        writer_rx.try_recv().is_err(),
        "opted-out notification should be dropped"
    );
}
|
||||
|
||||
/// Same opt-out check as above, constructing the opt-out set inline.
#[tokio::test]
async fn to_connection_notifications_are_dropped_for_opted_out_clients() {
    let connection_id = ConnectionId(10);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::from(["configWarning".to_string()]))),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                ConfigWarningNotification {
                    summary: "task_started".to_string(),
                    details: None,
                    path: None,
                    range: None,
                },
            )),
            write_complete_tx: None,
        },
    )
    .await;

    assert!(
        writer_rx.try_recv().is_err(),
        "opted-out notifications should not reach clients"
    );
}
|
||||
|
||||
/// With an empty opt-out set the notification must be delivered unchanged.
#[tokio::test]
async fn to_connection_notifications_are_preserved_for_non_opted_out_clients() {
    let connection_id = ConnectionId(11);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                ConfigWarningNotification {
                    summary: "task_started".to_string(),
                    details: None,
                    path: None,
                    range: None,
                },
            )),
            write_complete_tx: None,
        },
    )
    .await;

    let message = writer_rx
        .recv()
        .await
        .expect("notification should reach non-opted-out clients");
    assert!(matches!(
        message.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
            ConfigWarningNotification { summary, .. }
        )) if summary == "task_started"
    ));
}
|
||||
|
||||
/// An experimental notification must be dropped when the client has not
/// enabled the experimental API (capability flag is `false`).
#[tokio::test]
async fn experimental_notifications_are_dropped_without_capability() {
    let connection_id = ConnectionId(12);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(false)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::AppServerNotification(thread_goal_updated_notification()),
            write_complete_tx: None,
        },
    )
    .await;

    assert!(
        writer_rx.try_recv().is_err(),
        "experimental notifications should not reach clients without capability"
    );
}
|
||||
|
||||
/// An experimental notification must be delivered when the client opted in
/// to the experimental API.
#[tokio::test]
async fn experimental_notifications_are_preserved_with_capability() {
    let connection_id = ConnectionId(13);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::AppServerNotification(thread_goal_updated_notification()),
            write_complete_tx: None,
        },
    )
    .await;

    let message = writer_rx
        .recv()
        .await
        .expect("experimental notification should reach opted-in client");
    assert!(matches!(
        message.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ThreadGoalUpdated(_))
    ));
}
|
||||
|
||||
/// Without the experimental capability, the `additionalPermissions` field of
/// a CommandExecutionRequestApproval request must be stripped before the
/// request is queued (the request itself still goes through).
#[tokio::test]
async fn command_execution_request_approval_strips_additional_permissions_without_capability() {
    let connection_id = ConnectionId(8);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(false)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
                request_id: RequestId::Integer(1),
                params: codex_app_server_protocol::CommandExecutionRequestApprovalParams {
                    thread_id: "thr_123".to_string(),
                    turn_id: "turn_123".to_string(),
                    item_id: "call_123".to_string(),
                    approval_id: None,
                    reason: Some("Need extra read access".to_string()),
                    network_approval_context: None,
                    command: Some("cat file".to_string()),
                    cwd: Some(absolute_path("/tmp")),
                    command_actions: None,
                    additional_permissions: Some(
                        codex_app_server_protocol::AdditionalPermissionProfile {
                            network: None,
                            file_system: Some(
                                codex_app_server_protocol::AdditionalFileSystemPermissions {
                                    read: Some(vec![absolute_path("/tmp/allowed")]),
                                    write: None,
                                    glob_scan_max_depth: None,
                                    entries: None,
                                },
                            ),
                        },
                    ),
                    proposed_execpolicy_amendment: None,
                    proposed_network_policy_amendments: None,
                    available_decisions: None,
                },
            }),
            write_complete_tx: None,
        },
    )
    .await;

    let message = writer_rx
        .recv()
        .await
        .expect("request should be delivered to the connection");
    let json = serde_json::to_value(message.message).expect("request should serialize");
    // Stripped field must be absent from the serialized request entirely.
    assert_eq!(json["params"].get("additionalPermissions"), None);
}
|
||||
|
||||
/// With the experimental capability enabled, `additionalPermissions` must be
/// forwarded intact in the serialized request.
#[tokio::test]
async fn command_execution_request_approval_keeps_additional_permissions_with_capability() {
    let connection_id = ConnectionId(9);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    route_outgoing_envelope(
        &mut connections,
        OutgoingEnvelope::ToConnection {
            connection_id,
            message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
                request_id: RequestId::Integer(1),
                params: codex_app_server_protocol::CommandExecutionRequestApprovalParams {
                    thread_id: "thr_123".to_string(),
                    turn_id: "turn_123".to_string(),
                    item_id: "call_123".to_string(),
                    approval_id: None,
                    reason: Some("Need extra read access".to_string()),
                    network_approval_context: None,
                    command: Some("cat file".to_string()),
                    cwd: Some(absolute_path("/tmp")),
                    command_actions: None,
                    additional_permissions: Some(
                        codex_app_server_protocol::AdditionalPermissionProfile {
                            network: None,
                            file_system: Some(
                                codex_app_server_protocol::AdditionalFileSystemPermissions {
                                    read: Some(vec![absolute_path("/tmp/allowed")]),
                                    write: None,
                                    glob_scan_max_depth: None,
                                    entries: None,
                                },
                            ),
                        },
                    ),
                    proposed_execpolicy_amendment: None,
                    proposed_network_policy_amendments: None,
                    available_decisions: None,
                },
            }),
            write_complete_tx: None,
        },
    )
    .await;

    let message = writer_rx
        .recv()
        .await
        .expect("request should be delivered to the connection");
    let json = serde_json::to_value(message.message).expect("request should serialize");
    let allowed_path = absolute_path("/tmp/allowed").to_string_lossy().into_owned();
    assert_eq!(
        json["params"]["additionalPermissions"],
        json!({
            "network": null,
            "fileSystem": {
                "read": [allowed_path],
                "write": null,
            },
        })
    );
}
|
||||
|
||||
/// A broadcast must not await a disconnectable connection whose outbound
/// queue is full: the slow connection is dropped (token cancelled, entry
/// removed) while the fast connection still receives the message, and the
/// slow connection's previously buffered message is left intact.
#[tokio::test]
async fn broadcast_does_not_block_on_slow_connection() {
    let fast_connection_id = ConnectionId(1);
    let slow_connection_id = ConnectionId(2);

    let (fast_writer_tx, mut fast_writer_rx) = mpsc::channel(1);
    let (slow_writer_tx, mut slow_writer_rx) = mpsc::channel(1);
    let fast_disconnect_token = CancellationToken::new();
    let slow_disconnect_token = CancellationToken::new();

    let mut connections = HashMap::new();
    connections.insert(
        fast_connection_id,
        OutboundConnectionState::new(
            fast_writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            Some(fast_disconnect_token.clone()),
        ),
    );
    connections.insert(
        slow_connection_id,
        OutboundConnectionState::new(
            slow_writer_tx.clone(),
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            Some(slow_disconnect_token.clone()),
        ),
    );

    // Fill the slow connection's 1-slot queue so the broadcast try_send
    // hits TrySendError::Full.
    let queued_message = OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
        ConfigWarningNotification {
            summary: "already-buffered".to_string(),
            details: None,
            path: None,
            range: None,
        },
    ));
    slow_writer_tx
        .try_send(QueuedOutgoingMessage::new(queued_message))
        .expect("channel should have room");

    let broadcast_message = OutgoingMessage::AppServerNotification(
        ServerNotification::ConfigWarning(ConfigWarningNotification {
            summary: "test".to_string(),
            details: None,
            path: None,
            range: None,
        }),
    );
    timeout(
        Duration::from_millis(100),
        route_outgoing_envelope(
            &mut connections,
            OutgoingEnvelope::Broadcast {
                message: broadcast_message,
            },
        ),
    )
    .await
    .expect("broadcast should return even when one connection is slow");
    assert!(!connections.contains_key(&slow_connection_id));
    assert!(slow_disconnect_token.is_cancelled());
    assert!(!fast_disconnect_token.is_cancelled());
    let fast_message = fast_writer_rx
        .try_recv()
        .expect("fast connection should receive the broadcast notification");
    assert!(matches!(
        fast_message.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
            ConfigWarningNotification { summary, .. }
        )) if summary == "test"
    ));

    let slow_message = slow_writer_rx
        .try_recv()
        .expect("slow connection should retain its original buffered message");
    assert!(matches!(
        slow_message.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
            ConfigWarningNotification { summary, .. }
        )) if summary == "already-buffered"
    ));
}
|
||||
|
||||
/// A connection without a disconnect token (stdio-style) must apply
/// backpressure: routing awaits queue capacity instead of dropping the
/// connection, and the second message is delivered once the first drains.
#[tokio::test]
async fn to_connection_stdio_waits_instead_of_disconnecting_when_writer_queue_is_full() {
    let connection_id = ConnectionId(3);
    let (writer_tx, mut writer_rx) = mpsc::channel(1);
    // Pre-fill the 1-slot queue so the routed message must wait.
    writer_tx
        .send(QueuedOutgoingMessage::new(
            OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                ConfigWarningNotification {
                    summary: "queued".to_string(),
                    details: None,
                    path: None,
                    range: None,
                },
            )),
        ))
        .await
        .expect("channel should accept the first queued message");

    let mut connections = HashMap::new();
    connections.insert(
        connection_id,
        OutboundConnectionState::new(
            writer_tx,
            Arc::new(AtomicBool::new(true)),
            Arc::new(AtomicBool::new(true)),
            Arc::new(RwLock::new(HashSet::new())),
            /*disconnect_sender*/ None,
        ),
    );

    // Routing runs on a separate task because it blocks until the queue
    // has room.
    let route_task = tokio::spawn(async move {
        route_outgoing_envelope(
            &mut connections,
            OutgoingEnvelope::ToConnection {
                connection_id,
                message: OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
                    ConfigWarningNotification {
                        summary: "second".to_string(),
                        details: None,
                        path: None,
                        range: None,
                    },
                )),
                write_complete_tx: None,
            },
        )
        .await
    });

    let first = timeout(Duration::from_millis(100), writer_rx.recv())
        .await
        .expect("first queued message should be readable")
        .expect("first queued message should exist");
    timeout(Duration::from_millis(100), route_task)
        .await
        .expect("routing should finish after the first queued message is drained")
        .expect("routing task should succeed");

    assert!(matches!(
        first.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
            ConfigWarningNotification { summary, .. }
        )) if summary == "queued"
    ));
    let second = writer_rx
        .try_recv()
        .expect("second notification should be delivered once the queue has room");
    assert!(matches!(
        second.message,
        OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
            ConfigWarningNotification { summary, .. }
        )) if summary == "second"
    ));
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user