Mirror of https://github.com/openai/codex.git (synced 2026-05-08 21:32:33 +00:00).

Compare commits: `pr20404...codex/stat` (124 commits)
Commits in this range (SHA1):

ea735b69da, 48402be6fa, cc16995cc6, c2fed01550, 4d201e340e, 0035d7bd18, 5d5500650b, b459ba0f4d,
905987c08f, 66e5d99eb2, 5c1ec8f4fd, 94800ecbbf, 5b80f87c97, 541e99cf09, 1b900bee8a, 83a4e3b66b,
e3451ce6be, 4fd7dfe223, f20f8a719e, 161541310f, 33b19bcfde, 12a729f2b2, f072119ccf, 3c2dcbef85,
2f5c06a29c, 8ba294ea13, 5512b23c95, 0269a46ab1, 554223ab80, 29352569b3, 5730615e75, 6b6581ac59,
019755d570, d927f61208, d013155f40, f48b777717, c8c30d9d75, 9ddfda9db7, 67849d950d, 39555036a3,
35aaa5d9fc, f88701f5c8, 127434cd8b, 9e905528bb, cd2760fc08, 466798aa83, a5fbcf1ab4, 2952beb009,
d55479488e, 443f6b831e, aed74e5ee4, 610eefb86b, 2817866a32, ff66b3c7eb, be71b6fcd1, e4d6675632,
78baa20780, 9b8d585075, 6784db51c0, 41e171fcf2, 5744b85b9a, 3d1d164aee, 227bee0445, f476338f93,
0b04d1b3cc, ff27d01676, 70fc55b8f3, 97aae46800, ad404c8400, 48791920a8, 96d2ea9058, a62b52f826,
a93c89f497, d898cc8f3f, fe05acad23, f50c02d7bc, bb60b78c46, c39824c2fd, 6b1b227804, 972b819213,
af089fb21d, 4f96001fa7, 0d9a5d20ec, 5affb7f9d5, acdf908268, b6f81257f8, a5ebedef67, 5de7992ee5,
2686873e77, 9ddb267e9c, 6014b6679f, 8426edf71e, 7b3de63041, 127be0612c, d9f639ba6d, ebd79231a5,
8b9888d60b, 57e8dd5e7b, 9121132c8f, 209bc225a5, 903c56aa87, 70090c9ff7, 8121710ffe, 7dd08e304c,
06f3b4836a, 31f8813e3e, 93d53f655b, 719431da6e, b52083146c, f2bc2f26a9, 5cc5f12efc, c70cdc108f,
487716ae74, a85d265097, c02814c106, 3516cb9751, 8a97f3cf03, 6bd78a51b5, 9a05898cf0, 9136dee011,
f2924bf70c, 47964c77db, 0782c6050e, 4c0c1b7eee
.bazelrc (19 changed lines)
@@ -153,6 +153,25 @@ common:ci-macos --config=remote
common:ci-macos --strategy=remote
common:ci-macos --strategy=TestRunner=darwin-sandbox,local

# On Windows, use Linux remote execution for build actions but keep test actions
# on the Windows runner so Bazel's normal test sharding and flaky-test retries
# still run against Windows binaries.
common:ci-windows-cross --config=ci-windows
common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
common:ci-windows-cross --config=remote
common:ci-windows-cross --host_platform=//:rbe
common:ci-windows-cross --strategy=remote
common:ci-windows-cross --strategy=TestRunner=local
common:ci-windows-cross --local_test_jobs=4
common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
# Native Windows CI still covers these tests. The cross-built gnullvm binaries
# currently crash in V8-backed code-mode tests and hang in PowerShell AST parser
# tests when those binaries are run on the Windows runner.
common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain

# Linux-only V8 CI config.
common:ci-v8 --config=ci
common:ci-v8 --build_metadata=TAG_workflow=v8

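The `CODEX_BAZEL_TEST_SKIP_FILTERS` value above is a comma-separated filter list that the test harness, not Bazel itself, interprets. The snippet below is only a guess at the general shape of that consumption (substring matching against test names); the function name and the matching rule are assumptions, not the harness's actual implementation.

```python
import os


def skipped(test_name: str) -> bool:
    """Hypothetical harness-side check for CODEX_BAZEL_TEST_SKIP_FILTERS.

    Assumes each comma-separated entry is treated as a substring filter,
    e.g. "suite::code_mode::" or "powershell"; the real matching rules are
    defined by the Codex test harness, not by .bazelrc.
    """
    raw = os.environ.get("CODEX_BAZEL_TEST_SKIP_FILTERS", "")
    filters = [f for f in (part.strip() for part in raw.split(",")) if f]
    return any(f in test_name for f in filters)


# skipped("suite::code_mode::smoke") would be True under the config above,
# if substring matching is indeed how the harness applies these filters.
```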
.codex/environments/environment.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "codex"

[setup]
script = ""

[[actions]]
name = "Run"
icon = "run"
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"
@@ -27,10 +27,10 @@ Accept any of the following:
2. Run the watcher script to snapshot PR/review/CI state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push. Do not patch random flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are unrelated to the branch.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, push, and then mark the associated review thread/comment as resolved once the fix is on GitHub.
8. If a review item from another author is non-actionable, already addressed, or not valid, post one reply on the comment/thread explaining that decision (for example answering the question or explaining why no change is needed). Prefix the GitHub reply body with `[codex]` so it is clear the response is automated. If the watcher later surfaces your own reply, treat that self-authored item as already handled and do not reply again.
8. Do not post replies to human-authored review comments/threads unless the user explicitly confirms the exact response. If a human review item is non-actionable, already addressed, or not valid, surface the item and recommended response to the user instead of replying on GitHub.
9. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
10. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
11. On every loop, look for newly surfaced review feedback before acting on CI failures or mergeability state, then verify mergeability / merge-conflict status (for example via `gh pr view`) alongside CI.
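Steps 2 through 11 above amount to a dispatch loop over watcher snapshots. A minimal sketch of that loop follows; the snapshot keys (`actions`, `failed_jobs`, `new_review_items`, `pr`) match the watcher script changed later in this diff, while the single-snapshot invocation and the print-based handlers are placeholders for the real patch/surface/retry behavior.

```python
import json
import subprocess


def snapshot_pr(pr: str) -> dict:
    """Take one watcher snapshot; assumes a single JSON object is printed to stdout."""
    cmd = ["python3", ".codex/skills/babysit-pr/scripts/gh_pr_watch.py", "--pr", pr]
    out = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
    return json.loads(out)


def dispatch(snapshot: dict) -> None:
    """Dispatch on the recommended actions, mirroring steps 4-11 above."""
    actions = snapshot.get("actions", [])
    if "diagnose_ci_failure" in actions:
        for job in snapshot.get("failed_jobs", []):
            # Fetch this job's logs and classify; patch only branch-related failures.
            print("diagnose:", job.get("job_name"), job.get("logs_endpoint"))
    if "process_review_comment" in actions:
        for item in snapshot.get("new_review_items", []):
            # Human-authored items are surfaced to the user, never auto-replied.
            print("review item to surface:", item.get("id"))
    if "retry_failed_checks" in actions:
        # Rerun only when no review-fix commit is about to replace the current SHA.
        print("retry failed checks for", snapshot.get("pr", {}).get("head_sha"))


if __name__ == "__main__":
    dispatch(snapshot_pr("20404"))
```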
@@ -69,12 +69,18 @@ python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --o
Use `gh` commands to inspect failed runs before deciding to rerun.

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`
- `gh api repos/<owner>/<repo>/actions/runs/<run-id>/jobs -X GET -f per_page=100`
- `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > /tmp/codex-gh-job-<job-id>-logs.zip`
- `gh run view <run-id> --log-failed` as a fallback after the overall workflow run is complete

Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
`gh run view --log-failed` is workflow-run scoped and may not expose failed-job logs until the overall run finishes. For faster diagnosis, poll the run's jobs first and, as soon as a specific job has failed, fetch that job's logs directly from the Actions job logs endpoint. The watcher includes a `failed_jobs` list with each failed job's `job_id` and `logs_endpoint` when GitHub exposes one.

Prefer treating failures as branch-related when failed-job logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).

Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).

Do not attempt to fix flaky/unrelated failures by changing tests, build scripts, CI configuration, dependency pins, or infrastructure-adjacent code unless the logs clearly connect the failure to the PR branch. For flaky/unrelated failures, rerun only when the watcher recommends `retry_failed_checks`; otherwise wait or stop for user help.

If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.

Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
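The paragraph above recommends polling a run's jobs and pulling a failed job's logs directly rather than waiting for `gh run view --log-failed`. A rough sketch of that flow, assuming an authenticated `gh` CLI; the repo, run id, and output path are placeholders.

```python
import json
import subprocess


def gh_json(args):
    out = subprocess.run(["gh", *args], check=True, capture_output=True, text=True).stdout
    return json.loads(out)


def fetch_first_failed_job_logs(repo: str, run_id: int):
    """Poll the run's jobs; as soon as one has failed, download its log archive."""
    endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
    jobs = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"]).get("jobs", [])
    for job in jobs:
        if job.get("conclusion") == "failure":
            dest = f"/tmp/codex-gh-job-{job['id']}-logs.zip"
            with open(dest, "wb") as fh:
                subprocess.run(
                    ["gh", "api", f"repos/{repo}/actions/jobs/{job['id']}/logs"],
                    check=True, stdout=fh,
                )
            return dest
    return None  # nothing has failed yet; keep polling


# Example (placeholder run id): fetch_first_failed_job_logs("openai/codex", 99)
```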
@@ -99,7 +105,8 @@ When you agree with a comment and it is actionable:
5. Resume watching on the new SHA immediately (do not stop after reporting the push).
6. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.

If you disagree or the comment is non-actionable/already addressed, reply once directly on the GitHub comment/thread so the reviewer gets an explicit answer, then continue the watcher loop. Prefix any GitHub reply to a code review comment/thread with `[codex]` so it is clear the response is automated and not from the human user. If the watcher later surfaces your own reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
Do not post replies to human-authored GitHub review comments/threads automatically. If you disagree with a human comment, believe it is non-actionable/already addressed, or need to answer a question, report the item to the user with a suggested response and wait for explicit confirmation before posting anything on GitHub. If the user approves a response, prefix it with `[codex]` so it is clear the response is automated and not from the human user.
If the watcher later surfaces your own approved reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.

## Git Safety Rules
@@ -125,11 +132,11 @@ Use this loop in a live Codex session:
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. For each surfaced review item from another author, either reply once with an explanation if it is non-actionable or patch/commit/push and then resolve it if it is actionable. If a later snapshot surfaces your own reply, treat it as informational and continue without responding again.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated. If the overall run is still pending but `failed_jobs` already includes a failed job, fetch that job's logs and diagnose immediately instead of waiting for the whole workflow run to finish. Patch only when the failure is branch-related.
6. For each surfaced review item from another author, patch/commit/push and then resolve it if it is actionable. If it is non-actionable, already addressed, or requires a written answer, surface it to the user with a suggested response instead of posting automatically. If a later snapshot surfaces your own approved reply, treat it as informational and continue without responding again.
7. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
9. If you pushed a commit, resolved a review thread, replied to a review comment, or triggered a rerun, report the action briefly and continue polling (do not stop).
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit. Do not make code changes for unrelated flakes or infrastructure failures just to get CI green.
9. If you pushed a commit, resolved a review thread, or triggered a rerun, report the action briefly and continue polling (do not stop). If a human review comment needs a written GitHub response, stop and ask for confirmation before posting.
10. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
11. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report that the PR is currently ready to merge but keep the watcher running so new review comments are surfaced quickly while the PR remains open.
12. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.

@@ -1,4 +1,4 @@
interface:
  display_name: "PR Babysitter"
  short_description: "Watch PR review comments, CI, and merge conflicts"
  default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
  default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Do not post replies to human-authored review comments unless the user explicitly confirms the exact response. Do not patch unrelated flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are not caused by the branch. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."

@@ -23,9 +23,11 @@ Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/{owner}/{repo}/actions/runs/{run_id}/jobs -X GET -f per_page=100`
- `gh api repos/{owner}/{repo}/actions/jobs/{job_id}/logs > /tmp/codex-gh-job-{job_id}-logs.zip`
- `gh run view <run-id> --log-failed`

Used by Codex to classify branch-related vs flaky/unrelated failures.
Used by Codex to classify branch-related vs flaky/unrelated failures. Prefer the direct job log endpoint as soon as a job has failed because `gh run view --log-failed` may not produce failed-job logs until the overall workflow run completes.

### Retry failed jobs only

@@ -70,3 +72,11 @@ Reruns only failed jobs (and dependencies) for a workflow run.
- `conclusion`
- `html_url`
- `head_sha`

### Actions run jobs API (`jobs[]`)

- `id`
- `name`
- `status`
- `conclusion`
- `html_url`

@@ -18,6 +18,8 @@ Treat as **likely flaky or unrelated** when evidence points to transient or exte
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns

Do not patch likely flaky/unrelated failures. Use the retry budget for rerunnable failures, wait for pending jobs, or stop and report the blocker when the failure is persistent or infrastructure-owned.

If uncertain, inspect failed logs once before choosing rerun.

## Decision tree (fix vs rerun vs stop)
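These heuristics separate branch-related failures from transient infrastructure noise. A toy classifier along those lines is sketched below; the signature lists are illustrative assumptions, not the project's actual or exhaustive patterns.

```python
import re

# Illustrative signature lists only; the maintained checklist lives in heuristics.md.
INFRA_PATTERNS = [
    r"timed? ?out", r"rate limit", r"runner .*provision", r"service unavailable",
    r"network (error|outage)", r"internal server error",
]
BRANCH_PATTERNS = [
    r"error\[E\d+\]", r"cannot find (function|module|value)", r"assertion .*failed",
    r"snapshot .*mismatch", r"clippy::", r"FAILED",
]


def classify_failure(log_text: str, touched_paths: list) -> str:
    """Return 'branch-related', 'flaky-or-unrelated', or 'ambiguous'."""
    lowered = log_text.lower()
    hits_branch = any(re.search(p, log_text) for p in BRANCH_PATTERNS)
    mentions_touched = any(path in log_text for path in touched_paths)
    if hits_branch and mentions_touched:
        return "branch-related"
    if any(re.search(p, lowered) for p in INFRA_PATTERNS):
        return "flaky-or-unrelated"
    return "ambiguous"  # per the heuristics: inspect the logs once more before rerunning
```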
@@ -25,9 +27,11 @@ If uncertain, inspect failed logs once before choosing rerun.
1. If PR is merged/closed: stop.
2. If there are failed checks:
   - Diagnose first.
   - If checks are still pending but an individual job has already failed: fetch that job's logs and diagnose now.
   - If branch-related: fix locally, commit, push.
   - If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
   - If checks are still pending: wait.
   - If likely flaky/unrelated and not safely rerunnable: stop and report the blocker; do not edit unrelated tests, build scripts, CI configuration, dependency pins, or infrastructure code.
   - If checks are still pending and no failed job is available yet: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.

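Read as code, the decision tree maps the state of the current SHA's checks onto one of a few outcomes. One possible encoding is sketched below, assuming the boolean inputs are derived from a watcher snapshot; the retry limit of 3 matches the documented default.

```python
def decide(merged_or_closed: bool,
           has_failed_checks: bool,
           branch_related: bool,
           all_checks_terminal: bool,
           safely_rerunnable: bool,
           retries_used: int,
           max_retries: int = 3) -> str:
    """Fix vs rerun vs wait vs stop, following the decision tree above."""
    if merged_or_closed:
        return "stop"
    if not has_failed_checks:
        return "wait"  # keep watching; review comments are processed independently
    if branch_related:
        return "fix-commit-push"
    if retries_used >= max_retries:
        return "stop-persistent-failure"
    if all_checks_terminal and safely_rerunnable:
        return "rerun-failed-jobs"
    if not safely_rerunnable:
        return "stop-and-report"  # never edit unrelated tests/CI/deps just to go green
    return "wait"  # checks still pending and no diagnosable failed job yet
```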
@@ -40,12 +44,15 @@ Address the comment when:
- The requested change does not conflict with the user’s intent or recent guidance.
- The change can be made safely without unrelated refactors.

Fix valid human review feedback in code when possible, but do not post a GitHub reply to a human-authored comment/thread unless the user explicitly confirms the exact response.

Do not auto-fix when:

- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
- The comment only needs a written answer or disagreement response; propose the reply to the user instead of posting it automatically.

## Stop-and-ask conditions

@@ -56,3 +63,4 @@ Stop and ask the user instead of continuing automatically when:
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
- A human review comment requires a written GitHub reply instead of a code change.

@@ -338,6 +338,66 @@ def failed_runs_from_workflow_runs(runs, head_sha):
|
||||
return failed_runs
|
||||
|
||||
|
||||
def get_jobs_for_run(repo, run_id):
|
||||
endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
|
||||
data = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"], repo=repo)
|
||||
if not isinstance(data, dict):
|
||||
raise GhCommandError("Unexpected payload from actions run jobs API")
|
||||
jobs = data.get("jobs") or []
|
||||
if not isinstance(jobs, list):
|
||||
raise GhCommandError("Expected `jobs` to be a list")
|
||||
return jobs
|
||||
|
||||
|
||||
def failed_jobs_from_workflow_runs(repo, runs, head_sha):
|
||||
failed_jobs = []
|
||||
for run in runs:
|
||||
if not isinstance(run, dict):
|
||||
continue
|
||||
if str(run.get("head_sha") or "") != head_sha:
|
||||
continue
|
||||
run_id = run.get("id")
|
||||
if run_id in (None, ""):
|
||||
continue
|
||||
run_status = str(run.get("status") or "")
|
||||
run_conclusion = str(run.get("conclusion") or "")
|
||||
if run_status.lower() == "completed" and run_conclusion not in FAILED_RUN_CONCLUSIONS:
|
||||
continue
|
||||
jobs = get_jobs_for_run(repo, run_id)
|
||||
for job in jobs:
|
||||
if not isinstance(job, dict):
|
||||
continue
|
||||
conclusion = str(job.get("conclusion") or "")
|
||||
if conclusion not in FAILED_RUN_CONCLUSIONS:
|
||||
continue
|
||||
job_id = job.get("id")
|
||||
logs_endpoint = None
|
||||
if job_id not in (None, ""):
|
||||
logs_endpoint = f"repos/{repo}/actions/jobs/{job_id}/logs"
|
||||
failed_jobs.append(
|
||||
{
|
||||
"run_id": run_id,
|
||||
"workflow_name": run.get("name") or run.get("display_title") or "",
|
||||
"run_status": run_status,
|
||||
"run_conclusion": run_conclusion,
|
||||
"job_id": job_id,
|
||||
"job_name": str(job.get("name") or ""),
|
||||
"status": str(job.get("status") or ""),
|
||||
"conclusion": conclusion,
|
||||
"html_url": str(job.get("html_url") or ""),
|
||||
"logs_endpoint": logs_endpoint,
|
||||
}
|
||||
)
|
||||
failed_jobs.sort(
|
||||
key=lambda item: (
|
||||
str(item.get("workflow_name") or ""),
|
||||
str(item.get("job_name") or ""),
|
||||
str(item.get("job_id") or ""),
|
||||
)
|
||||
)
|
||||
return failed_jobs
|
||||
|
||||
|
||||
def get_authenticated_login():
|
||||
data = gh_json(["api", "user"])
|
||||
if not isinstance(data, dict) or not data.get("login"):
|
||||
@@ -568,7 +628,7 @@ def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
|
||||
return True
|
||||
|
||||
|
||||
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
|
||||
def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_items, retries_used, max_retries):
|
||||
actions = []
|
||||
if pr["closed"] or pr["merged"]:
|
||||
if new_review_items:
|
||||
@@ -583,7 +643,7 @@ def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries
|
||||
if new_review_items:
|
||||
actions.append("process_review_comment")
|
||||
|
||||
has_failed_pr_checks = checks_summary["failed_count"] > 0
|
||||
has_failed_pr_checks = checks_summary["failed_count"] > 0 or bool(failed_jobs)
|
||||
if has_failed_pr_checks:
|
||||
if checks_summary["all_terminal"] and retries_used >= max_retries:
|
||||
actions.append("stop_exhausted_retries")
|
||||
@@ -621,12 +681,14 @@ def collect_snapshot(args):
|
||||
checks_summary = summarize_checks(checks)
|
||||
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
|
||||
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
|
||||
failed_jobs = failed_jobs_from_workflow_runs(pr["repo"], workflow_runs, pr["head_sha"])
|
||||
|
||||
retries_used = current_retry_count(state, pr["head_sha"])
|
||||
actions = recommend_actions(
|
||||
pr,
|
||||
checks_summary,
|
||||
failed_runs,
|
||||
failed_jobs,
|
||||
new_review_items,
|
||||
retries_used,
|
||||
args.max_flaky_retries,
|
||||
@@ -641,6 +703,7 @@ def collect_snapshot(args):
|
||||
"pr": pr,
|
||||
"checks": checks_summary,
|
||||
"failed_runs": failed_runs,
|
||||
"failed_jobs": failed_jobs,
|
||||
"new_review_items": new_review_items,
|
||||
"actions": actions,
|
||||
"retry_state": {
|
||||
|
||||
@@ -75,6 +75,11 @@ def test_collect_snapshot_fetches_review_items_before_ci(monkeypatch, tmp_path):
|
||||
"failed_runs_from_workflow_runs",
|
||||
lambda *args, **kwargs: call_order.append("failed_runs") or [],
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"failed_jobs_from_workflow_runs",
|
||||
lambda *args, **kwargs: call_order.append("failed_jobs") or [],
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"recommend_actions",
|
||||
@@ -100,6 +105,7 @@ def test_recommend_actions_prioritizes_review_comments():
|
||||
sample_pr(),
|
||||
sample_checks(failed_count=1),
|
||||
[{"run_id": 99}],
|
||||
[],
|
||||
[{"kind": "review_comment", "id": "1"}],
|
||||
0,
|
||||
3,
|
||||
@@ -119,6 +125,7 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
|
||||
"pr": sample_pr(),
|
||||
"checks": sample_checks(),
|
||||
"failed_runs": [],
|
||||
"failed_jobs": [],
|
||||
"new_review_items": [],
|
||||
"actions": ["ready_to_merge"],
|
||||
"retry_state": {
|
||||
@@ -153,3 +160,58 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
|
||||
|
||||
assert sleeps == [30, 30]
|
||||
assert [event for event, _ in events] == ["snapshot", "snapshot"]
|
||||
|
||||
|
||||
def test_failed_jobs_include_direct_logs_endpoint(monkeypatch):
|
||||
jobs_by_run = {
|
||||
99: [
|
||||
{
|
||||
"id": 555,
|
||||
"name": "unit tests",
|
||||
"status": "completed",
|
||||
"conclusion": "failure",
|
||||
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
|
||||
},
|
||||
{
|
||||
"id": 556,
|
||||
"name": "lint",
|
||||
"status": "completed",
|
||||
"conclusion": "success",
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"get_jobs_for_run",
|
||||
lambda repo, run_id: jobs_by_run[run_id],
|
||||
)
|
||||
|
||||
failed_jobs = gh_pr_watch.failed_jobs_from_workflow_runs(
|
||||
"openai/codex",
|
||||
[
|
||||
{
|
||||
"id": 99,
|
||||
"name": "CI",
|
||||
"status": "in_progress",
|
||||
"conclusion": "",
|
||||
"head_sha": "abc123",
|
||||
}
|
||||
],
|
||||
"abc123",
|
||||
)
|
||||
|
||||
assert failed_jobs == [
|
||||
{
|
||||
"run_id": 99,
|
||||
"workflow_name": "CI",
|
||||
"run_status": "in_progress",
|
||||
"run_conclusion": "",
|
||||
"job_id": 555,
|
||||
"job_name": "unit tests",
|
||||
"status": "completed",
|
||||
"conclusion": "failure",
|
||||
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
|
||||
"logs_endpoint": "repos/openai/codex/actions/jobs/555/logs",
|
||||
}
|
||||
]
|
||||
|
||||
.github/scripts/compute-bazel-windows-path.ps1 (14 changed lines, vendored)
@@ -5,9 +5,9 @@ tool entries, such as Maven, that can change independently of this repo and
|
||||
cause avoidable cache misses.
|
||||
|
||||
This script derives a smaller, cache-stable PATH that keeps the Windows
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths, Git,
|
||||
PowerShell, Node, Python, DotSlash, and the standard Windows system
|
||||
directories.
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
|
||||
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
|
||||
DotSlash, and the standard Windows system directories.
|
||||
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
|
||||
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
|
||||
steps can pass that explicit PATH to Bazel.
|
||||
@@ -49,6 +49,8 @@ foreach ($pathEntry in ($env:PATH -split ';')) {
|
||||
$pathEntry -like '*Microsoft Visual Studio*' -or
|
||||
$pathEntry -like '*Windows Kits*' -or
|
||||
$pathEntry -like '*Microsoft SDKs*' -or
|
||||
$pathEntry -eq 'C:\mingw64\bin' -or
|
||||
$pathEntry -like 'C:\msys64\*\bin' -or
|
||||
$pathEntry -like 'C:\Program Files\Git\*' -or
|
||||
$pathEntry -like 'C:\Program Files\PowerShell\*' -or
|
||||
$pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
|
||||
@@ -85,6 +87,12 @@ if ($pwshCommand) {
|
||||
Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
|
||||
}
|
||||
|
||||
foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
|
||||
if (Test-Path $mingwPath) {
|
||||
Add-StablePathEntry $mingwPath
|
||||
}
|
||||
}
|
||||
|
||||
if ($windowsAppsPath) {
|
||||
Add-StablePathEntry $windowsAppsPath
|
||||
}
|
||||
|
||||
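The script above keeps only PATH entries that match a small allowlist of toolchain locations so the resulting value stays cache-stable. As a cross-language illustration of that idea (not a translation of the actual PowerShell), here is the same filtering pattern in Python with a trimmed, hypothetical allowlist.

```python
import fnmatch

# Trimmed, illustrative allowlist; the real one lives in compute-bazel-windows-path.ps1.
STABLE_PATTERNS = [
    "*Microsoft Visual Studio*",
    "*Windows Kits*",
    "*Microsoft SDKs*",
    "C:\\mingw64\\bin",
    "C:\\msys64\\*\\bin",
    "C:\\Program Files\\Git\\*",
    "C:\\Program Files\\PowerShell\\*",
    "C:\\Windows*",
]


def cache_stable_path(path_value: str) -> str:
    """Keep only allowlisted entries, preserving order and dropping duplicates."""
    kept = []
    for entry in path_value.split(";"):
        if entry and any(fnmatch.fnmatch(entry, pat) for pat in STABLE_PATTERNS):
            if entry not in kept:
                kept.append(entry)
    return ";".join(kept)
```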
.github/scripts/run-bazel-ci.sh (110 changed lines, vendored)
@@ -6,6 +6,7 @@ print_failed_bazel_test_logs=0
|
||||
print_failed_bazel_action_summary=0
|
||||
remote_download_toplevel=0
|
||||
windows_msvc_host_platform=0
|
||||
windows_cross_compile=0
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
@@ -25,6 +26,10 @@ while [[ $# -gt 0 ]]; do
|
||||
windows_msvc_host_platform=1
|
||||
shift
|
||||
;;
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -37,7 +42,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] -- <bazel args> -- <targets>" >&2
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -61,7 +66,11 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
ci_config=ci-windows
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -105,8 +114,8 @@ print_bazel_test_log_tails() {
|
||||
while IFS= read -r target; do
|
||||
failed_targets+=("$target")
|
||||
done < <(
|
||||
grep -E '^FAIL: //' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
|
||||
grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
|
||||
| sort -u
|
||||
)
|
||||
|
||||
@@ -244,6 +253,12 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Fork PRs do not receive the BuildBuddy secret needed for the remote
|
||||
# cross-compile config. Preserve the previous local Windows build shape.
|
||||
windows_msvc_host_platform=1
|
||||
fi
|
||||
|
||||
post_config_bazel_args=()
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
|
||||
has_host_platform_override=0
|
||||
@@ -269,6 +284,25 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
|
||||
post_config_bazel_args+=(--remote_download_toplevel)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# `--enable_platform_specific_config` expands `common:windows` on Windows
|
||||
# hosts after ordinary rc configs, which can override `ci-windows-cross`'s
|
||||
# RBE host platform. Repeat the host platform on the command line so V8 and
|
||||
# other genrules execute on Linux RBE workers instead of Git Bash locally.
|
||||
#
|
||||
# Bazel also derives the default genrule shell from the client host. Without
|
||||
# an explicit shell executable, remote Linux actions can be asked to run
|
||||
# `C:\Program Files\Git\usr\bin\bash.exe`.
|
||||
post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The Windows cross-compile config depends on remote execution. Fork PRs do
|
||||
# not receive the BuildBuddy secret, so fall back to the existing local build
|
||||
# shape and keep its lower concurrency cap.
|
||||
post_config_bazel_args+=(--jobs=8)
|
||||
fi
|
||||
|
||||
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
|
||||
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
|
||||
# each job its own repo contents cache so they do not fight over the shared
|
||||
@@ -287,37 +321,57 @@ if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
pass_windows_build_env=1
|
||||
if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Remote build actions execute on Linux RBE workers. Passing the Windows
|
||||
# runner's build environment there makes Bazel genrules try to execute
|
||||
# C:\Program Files\Git\usr\bin\bash.exe on Linux.
|
||||
pass_windows_build_env=0
|
||||
fi
|
||||
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
|
||||
echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
elif [[ $windows_cross_compile -eq 1 ]]; then
|
||||
# Remote build actions run on Linux RBE workers. Give their shell snippets
|
||||
# a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
|
||||
# Windows test execution.
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=/usr/bin:/bin"
|
||||
"--host_action_env=PATH=/usr/bin:/bin"
|
||||
)
|
||||
fi
|
||||
post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
|
||||
fi
|
||||
|
||||
bazel_console_log="$(mktemp)"
|
||||
|
||||
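`run-bazel-ci.sh` derives the Windows build shape from three inputs: the runner OS, the `--windows-cross-compile` flag, and whether the BuildBuddy secret is present (fork PRs are not given it). The sketch below restates that branching in Python purely as a reading aid; the flag and config names mirror the script, but this is not code that CI runs.

```python
def choose_windows_shape(windows_cross_compile: bool, have_buildbuddy_key: bool) -> dict:
    """Condensed view of run-bazel-ci.sh's Windows branching (documentation only)."""
    if not windows_cross_compile:
        return {"config": "ci-windows", "extra_args": []}
    if not have_buildbuddy_key:
        # Fork PRs do not receive the BuildBuddy secret, so the script falls back
        # to the previous local MSVC host-platform shape and caps --jobs at 8.
        return {"config": "ci-windows-cross",
                "extra_args": ["--jobs=8"],
                "msvc_host_platform_fallback": True}
    # Remote cross-compile: re-assert the RBE host platform after
    # --enable_platform_specific_config expands, and give remote Linux actions
    # a Linux shell and PATH instead of the Windows runner's.
    return {"config": "ci-windows-cross",
            "extra_args": ["--host_platform=//:rbe",
                           "--shell_executable=/bin/bash",
                           "--action_env=PATH=/usr/bin:/bin"]}
```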
.github/scripts/run-bazel-query-ci.sh (13 changed lines, vendored)
@@ -6,8 +6,13 @@ set -euo pipefail
|
||||
# invocation so target-discovery queries can reuse the same Bazel server.
|
||||
|
||||
query_args=()
|
||||
windows_cross_compile=0
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -20,7 +25,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "Usage: $0 [<bazel query args>...] -- <query expression>" >&2
|
||||
echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -32,7 +37,11 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
ci_config=ci-windows
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
.github/workflows/bazel.yml (133 changed lines, vendored)
@@ -17,13 +17,10 @@ concurrency:
|
||||
cancel-in-progress: ${{ github.ref_name != 'main' }}
|
||||
jobs:
|
||||
test:
|
||||
# Even though a no-cache-hit Windows build seems to exceed the 30-minute
|
||||
# limit on occasion, the more common reason for exceeding the limit is a
|
||||
# true test failure in a rust_test() marked "flaky" that gets run 3x.
|
||||
# In that case, extra time generally does not give us more signal.
|
||||
#
|
||||
# Ultimately we need true distributed builds (e.g.,
|
||||
# https://www.buildbuddy.io/docs/rbe-setup/) to speed things up.
|
||||
# PRs use a fast Windows cross-compiled test leg for pre-merge signal.
|
||||
# Post-merge pushes to main also run the native Windows test job below,
|
||||
# which keeps V8/code-mode coverage without putting PR latency back on the
|
||||
# critical path.
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -47,13 +44,16 @@ jobs:
|
||||
# - os: ubuntu-24.04-arm
|
||||
# target: aarch64-unknown-linux-gnu
|
||||
|
||||
# Windows
|
||||
# Windows fast path: build the windows-gnullvm binaries with Linux
|
||||
# RBE, then run the resulting Windows tests on the Windows runner.
|
||||
# The main-only native Windows job below preserves full V8/code-mode
|
||||
# coverage post-merge.
|
||||
- os: windows-latest
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
# Configure a human readable name for each job
|
||||
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
@@ -91,6 +91,7 @@ jobs:
|
||||
)
|
||||
|
||||
bazel_wrapper_args=(
|
||||
--print-failed-action-summary
|
||||
--print-failed-test-logs
|
||||
)
|
||||
bazel_test_args=(
|
||||
@@ -100,8 +101,19 @@ jobs:
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
)
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_test_args+=(--jobs=8)
|
||||
bazel_wrapper_args+=(
|
||||
--windows-cross-compile
|
||||
--remote-download-toplevel
|
||||
)
|
||||
# Tradeoff: the Linux-RBE-built windows-gnullvm V8 archive
|
||||
# currently crashes during direct V8/code-mode smoke tests on the
|
||||
# Windows runner. Keep the broader fast Windows suite in PR CI and
|
||||
# rely on the main-only native Windows job below for full
|
||||
# V8/code-mode signal while we investigate the cross-built archive.
|
||||
bazel_targets+=(
|
||||
-//codex-rs/code-mode:code-mode-unit-tests
|
||||
-//codex-rs/v8-poc:v8-poc-unit-tests
|
||||
)
|
||||
fi
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
@@ -130,6 +142,75 @@ jobs:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
test-windows-native-main:
|
||||
# Native Windows Bazel tests are slower and frequently approach the
|
||||
# 30-minute PR budget, but they provide the full V8/code-mode signal that
|
||||
# the fast cross-compiled PR leg intentionally trades away. Run this only
|
||||
# for post-merge commits to main and give it a larger timeout.
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 40
|
||||
runs-on: windows-latest
|
||||
name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- name: Prepare Bazel CI
|
||||
id: prepare_bazel
|
||||
uses: ./.github/actions/prepare-bazel-ci
|
||||
with:
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
cache-scope: bazel-${{ github.job }}
|
||||
install-test-prereqs: "true"
|
||||
|
||||
- name: bazel test //...
|
||||
env:
|
||||
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
|
||||
shell: bash
|
||||
run: |
|
||||
bazel_targets=(
|
||||
//...
|
||||
# Keep standalone V8 library targets out of the ordinary Bazel CI
|
||||
# path. V8 consumers under `//codex-rs/...` still participate
|
||||
# transitively through `//...`.
|
||||
-//third_party/v8:all
|
||||
)
|
||||
|
||||
bazel_test_args=(
|
||||
test
|
||||
--test_tag_filters=-argument-comment-lint
|
||||
--test_verbose_timeout_warnings
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
--build_metadata=TAG_windows_native_main=true
|
||||
)
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
--print-failed-action-summary \
|
||||
--print-failed-test-logs \
|
||||
-- \
|
||||
"${bazel_test_args[@]}" \
|
||||
-- \
|
||||
"${bazel_targets[@]}"
|
||||
|
||||
- name: Upload Bazel execution logs
|
||||
if: always() && !cancelled()
|
||||
continue-on-error: true
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
with:
|
||||
name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
|
||||
path: ${{ runner.temp }}/bazel-execution-logs
|
||||
if-no-files-found: ignore
|
||||
|
||||
# Save the job-scoped Bazel repository cache after cache misses. Keep the
|
||||
# upload non-fatal so cache service issues never fail the job itself.
|
||||
- name: Save bazel repository cache
|
||||
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
|
||||
continue-on-error: true
|
||||
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
|
||||
with:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
clippy:
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
@@ -170,17 +251,24 @@ jobs:
|
||||
--build_metadata=TAG_job=clippy
|
||||
)
|
||||
bazel_wrapper_args=()
|
||||
bazel_target_list_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
# Keep this aligned with the Windows Bazel test job. With the
|
||||
# default `//:local_windows` host platform, Windows `rust_test`
|
||||
# targets such as `//codex-rs/core:core-all-test` can be skipped
|
||||
# by `--skip_incompatible_explicit_targets`, which hides clippy
|
||||
# diagnostics from integration-test modules.
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
# Keep this aligned with the fast Windows Bazel test job: use
|
||||
# Linux RBE for clippy build actions while targeting Windows
|
||||
# gnullvm. Fork/community PRs without the BuildBuddy secret fall
|
||||
# back inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
bazel_target_list_args+=(--windows-cross-compile)
|
||||
if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The fork fallback can see incompatible explicit Windows-cross
|
||||
# internal test binaries in the generated target list. Preserve
|
||||
# the old local-fallback behavior there.
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
fi
|
||||
fi
|
||||
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh)"
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
|
||||
bazel_targets=()
|
||||
while IFS= read -r target; do
|
||||
bazel_targets+=("${target}")
|
||||
@@ -252,7 +340,12 @@ jobs:
|
||||
# Rust debug assertions explicitly.
|
||||
bazel_wrapper_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
# This is build-only signal, so use the same Linux-RBE
|
||||
# cross-compile path as the fast Windows test and clippy jobs.
|
||||
# Fork/community PRs without the BuildBuddy secret fall back
|
||||
# inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
fi
|
||||
|
||||
bazel_build_args=(
|
||||
|
||||
.github/workflows/cargo-deny.yml (4 changed lines, vendored)
@@ -17,10 +17,10 @@ jobs:
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
|
||||
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
|
||||
- name: Run cargo-deny
|
||||
uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
|
||||
with:
|
||||
rust-version: stable
|
||||
rust-version: 1.93.0
|
||||
manifest-path: ./codex-rs/Cargo.toml
|
||||
|
||||
.github/workflows/issue-labeler.yml (12 changed lines, vendored)
@@ -44,7 +44,7 @@ jobs:
|
||||
6. iOS — Issues with the Codex iOS app.
|
||||
|
||||
- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
|
||||
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior.
|
||||
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
|
||||
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
|
||||
2. mcp — Topics involving Model Context Protocol servers/clients.
|
||||
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
|
||||
@@ -68,7 +68,15 @@ jobs:
|
||||
21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
|
||||
22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
|
||||
23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
|
||||
24. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, or plan.
|
||||
24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
|
||||
25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
|
||||
26. memory - Issues involving agentic memory storage and retrieval.
|
||||
27. imagen - Issues involving image generation.
|
||||
28. remote - Issues involving remote access, remote control, or SSH.
|
||||
29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
|
||||
30. automations - Issues involving scheduled automation tasks or heartbeats.
|
||||
31. pets - Issues involving pets avatars and animations.
|
||||
32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.
|
||||
|
||||
Issue number: ${{ github.event.issue.number }}
|
||||
|
||||
|
||||
.github/workflows/rust-release.yml (2 changed lines, vendored)
@@ -20,7 +20,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
- uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
|
||||
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
- name: Validate tag matches Cargo.toml version
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
BUILD.bazel (34 changed lines)
@@ -30,6 +30,40 @@ platform(
|
||||
parents = ["@platforms//host"],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_gnullvm",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_msvc",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
)
|
||||
|
||||
toolchain(
|
||||
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
|
||||
exec_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
target_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
toolchain = "@bazel_tools//tools/test:empty_toolchain",
|
||||
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "rbe",
|
||||
actual = "@rbe_platform",
|
||||
|
||||
@@ -6,4 +6,6 @@ ignore = [
|
||||
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
|
||||
"RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
|
||||
"RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
|
||||
"RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
|
||||
"RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
|
||||
]
|
||||
|
||||
codex-rs/.github/workflows/cargo-audit.yml (2 changed lines, vendored)
@@ -17,7 +17,7 @@ jobs:
|
||||
working-directory: codex-rs
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
|
||||
- name: Install cargo-audit
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
|
||||
codex-rs/Cargo.lock (73 changed lines, generated)
@@ -1857,8 +1857,8 @@ dependencies = [
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-analytics",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-app-server-transport",
|
||||
"codex-arg0",
|
||||
"codex-backend-client",
|
||||
"codex-chatgpt",
|
||||
@@ -1882,6 +1882,7 @@ dependencies = [
|
||||
"codex-model-provider-info",
|
||||
"codex-models-manager",
|
||||
"codex-otel",
|
||||
"codex-plugin",
|
||||
"codex-protocol",
|
||||
"codex-rmcp-client",
|
||||
"codex-rollout",
|
||||
@@ -1890,23 +1891,17 @@ dependencies = [
|
||||
"codex-state",
|
||||
"codex-thread-store",
|
||||
"codex-tools",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
"codex-utils-json-to-toml",
|
||||
"codex-utils-pty",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"core_test_support",
|
||||
"flate2",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"reqwest",
|
||||
"rmcp",
|
||||
@@ -2004,6 +1999,45 @@ dependencies = [
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-app-server-transport"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-config",
|
||||
"codex-core",
|
||||
"codex-login",
|
||||
"codex-model-provider",
|
||||
"codex-state",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"tempfile",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-tungstenite",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-apply-patch"
|
||||
version = "0.0.0"
|
||||
@@ -2100,9 +2134,11 @@ dependencies = [
|
||||
"codex-app-server-protocol",
|
||||
"codex-connectors",
|
||||
"codex-core",
|
||||
"codex-core-plugins",
|
||||
"codex-git-utils",
|
||||
"codex-login",
|
||||
"codex-model-provider",
|
||||
"codex-plugin",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
"pretty_assertions",
|
||||
@@ -2184,6 +2220,7 @@ dependencies = [
|
||||
"opentelemetry_sdk",
|
||||
"pretty_assertions",
|
||||
"rand 0.9.3",
|
||||
"rcgen",
|
||||
"reqwest",
|
||||
"rustls",
|
||||
"rustls-native-certs",
|
||||
@@ -2485,6 +2522,7 @@ name = "codex-core-api"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"codex-analytics",
|
||||
"codex-app-server-protocol",
|
||||
"codex-arg0",
|
||||
"codex-config",
|
||||
"codex-core",
|
||||
@@ -2503,6 +2541,7 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"chrono",
|
||||
"codex-analytics",
|
||||
"codex-app-server-protocol",
|
||||
"codex-config",
|
||||
"codex-core-skills",
|
||||
@@ -2999,6 +3038,23 @@ dependencies = [
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-mcp"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-output-truncation",
|
||||
"pretty_assertions",
|
||||
"rmcp",
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-read"
|
||||
version = "0.0.0"
|
||||
@@ -3515,6 +3571,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
"codex-core-api",
|
||||
"serde_json",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
@@ -3589,6 +3646,7 @@ dependencies = [
|
||||
"codex-install-context",
|
||||
"codex-login",
|
||||
"codex-mcp",
|
||||
"codex-model-provider",
|
||||
"codex-model-provider-info",
|
||||
"codex-models-manager",
|
||||
"codex-otel",
|
||||
@@ -3902,6 +3960,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"codex-otel",
|
||||
"codex-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-pty",
|
||||
|
||||
@@ -8,6 +8,7 @@ members = [
|
||||
"ansi-escape",
|
||||
"async-utils",
|
||||
"app-server",
|
||||
"app-server-transport",
|
||||
"app-server-client",
|
||||
"app-server-protocol",
|
||||
"app-server-test-client",
|
||||
@@ -51,6 +52,7 @@ members = [
|
||||
"login",
|
||||
"codex-mcp",
|
||||
"mcp-server",
|
||||
"memories/mcp",
|
||||
"memories/read",
|
||||
"memories/write",
|
||||
"model-provider-info",
|
||||
@@ -127,6 +129,7 @@ codex-ansi-escape = { path = "ansi-escape" }
|
||||
codex-api = { path = "codex-api" }
|
||||
codex-aws-auth = { path = "aws-auth" }
|
||||
codex-app-server = { path = "app-server" }
|
||||
codex-app-server-transport = { path = "app-server-transport" }
|
||||
codex-app-server-client = { path = "app-server-client" }
|
||||
codex-app-server-protocol = { path = "app-server-protocol" }
|
||||
codex-app-server-test-client = { path = "app-server-test-client" }
|
||||
@@ -166,6 +169,7 @@ codex-keyring-store = { path = "keyring-store" }
|
||||
codex-linux-sandbox = { path = "linux-sandbox" }
|
||||
codex-lmstudio = { path = "lmstudio" }
|
||||
codex-login = { path = "login" }
|
||||
codex-memories-mcp = { path = "memories/mcp" }
|
||||
codex-memories-read = { path = "memories/read" }
|
||||
codex-memories-write = { path = "memories/write" }
|
||||
codex-mcp = { path = "codex-mcp" }
|
||||
@@ -320,6 +324,10 @@ quick-xml = "0.38.4"
|
||||
rand = "0.9"
|
||||
ratatui = "0.29.0"
|
||||
ratatui-macros = "0.6.0"
|
||||
rcgen = { version = "0.14.7", default-features = false, features = [
|
||||
"aws_lc_rs",
|
||||
"pem",
|
||||
] }
|
||||
regex = "1.12.3"
|
||||
regex-lite = "0.1.8"
|
||||
reqwest = { version = "0.12", features = ["cookies"] }
|
||||
@@ -455,6 +463,7 @@ unwrap_used = "deny"
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = [
|
||||
"codex-agent-graph-store",
|
||||
"codex-memories-mcp",
|
||||
"icu_provider",
|
||||
"openssl-sys",
|
||||
"codex-utils-readiness",
|
||||
|
||||
@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t
|
||||
|
||||
### Notifications
|
||||
|
||||
You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
The legacy `notify` setting is deprecated and will be removed in a future release. Existing configurations still work, but new automation should use lifecycle hooks instead. The [notify documentation](../docs/config.md#notify) explains the remaining compatibility behavior. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
|
||||
### `codex exec` to run Codex programmatically/non-interactively
|
||||
|
||||
|
||||
@@ -98,6 +98,7 @@ use codex_protocol::protocol::AskForApproval;
|
||||
use codex_protocol::protocol::HookEventName;
|
||||
use codex_protocol::protocol::HookRunStatus;
|
||||
use codex_protocol::protocol::HookSource;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use codex_protocol::protocol::SubAgentSource;
|
||||
use codex_protocol::protocol::TokenUsage;
|
||||
@@ -302,17 +303,19 @@ fn sample_turn_completed_notification(
|
||||
})
|
||||
}
|
||||
|
||||
fn sample_turn_resolved_config(turn_id: &str) -> TurnResolvedConfigFact {
|
||||
fn sample_turn_resolved_config(thread_id: &str, turn_id: &str) -> TurnResolvedConfigFact {
|
||||
TurnResolvedConfigFact {
|
||||
turn_id: turn_id.to_string(),
|
||||
thread_id: "thread-2".to_string(),
|
||||
thread_id: thread_id.to_string(),
|
||||
num_input_images: 1,
|
||||
submission_type: None,
|
||||
ephemeral: false,
|
||||
session_source: SessionSource::Exec,
|
||||
model: "gpt-5".to_string(),
|
||||
model_provider: "openai".to_string(),
|
||||
permission_profile: CorePermissionProfile::read_only(),
|
||||
permission_profile: CorePermissionProfile::from_legacy_sandbox_policy(
|
||||
&SandboxPolicy::new_read_only_policy(),
|
||||
),
|
||||
permission_profile_cwd: PathBuf::from("/tmp"),
|
||||
reasoning_effort: None,
|
||||
reasoning_summary: None,
|
||||
@@ -416,6 +419,38 @@ async fn ingest_rejected_turn_steer(
|
||||
/*include_started*/ false, /*include_token_usage*/ false,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Initialize {
|
||||
connection_id: 8,
|
||||
params: InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: "codex-web".to_string(),
|
||||
title: None,
|
||||
version: "1.0.0".to_string(),
|
||||
},
|
||||
capabilities: None,
|
||||
},
|
||||
product_client_id: "codex-web".to_string(),
|
||||
runtime: sample_runtime_metadata(),
|
||||
rpc_transport: AppServerRpcTransport::Stdio,
|
||||
},
|
||||
out,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientResponse {
|
||||
connection_id: 8,
|
||||
request_id: RequestId::Integer(6),
|
||||
response: Box::new(sample_thread_resume_response(
|
||||
"thread-2", /*ephemeral*/ false, "gpt-5",
|
||||
)),
|
||||
},
|
||||
out,
|
||||
)
|
||||
.await;
|
||||
out.clear();
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientRequest {
|
||||
@@ -516,7 +551,7 @@ async fn ingest_turn_prerequisites(
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new(
|
||||
sample_turn_resolved_config("turn-2"),
|
||||
sample_turn_resolved_config("thread-2", "turn-2"),
|
||||
))),
|
||||
out,
|
||||
)
|
||||
@@ -1433,6 +1468,110 @@ async fn subagent_thread_started_publishes_without_initialize() {
|
||||
assert_eq!(payload[0]["event_params"]["subagent_source"], "review");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subagent_thread_started_inherits_parent_connection_for_new_thread() {
|
||||
let mut reducer = AnalyticsReducer::default();
|
||||
let mut events = Vec::new();
|
||||
let parent_thread_id =
|
||||
codex_protocol::ThreadId::from_string("44444444-4444-4444-4444-444444444444")
|
||||
.expect("valid parent thread id");
|
||||
let parent_thread_id_string = parent_thread_id.to_string();
|
||||
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Initialize {
|
||||
connection_id: 7,
|
||||
params: InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: "parent-client".to_string(),
|
||||
title: None,
|
||||
version: "1.0.0".to_string(),
|
||||
},
|
||||
capabilities: None,
|
||||
},
|
||||
product_client_id: "parent-client".to_string(),
|
||||
runtime: sample_runtime_metadata(),
|
||||
rpc_transport: AppServerRpcTransport::Stdio,
|
||||
},
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientResponse {
|
||||
connection_id: 7,
|
||||
request_id: RequestId::Integer(1),
|
||||
response: Box::new(sample_thread_start_response(
|
||||
&parent_thread_id_string,
|
||||
/*ephemeral*/ false,
|
||||
"gpt-5",
|
||||
)),
|
||||
},
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::SubAgentThreadStarted(
|
||||
SubAgentThreadStartedInput {
|
||||
thread_id: "thread-review".to_string(),
|
||||
parent_thread_id: None,
|
||||
product_client_id: "parent-client".to_string(),
|
||||
client_name: "parent-client".to_string(),
|
||||
client_version: "1.0.0".to_string(),
|
||||
model: "gpt-5".to_string(),
|
||||
ephemeral: false,
|
||||
subagent_source: SubAgentSource::ThreadSpawn {
|
||||
parent_thread_id,
|
||||
depth: 1,
|
||||
agent_path: None,
|
||||
agent_nickname: None,
|
||||
agent_role: None,
|
||||
},
|
||||
created_at: 130,
|
||||
},
|
||||
)),
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
events.clear();
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::Compaction(Box::new(
|
||||
CodexCompactionEvent {
|
||||
thread_id: "thread-review".to_string(),
|
||||
turn_id: "turn-compact".to_string(),
|
||||
trigger: CompactionTrigger::Manual,
|
||||
reason: CompactionReason::UserRequested,
|
||||
implementation: CompactionImplementation::Responses,
|
||||
phase: CompactionPhase::StandaloneTurn,
|
||||
strategy: CompactionStrategy::Memento,
|
||||
status: CompactionStatus::Completed,
|
||||
error: None,
|
||||
active_context_tokens_before: 131_000,
|
||||
active_context_tokens_after: 64_000,
|
||||
started_at: 100,
|
||||
completed_at: 101,
|
||||
duration_ms: Some(1200),
|
||||
},
|
||||
))),
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
let payload = serde_json::to_value(&events).expect("serialize events");
|
||||
assert_eq!(
|
||||
payload[0]["event_params"]["app_server_client"]["product_client_id"],
|
||||
"parent-client"
|
||||
);
|
||||
assert_eq!(
|
||||
payload[0]["event_params"]["parent_thread_id"],
|
||||
"44444444-4444-4444-4444-444444444444"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_used_event_serializes_expected_shape() {
|
||||
let tracking = TrackEventsContext {
|
||||
@@ -1493,6 +1632,25 @@ fn plugin_management_event_serializes_expected_shape() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_management_event_can_use_remote_plugin_id_override() {
|
||||
let mut plugin = sample_plugin_metadata();
|
||||
plugin.remote_plugin_id = Some("plugins~Plugin_remote".to_string());
|
||||
let event = TrackEventRequest::PluginInstalled(CodexPluginEventRequest {
|
||||
event_type: "codex_plugin_installed",
|
||||
event_params: codex_plugin_metadata(plugin),
|
||||
});
|
||||
|
||||
let payload = serde_json::to_value(&event).expect("serialize plugin installed event");
|
||||
|
||||
assert_eq!(
|
||||
payload["event_params"]["plugin_id"],
|
||||
"plugins~Plugin_remote"
|
||||
);
|
||||
assert_eq!(payload["event_params"]["plugin_name"], "sample");
|
||||
assert_eq!(payload["event_params"]["marketplace_name"], "test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hook_run_event_serializes_expected_shape() {
|
||||
let tracking = TrackEventsContext {
|
||||
@@ -2127,7 +2285,7 @@ async fn turn_start_error_response_discards_pending_start_request() {
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new(
|
||||
sample_turn_resolved_config("turn-2"),
|
||||
sample_turn_resolved_config("thread-2", "turn-2"),
|
||||
))),
|
||||
&mut out,
|
||||
)
|
||||
@@ -2479,6 +2637,7 @@ async fn turn_completed_without_started_notification_emits_null_started_at() {
|
||||
fn sample_plugin_metadata() -> PluginTelemetryMetadata {
|
||||
PluginTelemetryMetadata {
|
||||
plugin_id: PluginId::parse("sample@test").expect("valid plugin id"),
|
||||
remote_plugin_id: None,
|
||||
capability_summary: Some(PluginCapabilitySummary {
|
||||
config_name: "sample@test".to_string(),
|
||||
display_name: "sample".to_string(),
|
||||
|
||||
@@ -587,11 +587,16 @@ pub(crate) fn codex_app_metadata(
|
||||
}
|
||||
|
||||
pub(crate) fn codex_plugin_metadata(plugin: PluginTelemetryMetadata) -> CodexPluginMetadata {
|
||||
let capability_summary = plugin.capability_summary;
|
||||
let PluginTelemetryMetadata {
|
||||
plugin_id,
|
||||
remote_plugin_id,
|
||||
capability_summary,
|
||||
} = plugin;
|
||||
let event_plugin_id = remote_plugin_id.unwrap_or_else(|| plugin_id.as_key());
|
||||
CodexPluginMetadata {
|
||||
plugin_id: Some(plugin.plugin_id.as_key()),
|
||||
plugin_name: Some(plugin.plugin_id.plugin_name),
|
||||
marketplace_name: Some(plugin.plugin_id.marketplace_name),
|
||||
plugin_id: Some(event_plugin_id),
|
||||
plugin_name: Some(plugin_id.plugin_name),
|
||||
marketplace_name: Some(plugin_id.marketplace_name),
|
||||
has_skills: capability_summary
|
||||
.as_ref()
|
||||
.map(|summary| summary.has_skills),
|
||||
|
||||
@@ -74,8 +74,7 @@ pub(crate) struct AnalyticsReducer {
|
||||
requests: HashMap<(u64, RequestId), RequestState>,
|
||||
turns: HashMap<String, TurnState>,
|
||||
connections: HashMap<u64, ConnectionState>,
|
||||
thread_connections: HashMap<String, u64>,
|
||||
thread_metadata: HashMap<String, ThreadMetadataState>,
|
||||
threads: HashMap<String, ThreadAnalyticsState>,
|
||||
}
|
||||
|
||||
struct ConnectionState {
|
||||
@@ -83,6 +82,69 @@ struct ConnectionState {
|
||||
runtime: CodexRuntimeMetadata,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct ThreadAnalyticsState {
|
||||
connection_id: Option<u64>,
|
||||
metadata: Option<ThreadMetadataState>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct AnalyticsDropSite<'a> {
|
||||
event_name: &'static str,
|
||||
thread_id: &'a str,
|
||||
turn_id: Option<&'a str>,
|
||||
review_id: Option<&'a str>,
|
||||
item_id: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl<'a> AnalyticsDropSite<'a> {
|
||||
fn guardian(input: &'a GuardianReviewEventParams) -> Self {
|
||||
Self {
|
||||
event_name: "guardian",
|
||||
thread_id: &input.thread_id,
|
||||
turn_id: Some(&input.turn_id),
|
||||
review_id: Some(&input.review_id),
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn compaction(input: &'a CodexCompactionEvent) -> Self {
|
||||
Self {
|
||||
event_name: "compaction",
|
||||
thread_id: &input.thread_id,
|
||||
turn_id: Some(&input.turn_id),
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn turn_steer(thread_id: &'a str) -> Self {
|
||||
Self {
|
||||
event_name: "turn steer",
|
||||
thread_id,
|
||||
turn_id: None,
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn turn(thread_id: &'a str, turn_id: &'a str) -> Self {
|
||||
Self {
|
||||
event_name: "turn",
|
||||
thread_id,
|
||||
turn_id: Some(turn_id),
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum MissingAnalyticsContext {
|
||||
ThreadConnection,
|
||||
Connection { connection_id: u64 },
|
||||
ThreadMetadata,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct ThreadMetadataState {
|
||||
thread_source: Option<&'static str>,
|
||||
@@ -274,6 +336,26 @@ impl AnalyticsReducer {
|
||||
input: SubAgentThreadStartedInput,
|
||||
out: &mut Vec<TrackEventRequest>,
|
||||
) {
|
||||
let parent_thread_id = input
|
||||
.parent_thread_id
|
||||
.clone()
|
||||
.or_else(|| subagent_parent_thread_id(&input.subagent_source));
|
||||
let parent_connection_id = parent_thread_id
|
||||
.as_ref()
|
||||
.and_then(|parent_thread_id| self.threads.get(parent_thread_id))
|
||||
.and_then(|thread| thread.connection_id);
|
||||
let thread_state = self.threads.entry(input.thread_id.clone()).or_default();
|
||||
thread_state
|
||||
.metadata
|
||||
.get_or_insert_with(|| ThreadMetadataState {
|
||||
thread_source: Some("subagent"),
|
||||
initialization_mode: ThreadInitializationMode::New,
|
||||
subagent_source: Some(subagent_source_name(&input.subagent_source)),
|
||||
parent_thread_id,
|
||||
});
|
||||
if thread_state.connection_id.is_none() {
|
||||
thread_state.connection_id = parent_connection_id;
|
||||
}
|
||||
out.push(TrackEventRequest::ThreadInitialized(
|
||||
subagent_thread_started_event_request(input),
|
||||
));
|
||||
@@ -284,23 +366,9 @@ impl AnalyticsReducer {
|
||||
input: GuardianReviewEventParams,
|
||||
out: &mut Vec<TrackEventRequest>,
|
||||
) {
|
||||
let Some(connection_id) = self.thread_connections.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
review_id = %input.review_id,
|
||||
"dropping guardian analytics event: missing thread connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(connection_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
review_id = %input.review_id,
|
||||
connection_id,
|
||||
"dropping guardian analytics event: missing connection metadata"
|
||||
);
|
||||
let Some(connection_state) =
|
||||
self.thread_connection_or_warn(AnalyticsDropSite::guardian(&input))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::GuardianReview(Box::new(
|
||||
@@ -686,10 +754,13 @@ impl AnalyticsReducer {
|
||||
};
|
||||
let thread_metadata =
|
||||
ThreadMetadataState::from_thread_metadata(&thread_source, initialization_mode);
|
||||
self.thread_connections
|
||||
.insert(thread_id.clone(), connection_id);
|
||||
self.thread_metadata
|
||||
.insert(thread_id.clone(), thread_metadata.clone());
|
||||
self.threads.insert(
|
||||
thread_id.clone(),
|
||||
ThreadAnalyticsState {
|
||||
connection_id: Some(connection_id),
|
||||
metadata: Some(thread_metadata.clone()),
|
||||
},
|
||||
);
|
||||
out.push(TrackEventRequest::ThreadInitialized(
|
||||
ThreadInitializedEvent {
|
||||
event_type: "codex_thread_initialized",
|
||||
@@ -710,29 +781,9 @@ impl AnalyticsReducer {
|
||||
}
|
||||
|
||||
fn ingest_compaction(&mut self, input: CodexCompactionEvent, out: &mut Vec<TrackEventRequest>) {
|
||||
let Some(connection_id) = self.thread_connections.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
"dropping compaction analytics event: missing thread connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(connection_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
connection_id,
|
||||
"dropping compaction analytics event: missing connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
"dropping compaction analytics event: missing thread lifecycle metadata"
|
||||
);
|
||||
let Some((connection_state, thread_metadata)) =
|
||||
self.thread_context_or_warn(AnalyticsDropSite::compaction(&input))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::Compaction(Box::new(
|
||||
@@ -787,11 +838,13 @@ impl AnalyticsReducer {
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(&pending_request.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %pending_request.thread_id,
|
||||
"dropping turn steer analytics event: missing thread lifecycle metadata"
|
||||
);
|
||||
let drop_site = AnalyticsDropSite::turn_steer(&pending_request.thread_id);
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::TurnSteer(CodexTurnSteerEventRequest {
|
||||
@@ -824,42 +877,34 @@ impl AnalyticsReducer {
|
||||
{
|
||||
return;
|
||||
}
|
||||
let connection_metadata = turn_state
|
||||
.connection_id
|
||||
.and_then(|connection_id| self.connections.get(&connection_id))
|
||||
.map(|connection_state| {
|
||||
(
|
||||
connection_state.app_server_client.clone(),
|
||||
connection_state.runtime.clone(),
|
||||
)
|
||||
});
|
||||
let Some((app_server_client, runtime)) = connection_metadata else {
|
||||
if let Some(connection_id) = turn_state.connection_id {
|
||||
tracing::warn!(
|
||||
turn_id,
|
||||
connection_id,
|
||||
"dropping turn analytics event: missing connection metadata"
|
||||
);
|
||||
}
|
||||
return;
|
||||
};
|
||||
let Some(thread_id) = turn_state.thread_id.as_ref() else {
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id,
|
||||
turn_id,
|
||||
"dropping turn analytics event: missing thread lifecycle metadata"
|
||||
let Some(connection_id) = turn_state.connection_id else {
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
warn_missing_analytics_context(
|
||||
&AnalyticsDropSite::turn(thread_id, turn_id),
|
||||
MissingAnalyticsContext::Connection { connection_id },
|
||||
);
|
||||
return;
|
||||
};
|
||||
let drop_site = AnalyticsDropSite::turn(thread_id, turn_id);
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::TurnEvent(Box::new(
|
||||
CodexTurnEventRequest {
|
||||
event_type: "codex_turn_event",
|
||||
event_params: codex_turn_event_params(
|
||||
app_server_client,
|
||||
runtime,
|
||||
connection_state.app_server_client.clone(),
|
||||
connection_state.runtime.clone(),
|
||||
turn_id.to_string(),
|
||||
turn_state,
|
||||
thread_metadata,
|
||||
@@ -868,6 +913,67 @@ impl AnalyticsReducer {
|
||||
)));
|
||||
self.turns.remove(turn_id);
|
||||
}
|
||||
|
||||
fn thread_connection_or_warn(
|
||||
&self,
|
||||
drop_site: AnalyticsDropSite<'_>,
|
||||
) -> Option<&ConnectionState> {
|
||||
let Some(thread_state) = self.threads.get(drop_site.thread_id) else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadConnection);
|
||||
return None;
|
||||
};
|
||||
let Some(connection_id) = thread_state.connection_id else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadConnection);
|
||||
return None;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
warn_missing_analytics_context(
|
||||
&drop_site,
|
||||
MissingAnalyticsContext::Connection { connection_id },
|
||||
);
|
||||
return None;
|
||||
};
|
||||
Some(connection_state)
|
||||
}
|
||||
|
||||
fn thread_context_or_warn(
|
||||
&self,
|
||||
drop_site: AnalyticsDropSite<'_>,
|
||||
) -> Option<(&ConnectionState, &ThreadMetadataState)> {
|
||||
let connection_state = self.thread_connection_or_warn(drop_site)?;
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return None;
|
||||
};
|
||||
Some((connection_state, thread_metadata))
|
||||
}
|
||||
}
|
||||
|
||||
fn warn_missing_analytics_context(
|
||||
drop_site: &AnalyticsDropSite<'_>,
|
||||
missing: MissingAnalyticsContext,
|
||||
) {
|
||||
let (missing_context, connection_id) = match missing {
|
||||
MissingAnalyticsContext::ThreadConnection => ("thread_connection", None),
|
||||
MissingAnalyticsContext::Connection { connection_id } => {
|
||||
("connection", Some(connection_id))
|
||||
}
|
||||
MissingAnalyticsContext::ThreadMetadata => ("thread_metadata", None),
|
||||
};
|
||||
tracing::warn!(
|
||||
thread_id = %drop_site.thread_id,
|
||||
turn_id = ?drop_site.turn_id,
|
||||
review_id = ?drop_site.review_id,
|
||||
item_id = ?drop_site.item_id,
|
||||
missing_context,
|
||||
connection_id,
|
||||
"dropping {} analytics event: missing analytics context",
|
||||
drop_site.event_name
|
||||
);
|
||||
}
|
||||
|
||||
fn codex_turn_event_params(
|
||||
@@ -979,7 +1085,7 @@ fn sandbox_policy_mode(permission_profile: &PermissionProfile, cwd: &Path) -> &'
|
||||
if permission_profile.network_sandbox_policy().is_enabled() {
|
||||
"full_access"
|
||||
} else {
|
||||
"custom_permissions"
|
||||
"external_sandbox"
|
||||
}
|
||||
} else if file_system_policy
|
||||
.get_writable_roots_with_cwd(cwd)
|
||||
@@ -1089,7 +1195,7 @@ mod tests {
|
||||
use codex_protocol::permissions::NetworkSandboxPolicy;
|
||||
|
||||
#[test]
|
||||
fn managed_full_disk_with_restricted_network_reports_custom_permissions() {
|
||||
fn managed_full_disk_with_restricted_network_reports_external_sandbox() {
|
||||
let permission_profile = PermissionProfile::from_runtime_permissions_with_enforcement(
|
||||
SandboxEnforcement::Managed,
|
||||
&FileSystemSandboxPolicy::unrestricted(),
|
||||
@@ -1098,7 +1204,7 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
sandbox_policy_mode(&permission_profile, Path::new("/")),
|
||||
"custom_permissions"
|
||||
"external_sandbox"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ pub use codex_app_server::in_process::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY;
|
||||
pub use codex_app_server::in_process::InProcessServerEvent;
|
||||
use codex_app_server::in_process::InProcessStartArgs;
|
||||
use codex_app_server::in_process::LogDbLayer;
|
||||
pub use codex_app_server::in_process::StateDbHandle;
|
||||
use codex_app_server_protocol::ClientInfo;
|
||||
use codex_app_server_protocol::ClientNotification;
|
||||
use codex_app_server_protocol::ClientRequest;
|
||||
@@ -99,10 +100,6 @@ pub mod legacy_core {
|
||||
pub use codex_core::personality_migration::*;
|
||||
}
|
||||
|
||||
pub mod plugins {
|
||||
pub use codex_core::plugins::PluginsManager;
|
||||
}
|
||||
|
||||
pub mod review_format {
|
||||
pub use codex_core::review_format::*;
|
||||
}
|
||||
@@ -304,7 +301,15 @@ impl fmt::Display for TypedRequestError {
|
||||
write!(f, "{method} transport error: {source}")
|
||||
}
|
||||
Self::Server { method, source } => {
|
||||
write!(f, "{method} failed: {}", source.message)
|
||||
write!(
|
||||
f,
|
||||
"{method} failed: {} (code {})",
|
||||
source.message, source.code
|
||||
)?;
|
||||
if let Some(data) = source.data.as_ref() {
|
||||
write!(f, ", data: {data}")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Self::Deserialize { method, source } => {
|
||||
write!(f, "{method} response decode error: {source}")
|
||||
@@ -339,6 +344,8 @@ pub struct InProcessClientStartArgs {
|
||||
pub feedback: CodexFeedback,
|
||||
/// SQLite tracing layer used to flush recently emitted logs before feedback upload.
|
||||
pub log_db: Option<LogDbLayer>,
|
||||
/// Process-wide SQLite state handle shared with the embedded app-server.
|
||||
pub state_db: Option<StateDbHandle>,
|
||||
/// Environment manager used by core execution and filesystem operations.
|
||||
pub environment_manager: Arc<EnvironmentManager>,
|
||||
/// Startup warnings emitted after initialize succeeds.
|
||||
@@ -400,6 +407,7 @@ impl InProcessClientStartArgs {
|
||||
thread_config_loader,
|
||||
feedback: self.feedback,
|
||||
log_db: self.log_db,
|
||||
state_db: self.state_db,
|
||||
environment_manager: self.environment_manager,
|
||||
config_warnings: self.config_warnings,
|
||||
session_source: self.session_source,
|
||||
@@ -979,6 +987,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
config_warnings: Vec::new(),
|
||||
session_source,
|
||||
@@ -1919,11 +1928,15 @@ mod tests {
|
||||
method: "thread/read".to_string(),
|
||||
source: JSONRPCErrorError {
|
||||
code: -32603,
|
||||
data: None,
|
||||
data: Some(serde_json::json!({"detail": "config lock mismatch"})),
|
||||
message: "internal".to_string(),
|
||||
},
|
||||
};
|
||||
assert_eq!(std::error::Error::source(&server).is_some(), false);
|
||||
assert_eq!(
|
||||
server.to_string(),
|
||||
"thread/read failed: internal (code -32603), data: {\"detail\":\"config lock mismatch\"}"
|
||||
);
|
||||
|
||||
let deserialize = TypedRequestError::Deserialize {
|
||||
method: "thread/start".to_string(),
|
||||
@@ -2049,6 +2062,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: environment_manager.clone(),
|
||||
config_warnings: Vec::new(),
|
||||
session_source: SessionSource::Exec,
|
||||
@@ -2088,6 +2102,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
config_warnings: Vec::new(),
|
||||
session_source: SessionSource::Exec,
|
||||
|
||||
@@ -2217,6 +2217,25 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"type": "object"
|
||||
},
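For orientation, here is a minimal sketch of the params shape the new `PluginSkillReadParams` schema describes, wrapped in the `plugin/skill/read` request envelope defined later in this schema. The marketplace, plugin, and skill values are placeholders, not real identifiers.

```rust
use serde_json::json;

fn main() {
    // Sketch only: field names come from the PluginSkillReadParams schema above
    // and the method name from the Plugin/skill/readRequest definition below;
    // the id and all string values are illustrative placeholders.
    let request = json!({
        "id": 1,
        "method": "plugin/skill/read",
        "params": {
            "remoteMarketplaceName": "example-marketplace",
            "remotePluginId": "example-plugin",
            "skillName": "example-skill"
        }
    });
    println!("{request}");
}
```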
|
||||
"PluginUninstallParams": {
|
||||
"properties": {
|
||||
"pluginId": {
|
||||
@@ -2831,6 +2850,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
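As a rough illustration of the `ContextCompactionResponseItem` variant added above: only `type` is required, and `encrypted_content` may be a string or null. The ciphertext below is a placeholder.

```rust
use serde_json::json;

fn main() {
    // Sketch only: "type" is the required discriminant; "encrypted_content"
    // is optional/nullable, and the value here is an invented placeholder.
    let item = json!({
        "type": "context_compaction",
        "encrypted_content": "opaque-ciphertext-placeholder"
    });
    println!("{item}");
}
```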
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -3453,10 +3494,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -3858,10 +3895,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -4121,44 +4154,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"properties": {
|
||||
"threadId": {
|
||||
@@ -4889,30 +4884,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -5106,6 +5077,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
|
||||
@@ -1032,6 +1032,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -3930,7 +3931,7 @@
|
||||
"ThreadRealtimeStartedNotification": {
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -5191,6 +5192,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
|
||||
@@ -569,30 +569,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -786,6 +762,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -4289,6 +4289,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
@@ -8600,6 +8601,7 @@
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -11954,6 +11956,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
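One plausible way to honor the `ENABLED`/`AVAILABLE` aliasing described above on the decoding side is a serde alias; this is a sketch under that assumption, not necessarily how the app-server actually models it.

```rust
use serde::Deserialize;

// Sketch only: accepts both the plugin-service spelling ("ENABLED") and the
// app-server spelling ("AVAILABLE") for the available state.
#[derive(Debug, Deserialize, PartialEq)]
enum PluginAvailability {
    #[serde(rename = "DISABLED_BY_ADMIN")]
    DisabledByAdmin,
    #[serde(rename = "AVAILABLE", alias = "ENABLED")]
    Available,
}

fn main() {
    let decoded: PluginAvailability = serde_json::from_str("\"ENABLED\"").expect("decode");
    assert_eq!(decoded, PluginAvailability::Available);
}
```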
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -12339,6 +12358,31 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
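To make the new share-listing shape concrete before the `PluginShareListResponse` hunk below switches its `data` items to this type, here is a hedged sketch of a single `PluginShareListItem`. The `plugin` field must be a full `PluginSummary` (abbreviated to an empty object here), and the URL and path values are invented.

```rust
use serde_json::json;

fn main() {
    // Sketch only: "plugin" and "shareUrl" are required by the schema above,
    // "localPluginPath" is optional; every value shown is a placeholder.
    let item = json!({
        "plugin": {},
        "shareUrl": "https://example.invalid/share/abc123",
        "localPluginPath": "/home/user/plugins/sample"
    });
    println!("{item}");
}
```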
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -12349,7 +12393,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
"$ref": "#/definitions/v2/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -12396,6 +12440,40 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -12480,6 +12558,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/v2/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -13704,6 +13791,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -15051,10 +15160,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -16436,7 +16541,7 @@
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -16552,10 +16657,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -17150,76 +17251,6 @@
|
||||
"title": "ThreadTokenUsageUpdatedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"backwardsCursor": {
|
||||
"description": "Opaque cursor to pass as `cursor` when reversing `sortDirection`. This is only populated when the page contains at least one turn. Use it with the opposite `sortDirection` to include the anchor turn again and catch updates to that turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/Turn"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"nextCursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn. if None, there are no more turns to return.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"data"
|
||||
],
|
||||
"title": "ThreadTurnsListResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
|
||||
@@ -1328,30 +1328,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -1545,6 +1521,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -5099,6 +5099,7 @@
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -8608,6 +8609,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -8993,6 +9011,31 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -9003,7 +9046,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -9050,6 +9093,40 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -9134,6 +9211,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -10358,6 +10444,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -11307,6 +11415,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
@@ -12937,10 +13046,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -14322,7 +14427,7 @@
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -14438,10 +14543,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -15036,76 +15137,6 @@
|
||||
"title": "ThreadTokenUsageUpdatedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"backwardsCursor": {
|
||||
"description": "Opaque cursor to pass as `cursor` when reversing `sortDirection`. This is only populated when the page contains at least one turn. Use it with the opposite `sortDirection` to include the anchor turn again and catch updates to that turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/Turn"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"nextCursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn. if None, there are no more turns to return.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"data"
|
||||
],
|
||||
"title": "ThreadTurnsListResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
|
||||
@@ -38,6 +38,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginInstallPolicy": {
|
||||
"enum": [
|
||||
"NOT_AVAILABLE",
|
||||
@@ -299,6 +316,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -44,6 +44,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -318,6 +335,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -12,6 +12,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginInstallPolicy": {
|
||||
"enum": [
|
||||
"NOT_AVAILABLE",
|
||||
@@ -150,6 +167,31 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -234,6 +276,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -278,7 +329,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
|
||||
codex-rs/app-server-protocol/schema/json/v2/PluginSkillReadParams.json (generated, new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
}
|
||||
codex-rs/app-server-protocol/schema/json/v2/PluginSkillReadResponse.json (generated, new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
}
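A minimal sketch of how a client could model the generated `PluginSkillReadResponse` above, assuming serde; the field name comes from the schema, everything else is illustrative.

```rust
use serde::Deserialize;

// Sketch only: mirrors the generated schema, where "contents" is a nullable string.
#[derive(Debug, Deserialize)]
struct PluginSkillReadResponse {
    contents: Option<String>,
}

fn main() {
    let decoded: PluginSkillReadResponse =
        serde_json::from_str(r#"{"contents": "skill markdown body"}"#).expect("decode");
    assert_eq!(decoded.contents.as_deref(), Some("skill markdown body"));
}
```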
|
||||
@@ -732,6 +732,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
@@ -190,10 +190,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
},
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
|
||||
@@ -862,6 +862,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -1045,10 +1067,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"SortDirection": {
|
||||
"enum": [
|
||||
"asc",
|
||||
"desc"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
}
|
||||
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -14,4 +14,4 @@ export type ResponseItem = { "type": "message", role: string, content: Array<Con
|
||||
/**
|
||||
* Set when using the Responses API.
|
||||
*/
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "other" };
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "context_compaction", encrypted_content?: string, } | { "type": "other" };
|
||||
|
||||
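The `context_compaction` member added to the ResponseItem union above mirrors the ContextCompactionResponseItem schema hunks earlier in this diff. A sketch of the wire shape follows; the ciphertext value is a placeholder, and `encrypted_content` may also be omitted or null.

use serde_json::json;

fn main() {
    // New response item variant: `type` is the only required field.
    let item = json!({
        "type": "context_compaction",
        "encrypted_content": "opaque-ciphertext",
    });
    println!("{item}");
}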
@@ -2,4 +2,9 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
/**
|
||||
* Deprecated legacy notification for `apply_patch` textual output.
|
||||
*
|
||||
* The server no longer emits this notification.
|
||||
*/
|
||||
export type FileChangeOutputDeltaNotification = { threadId: string, turnId: string, itemId: string, delta: string, };
|
||||
|
||||
codex-rs/app-server-protocol/schema/typescript/v2/PluginAvailability.ts (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginAvailability = "AVAILABLE" | "DISABLED_BY_ADMIN";
codex-rs/app-server-protocol/schema/typescript/v2/PluginShareListItem.ts (generated, new file, 7 lines)
@@ -0,0 +1,7 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AbsolutePathBuf } from "../AbsolutePathBuf";
import type { PluginSummary } from "./PluginSummary";

export type PluginShareListItem = { plugin: PluginSummary, shareUrl: string, localPluginPath: AbsolutePathBuf | null, };
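With `PluginShareListResponse.data` switching to this type in the next hunk, a `plugin/share/list` entry would serialize roughly as below. This is a sketch based on the schema only: the nested PluginSummary is abbreviated to two of its required fields, the URL and path values are placeholders, and `localPluginPath` may be null or omitted.

use serde_json::json;

fn main() {
    let item = json!({
        "plugin": {
            "id": "plugins~Plugin_00000000000000000000000000000000",
            "name": "gmail",
            // ...remaining PluginSummary fields elided for brevity...
        },
        "shareUrl": "https://chatgpt.example/share/plugin-123",
        "localPluginPath": "/home/user/.codex/plugins/gmail",
    });
    println!("{item}");
}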
@@ -1,6 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { PluginSummary } from "./PluginSummary";
|
||||
import type { PluginShareListItem } from "./PluginShareListItem";
|
||||
|
||||
export type PluginShareListResponse = { data: Array<PluginSummary>, };
|
||||
export type PluginShareListResponse = { data: Array<PluginShareListItem>, };
|
||||
|
||||
codex-rs/app-server-protocol/schema/typescript/v2/PluginSkillReadParams.ts (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadParams = { remoteMarketplaceName: string, remotePluginId: string, skillName: string, };
codex-rs/app-server-protocol/schema/typescript/v2/PluginSkillReadResponse.ts (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadResponse = { contents: string | null, };
@@ -2,8 +2,13 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { PluginAuthPolicy } from "./PluginAuthPolicy";
|
||||
import type { PluginAvailability } from "./PluginAvailability";
|
||||
import type { PluginInstallPolicy } from "./PluginInstallPolicy";
|
||||
import type { PluginInterface } from "./PluginInterface";
|
||||
import type { PluginSource } from "./PluginSource";
|
||||
|
||||
export type PluginSummary = { id: string, name: string, source: PluginSource, installed: boolean, enabled: boolean, installPolicy: PluginInstallPolicy, authPolicy: PluginAuthPolicy, interface: PluginInterface | null, };
|
||||
export type PluginSummary = { id: string, name: string, source: PluginSource, installed: boolean, enabled: boolean, installPolicy: PluginInstallPolicy, authPolicy: PluginAuthPolicy,
|
||||
/**
|
||||
* Availability state for installing and using the plugin.
|
||||
*/
|
||||
availability: PluginAvailability, interface: PluginInterface | null, };
|
||||
|
||||
@@ -23,9 +23,4 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier
|
||||
* Override where approval requests are routed for review on this thread
|
||||
* and subsequent turns.
|
||||
*/
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, ephemeral?: boolean, /**
|
||||
* When true, return only thread metadata and live fork state without
|
||||
* populating `thread.turns`. This is useful when the client plans to call
|
||||
* `thread/turns/list` immediately after forking.
|
||||
*/
|
||||
excludeTurns?: boolean};
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, ephemeral?: boolean};
|
||||
|
||||
@@ -6,4 +6,4 @@ import type { RealtimeConversationVersion } from "../RealtimeConversationVersion
|
||||
/**
|
||||
* EXPERIMENTAL - emitted when thread realtime startup is accepted.
|
||||
*/
|
||||
export type ThreadRealtimeStartedNotification = { threadId: string, sessionId: string | null, version: RealtimeConversationVersion, };
|
||||
export type ThreadRealtimeStartedNotification = { threadId: string, realtimeSessionId: string | null, version: RealtimeConversationVersion, };
|
||||
|
||||
@@ -26,9 +26,4 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier
|
||||
* Override where approval requests are routed for review on this thread
|
||||
* and subsequent turns.
|
||||
*/
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, /**
|
||||
* When true, return only thread metadata and live-resume state without
|
||||
* populating `thread.turns`. This is useful when the client plans to call
|
||||
* `thread/turns/list` immediately after resuming.
|
||||
*/
|
||||
excludeTurns?: boolean};
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null};
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { SortDirection } from "./SortDirection";
|
||||
|
||||
export type ThreadTurnsListParams = { threadId: string,
|
||||
/**
|
||||
* Opaque cursor to pass to the next call to continue after the last turn.
|
||||
*/
|
||||
cursor?: string | null,
|
||||
/**
|
||||
* Optional turn page size.
|
||||
*/
|
||||
limit?: number | null,
|
||||
/**
|
||||
* Optional turn pagination direction; defaults to descending.
|
||||
*/
|
||||
sortDirection?: SortDirection | null, };
|
||||
@@ -1,18 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { Turn } from "./Turn";
|
||||
|
||||
export type ThreadTurnsListResponse = { data: Array<Turn>,
|
||||
/**
|
||||
* Opaque cursor to pass to the next call to continue after the last turn.
|
||||
* if None, there are no more turns to return.
|
||||
*/
|
||||
nextCursor: string | null,
|
||||
/**
|
||||
* Opaque cursor to pass as `cursor` when reversing `sortDirection`.
|
||||
* This is only populated when the page contains at least one turn.
|
||||
* Use it with the opposite `sortDirection` to include the anchor turn again
|
||||
* and catch updates to that turn.
|
||||
*/
|
||||
backwardsCursor: string | null, };
|
||||
@@ -270,6 +270,7 @@ export type { PermissionsRequestApprovalParams } from "./PermissionsRequestAppro
|
||||
export type { PermissionsRequestApprovalResponse } from "./PermissionsRequestApprovalResponse";
|
||||
export type { PlanDeltaNotification } from "./PlanDeltaNotification";
|
||||
export type { PluginAuthPolicy } from "./PluginAuthPolicy";
|
||||
export type { PluginAvailability } from "./PluginAvailability";
|
||||
export type { PluginDetail } from "./PluginDetail";
|
||||
export type { PluginInstallParams } from "./PluginInstallParams";
|
||||
export type { PluginInstallPolicy } from "./PluginInstallPolicy";
|
||||
@@ -282,10 +283,13 @@ export type { PluginReadParams } from "./PluginReadParams";
|
||||
export type { PluginReadResponse } from "./PluginReadResponse";
|
||||
export type { PluginShareDeleteParams } from "./PluginShareDeleteParams";
|
||||
export type { PluginShareDeleteResponse } from "./PluginShareDeleteResponse";
|
||||
export type { PluginShareListItem } from "./PluginShareListItem";
|
||||
export type { PluginShareListParams } from "./PluginShareListParams";
|
||||
export type { PluginShareListResponse } from "./PluginShareListResponse";
|
||||
export type { PluginShareSaveParams } from "./PluginShareSaveParams";
|
||||
export type { PluginShareSaveResponse } from "./PluginShareSaveResponse";
|
||||
export type { PluginSkillReadParams } from "./PluginSkillReadParams";
|
||||
export type { PluginSkillReadResponse } from "./PluginSkillReadResponse";
|
||||
export type { PluginSource } from "./PluginSource";
|
||||
export type { PluginSummary } from "./PluginSummary";
|
||||
export type { PluginUninstallParams } from "./PluginUninstallParams";
|
||||
@@ -395,8 +399,6 @@ export type { ThreadStatus } from "./ThreadStatus";
|
||||
export type { ThreadStatusChangedNotification } from "./ThreadStatusChangedNotification";
|
||||
export type { ThreadTokenUsage } from "./ThreadTokenUsage";
|
||||
export type { ThreadTokenUsageUpdatedNotification } from "./ThreadTokenUsageUpdatedNotification";
|
||||
export type { ThreadTurnsListParams } from "./ThreadTurnsListParams";
|
||||
export type { ThreadTurnsListResponse } from "./ThreadTurnsListResponse";
|
||||
export type { ThreadUnarchiveParams } from "./ThreadUnarchiveParams";
|
||||
export type { ThreadUnarchiveResponse } from "./ThreadUnarchiveResponse";
|
||||
export type { ThreadUnarchivedNotification } from "./ThreadUnarchivedNotification";
|
||||
|
||||
@@ -14,6 +14,7 @@ pub use export::generate_ts_with_options;
|
||||
pub use export::generate_types;
|
||||
pub use jsonrpc_lite::*;
|
||||
pub use protocol::common::*;
|
||||
pub use protocol::event_mapping::*;
|
||||
pub use protocol::item_builders::*;
|
||||
pub use protocol::thread_history::*;
|
||||
pub use protocol::v1::ApplyPatchApprovalParams;
|
||||
|
||||
@@ -564,6 +564,7 @@ client_request_definitions! {
|
||||
serialization: thread_id(params.thread_id),
|
||||
response: v2::ThreadReadResponse,
|
||||
},
|
||||
#[experimental("thread/turns/list")]
|
||||
ThreadTurnsList => "thread/turns/list" {
|
||||
params: v2::ThreadTurnsListParams,
|
||||
// Explicitly concurrent: this primarily reads append-only rollout storage.
|
||||
@@ -611,6 +612,11 @@ client_request_definitions! {
|
||||
serialization: global("config"),
|
||||
response: v2::PluginReadResponse,
|
||||
},
|
||||
PluginSkillRead => "plugin/skill/read" {
|
||||
params: v2::PluginSkillReadParams,
|
||||
serialization: global("config"),
|
||||
response: v2::PluginSkillReadResponse,
|
||||
},
|
||||
PluginShareSave => "plugin/share/save" {
|
||||
params: v2::PluginShareSaveParams,
|
||||
serialization: global("config"),
|
||||
@@ -1397,6 +1403,7 @@ server_notification_definitions! {
|
||||
CommandExecOutputDelta => "command/exec/outputDelta" (v2::CommandExecOutputDeltaNotification),
|
||||
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
|
||||
TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
|
||||
/// Deprecated legacy apply_patch output stream notification.
|
||||
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
|
||||
FileChangePatchUpdated => "item/fileChange/patchUpdated" (v2::FileChangePatchUpdatedNotification),
|
||||
ServerRequestResolved => "serverRequest/resolved" (v2::ServerRequestResolvedNotification),
|
||||
@@ -2558,7 +2565,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(Some("You are on a call".to_string())),
|
||||
session_id: Some("sess_456".to_string()),
|
||||
realtime_session_id: Some("sess_456".to_string()),
|
||||
transport: None,
|
||||
voice: Some(RealtimeVoice::Marin),
|
||||
},
|
||||
@@ -2571,7 +2578,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": "You are on a call",
|
||||
"sessionId": "sess_456",
|
||||
"realtimeSessionId": "sess_456",
|
||||
"transport": null,
|
||||
"voice": "marin"
|
||||
}
|
||||
@@ -2589,7 +2596,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: None,
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2601,7 +2608,7 @@ mod tests {
|
||||
"params": {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2615,7 +2622,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(None),
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2628,7 +2635,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": null,
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2642,7 +2649,7 @@ mod tests {
|
||||
"params": {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2659,7 +2666,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": null,
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2771,7 +2778,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(Some("You are on a call".to_string())),
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2854,7 +2861,7 @@ mod tests {
|
||||
let notification =
|
||||
ServerNotification::ThreadRealtimeStarted(v2::ThreadRealtimeStartedNotification {
|
||||
thread_id: "thr_123".to_string(),
|
||||
session_id: Some("sess_456".to_string()),
|
||||
realtime_session_id: Some("sess_456".to_string()),
|
||||
version: RealtimeConversationVersion::V1,
|
||||
});
|
||||
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(¬ification);
|
||||
|
||||
codex-rs/app-server-protocol/src/protocol/event_mapping.rs (new file, 578 lines)
@@ -0,0 +1,578 @@
|
||||
use crate::protocol::common::ServerNotification;
|
||||
use crate::protocol::item_builders::build_command_execution_begin_item;
|
||||
use crate::protocol::item_builders::build_command_execution_end_item;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use crate::protocol::v2::AgentMessageDeltaNotification;
|
||||
use crate::protocol::v2::CollabAgentState;
|
||||
use crate::protocol::v2::CollabAgentTool;
|
||||
use crate::protocol::v2::CollabAgentToolCallStatus;
|
||||
use crate::protocol::v2::CommandExecutionOutputDeltaNotification;
|
||||
use crate::protocol::v2::DynamicToolCallOutputContentItem;
|
||||
use crate::protocol::v2::DynamicToolCallStatus;
|
||||
use crate::protocol::v2::FileChangePatchUpdatedNotification;
|
||||
use crate::protocol::v2::ItemCompletedNotification;
|
||||
use crate::protocol::v2::ItemStartedNotification;
|
||||
use crate::protocol::v2::PlanDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryPartAddedNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryTextDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningTextDeltaNotification;
|
||||
use crate::protocol::v2::TerminalInteractionNotification;
|
||||
use crate::protocol::v2::ThreadItem;
|
||||
use codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem as CoreDynamicToolCallOutputContentItem;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Build the v2 app-server notification that directly corresponds to a single core event.
|
||||
///
|
||||
/// This only covers the stateless event-to-notification projections that have a one-to-one
|
||||
/// mapping. Callers remain responsible for any surrounding state checks or side effects before
|
||||
/// invoking this helper.
|
||||
pub fn item_event_to_server_notification(
|
||||
msg: EventMsg,
|
||||
thread_id: &str,
|
||||
turn_id: &str,
|
||||
) -> ServerNotification {
|
||||
let thread_id = thread_id.to_string();
|
||||
let turn_id = turn_id.to_string();
|
||||
match msg {
|
||||
EventMsg::DynamicToolCallResponse(response) => {
|
||||
let status = if response.success {
|
||||
DynamicToolCallStatus::Completed
|
||||
} else {
|
||||
DynamicToolCallStatus::Failed
|
||||
};
|
||||
let duration_ms = i64::try_from(response.duration.as_millis()).ok();
|
||||
let item = ThreadItem::DynamicToolCall {
|
||||
id: response.call_id,
|
||||
namespace: response.namespace,
|
||||
tool: response.tool,
|
||||
arguments: response.arguments,
|
||||
status,
|
||||
content_items: Some(
|
||||
response
|
||||
.content_items
|
||||
.into_iter()
|
||||
.map(|item| match item {
|
||||
CoreDynamicToolCallOutputContentItem::InputText { text } => {
|
||||
DynamicToolCallOutputContentItem::InputText { text }
|
||||
}
|
||||
CoreDynamicToolCallOutputContentItem::InputImage { image_url } => {
|
||||
DynamicToolCallOutputContentItem::InputImage { image_url }
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
success: Some(response.success),
|
||||
duration_ms,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id: response.turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::SpawnAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: Vec::new(),
|
||||
prompt: Some(begin_event.prompt),
|
||||
model: Some(begin_event.model),
|
||||
reasoning_effort: Some(begin_event.reasoning_effort),
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnEnd(end_event) => {
|
||||
let has_receiver = end_event.new_thread_id.is_some();
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ if has_receiver => CollabAgentToolCallStatus::Completed,
|
||||
_ => CollabAgentToolCallStatus::Failed,
|
||||
};
|
||||
let (receiver_thread_ids, agents_states) = match end_event.new_thread_id {
|
||||
Some(id) => {
|
||||
let receiver_id = id.to_string();
|
||||
let received_status = CollabAgentState::from(end_event.status.clone());
|
||||
(
|
||||
vec![receiver_id.clone()],
|
||||
[(receiver_id, received_status)].into_iter().collect(),
|
||||
)
|
||||
}
|
||||
None => (Vec::new(), HashMap::new()),
|
||||
};
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::SpawnAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: Some(end_event.prompt),
|
||||
model: Some(end_event.model),
|
||||
reasoning_effort: Some(end_event.reasoning_effort),
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionBegin(begin_event) => {
|
||||
let receiver_thread_ids = vec![begin_event.receiver_thread_id.to_string()];
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::SendInput,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: Some(begin_event.prompt),
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let received_status = CollabAgentState::from(end_event.status);
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::SendInput,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id.clone()],
|
||||
prompt: Some(end_event.prompt),
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: [(receiver_id, received_status)].into_iter().collect(),
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingBegin(begin_event) => {
|
||||
let receiver_thread_ids = begin_event
|
||||
.receiver_thread_ids
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::Wait,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingEnd(end_event) => {
|
||||
let status = if end_event.statuses.values().any(|status| {
|
||||
matches!(
|
||||
status,
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound
|
||||
)
|
||||
}) {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
} else {
|
||||
CollabAgentToolCallStatus::Completed
|
||||
};
|
||||
let receiver_thread_ids = end_event.statuses.keys().map(ToString::to_string).collect();
|
||||
let agents_states = end_event
|
||||
.statuses
|
||||
.iter()
|
||||
.map(|(id, status)| (id.to_string(), CollabAgentState::from(status.clone())))
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::Wait,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::CloseAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let agents_states = [(
|
||||
receiver_id.clone(),
|
||||
CollabAgentState::from(end_event.status),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::CloseAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let agents_states = [(
|
||||
receiver_id.clone(),
|
||||
CollabAgentState::from(end_event.status),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
})
|
||||
}
|
||||
EventMsg::AgentMessageContentDelta(event) => {
|
||||
let codex_protocol::protocol::AgentMessageContentDeltaEvent { item_id, delta, .. } =
|
||||
event;
|
||||
ServerNotification::AgentMessageDelta(AgentMessageDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id,
|
||||
delta,
|
||||
})
|
||||
}
|
||||
EventMsg::PlanDelta(event) => ServerNotification::PlanDelta(PlanDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
}),
|
||||
EventMsg::ReasoningContentDelta(event) => {
|
||||
ServerNotification::ReasoningSummaryTextDelta(ReasoningSummaryTextDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
summary_index: event.summary_index,
|
||||
})
|
||||
}
|
||||
EventMsg::ReasoningRawContentDelta(event) => {
|
||||
ServerNotification::ReasoningTextDelta(ReasoningTextDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
content_index: event.content_index,
|
||||
})
|
||||
}
|
||||
EventMsg::AgentReasoningSectionBreak(event) => {
|
||||
ServerNotification::ReasoningSummaryPartAdded(ReasoningSummaryPartAddedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
summary_index: event.summary_index,
|
||||
})
|
||||
}
|
||||
EventMsg::ItemStarted(item_started_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_started_event.item.into(),
|
||||
})
|
||||
}
|
||||
EventMsg::ItemCompleted(item_completed_event) => {
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_completed_event.item.into(),
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyUpdated(event) => {
|
||||
ServerNotification::FileChangePatchUpdated(FileChangePatchUpdatedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.call_id,
|
||||
changes: convert_patch_changes(&event.changes),
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandBegin(exec_command_begin_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_begin_item(&exec_command_begin_event),
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => {
|
||||
let item_id = exec_command_output_delta_event.call_id;
|
||||
let delta = String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string();
|
||||
ServerNotification::CommandExecutionOutputDelta(
|
||||
CommandExecutionOutputDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id,
|
||||
delta,
|
||||
},
|
||||
)
|
||||
}
|
||||
EventMsg::TerminalInteraction(terminal_event) => {
|
||||
ServerNotification::TerminalInteraction(TerminalInteractionNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: terminal_event.call_id,
|
||||
process_id: terminal_event.process_id,
|
||||
stdin: terminal_event.stdin,
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandEnd(exec_command_end_event) => {
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_end_item(&exec_command_end_event),
|
||||
})
|
||||
}
|
||||
_ => unreachable!("unsupported item event"),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::CollabResumeBeginEvent;
|
||||
use codex_protocol::protocol::CollabResumeEndEvent;
|
||||
use codex_protocol::protocol::ExecCommandOutputDeltaEvent;
|
||||
use codex_protocol::protocol::ExecOutputStream;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
fn assert_item_started_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: ItemStartedNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::ItemStarted(payload) => assert_eq!(payload, expected),
|
||||
other => panic!("expected item started notification, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_item_completed_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: ItemCompletedNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::ItemCompleted(payload) => assert_eq!(payload, expected),
|
||||
other => panic!("expected item completed notification, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_command_execution_output_delta_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: CommandExecutionOutputDeltaNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::CommandExecutionOutputDelta(payload) => {
|
||||
assert_eq!(payload, expected)
|
||||
}
|
||||
other => panic!("expected command execution output delta, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collab_resume_begin_maps_to_item_started_resume_agent() {
|
||||
let event = CollabResumeBeginEvent {
|
||||
call_id: "call-1".to_string(),
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
receiver_agent_role: None,
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::CollabResumeBegin(event.clone()),
|
||||
"thread-1",
|
||||
"turn-1",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn-1".to_string(),
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collab_resume_end_maps_to_item_completed_resume_agent() {
|
||||
let event = CollabResumeEndEvent {
|
||||
call_id: "call-2".to_string(),
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
receiver_agent_role: None,
|
||||
status: codex_protocol::protocol::AgentStatus::NotFound,
|
||||
};
|
||||
|
||||
let receiver_id = event.receiver_thread_id.to_string();
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::CollabResumeEnd(event.clone()),
|
||||
"thread-2",
|
||||
"turn-2",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-2".to_string(),
|
||||
turn_id: "turn-2".to_string(),
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::Failed,
|
||||
sender_thread_id: event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id.clone()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: [(
|
||||
receiver_id,
|
||||
CollabAgentState::from(codex_protocol::protocol::AgentStatus::NotFound),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exec_command_output_delta_maps_to_command_execution_output_delta() {
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
|
||||
call_id: "call-1".to_string(),
|
||||
stream: ExecOutputStream::Stdout,
|
||||
chunk: b"hello".to_vec(),
|
||||
}),
|
||||
"thread-1",
|
||||
"turn-1",
|
||||
);
|
||||
|
||||
assert_command_execution_output_delta_server_notification(
|
||||
notification,
|
||||
CommandExecutionOutputDeltaNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn-1".to_string(),
|
||||
item_id: "call-1".to_string(),
|
||||
delta: "hello".to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
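For context, the mapper above is meant to be called once per event after the caller has already filtered to the item-shaped events it covers; unsupported events hit the `unreachable!` arm. A hedged usage sketch follows; the `emit` callback and the pre-filtering step are assumptions of this sketch, and it presumes `ServerNotification` and the mapper are in scope, since they live in this crate's protocol modules.

use codex_protocol::protocol::EventMsg;

// Sketch: forward one already-filtered core event as its v2 notification.
// The mapper panics on events it does not handle, so callers must pre-filter.
fn forward_item_event(
    event: EventMsg,
    thread_id: &str,
    turn_id: &str,
    emit: &mut dyn FnMut(ServerNotification),
) {
    emit(item_event_to_server_notification(event, thread_id, turn_id));
}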
@@ -1,9 +1,8 @@
|
||||
//! Shared builders for synthetic [`ThreadItem`] values emitted by the app-server layer.
|
||||
//! Shared builders for app-server [`ThreadItem`] values derived from compatibility events.
|
||||
//!
|
||||
//! These items do not come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! Instead, the app-server synthesizes them so clients can render a coherent lifecycle for
|
||||
//! approvals and other pre-execution flows before the underlying tool has started or when the
|
||||
//! tool never starts at all.
|
||||
//! Most live tool items now come from first-class core `ItemStarted` / `ItemCompleted` events.
|
||||
//! These builders remain for approval flows, rebuilt legacy history, and other pre-execution
|
||||
//! paths where the underlying tool has not started or never starts at all.
|
||||
//!
|
||||
//! Keeping these builders in one place is useful for two reasons:
|
||||
//! - Live notifications and rebuilt `thread/read` history both need to construct the same
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.
|
||||
|
||||
pub mod common;
|
||||
pub mod event_mapping;
|
||||
pub mod item_builders;
|
||||
mod mappers;
|
||||
mod serde_helpers;
|
||||
|
||||
@@ -217,7 +217,6 @@ impl ThreadHistoryBuilder {
|
||||
EventMsg::Error(payload) => self.handle_error(payload),
|
||||
EventMsg::TokenCount(_) => {}
|
||||
EventMsg::ThreadRolledBack(payload) => self.handle_thread_rollback(payload),
|
||||
EventMsg::UndoCompleted(_) => {}
|
||||
EventMsg::TurnAborted(payload) => self.handle_turn_aborted(payload),
|
||||
EventMsg::TurnStarted(payload) => self.handle_turn_started(payload),
|
||||
EventMsg::TurnComplete(payload) => self.handle_turn_complete(payload),
|
||||
@@ -357,7 +356,10 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
@@ -378,7 +380,10 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ use std::path::PathBuf;
|
||||
|
||||
use crate::RequestId;
|
||||
use crate::protocol::common::AuthMode;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use codex_experimental_api_macros::ExperimentalApi;
|
||||
use codex_protocol::account::PlanType;
|
||||
use codex_protocol::account::ProviderAccount;
|
||||
@@ -30,6 +31,8 @@ use codex_protocol::config_types::Verbosity;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::config_types::WebSearchToolConfig;
|
||||
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
|
||||
use codex_protocol::items::McpToolCallError as CoreMcpToolCallError;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::TurnItem as CoreTurnItem;
|
||||
use codex_protocol::mcp::CallToolResult as CoreMcpCallToolResult;
|
||||
use codex_protocol::mcp::Resource as McpResource;
|
||||
@@ -2782,6 +2785,24 @@ impl From<CoreMcpCallToolResult> for McpServerToolCallResponse {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpCallToolResult> for McpToolCallResult {
|
||||
fn from(result: CoreMcpCallToolResult) -> Self {
|
||||
Self {
|
||||
content: result.content,
|
||||
structured_content: result.structured_content,
|
||||
meta: result.meta,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallError> for McpToolCallError {
|
||||
fn from(error: CoreMcpToolCallError) -> Self {
|
||||
Self {
|
||||
message: error.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -3610,8 +3631,9 @@ pub struct ThreadStartParams {
|
||||
#[experimental("thread/start.experimentalRawEvents")]
|
||||
#[serde(default)]
|
||||
pub experimental_raw_events: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/start.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3738,10 +3760,12 @@ pub struct ThreadResumeParams {
|
||||
/// When true, return only thread metadata and live-resume state without
|
||||
/// populating `thread.turns`. This is useful when the client plans to call
|
||||
/// `thread/turns/list` immediately after resuming.
|
||||
#[experimental("thread/resume.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/resume.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3842,10 +3866,12 @@ pub struct ThreadForkParams {
|
||||
/// When true, return only thread metadata and live fork state without
|
||||
/// populating `thread.turns`. This is useful when the client plans to call
|
||||
/// `thread/turns/list` immediately after forking.
|
||||
#[experimental("thread/fork.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/fork.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -4607,6 +4633,22 @@ pub struct PluginReadResponse {
|
||||
pub plugin: PluginDetail,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadParams {
|
||||
pub remote_marketplace_name: String,
|
||||
pub remote_plugin_id: String,
|
||||
pub skill_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadResponse {
|
||||
pub contents: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -4633,7 +4675,7 @@ pub struct PluginShareListParams {}
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListResponse {
|
||||
pub data: Vec<PluginSummary>,
|
||||
pub data: Vec<PluginShareListItem>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -4648,6 +4690,15 @@ pub struct PluginShareDeleteParams {
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareDeleteResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListItem {
|
||||
pub plugin: PluginSummary,
|
||||
pub share_url: String,
|
||||
pub local_plugin_path: Option<AbsolutePathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[ts(rename_all = "snake_case")]
|
||||
@@ -4825,6 +4876,21 @@ pub enum PluginAuthPolicy {
|
||||
OnUse,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Default, JsonSchema, TS)]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub enum PluginAvailability {
|
||||
/// Plugin-service currently sends `"ENABLED"` for available remote plugins.
|
||||
/// Codex app-server exposes `"AVAILABLE"` in its API; the alias keeps
|
||||
/// decoding compatible with that upstream response.
|
||||
#[serde(rename = "AVAILABLE", alias = "ENABLED")]
|
||||
#[ts(rename = "AVAILABLE")]
|
||||
#[default]
|
||||
Available,
|
||||
#[serde(rename = "DISABLED_BY_ADMIN")]
|
||||
#[ts(rename = "DISABLED_BY_ADMIN")]
|
||||
DisabledByAdmin,
|
||||
}
|
||||
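// A minimal round-trip sketch of the alias documented on the enum above:
// upstream "ENABLED" decodes to the same variant that the app-server
// re-serializes as "AVAILABLE". Illustrative only; assumes serde_json and
// this enum are in scope.
fn _plugin_availability_alias_roundtrip() -> serde_json::Result<()> {
    let decoded: PluginAvailability = serde_json::from_str("\"ENABLED\"")?;
    assert_eq!(decoded, PluginAvailability::Available);
    assert_eq!(serde_json::to_string(&decoded)?, "\"AVAILABLE\"");
    Ok(())
}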
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -4836,6 +4902,9 @@ pub struct PluginSummary {
|
||||
pub enabled: bool,
|
||||
pub install_policy: PluginInstallPolicy,
|
||||
pub auth_policy: PluginAuthPolicy,
|
||||
/// Availability state for installing and using the plugin.
|
||||
#[serde(default)]
|
||||
pub availability: PluginAvailability,
|
||||
pub interface: Option<PluginInterface>,
|
||||
}
|
||||
|
||||
@@ -5293,7 +5362,7 @@ pub struct ThreadRealtimeStartParams {
|
||||
#[ts(optional = nullable)]
|
||||
pub prompt: Option<Option<String>>,
|
||||
#[ts(optional = nullable)]
|
||||
pub session_id: Option<String>,
|
||||
pub realtime_session_id: Option<String>,
|
||||
#[ts(optional = nullable)]
|
||||
pub transport: Option<ThreadRealtimeStartTransport>,
|
||||
#[ts(optional = nullable)]
|
||||
@@ -5383,7 +5452,7 @@ pub struct ThreadRealtimeListVoicesResponse {
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ThreadRealtimeStartedNotification {
|
||||
pub thread_id: String,
|
||||
pub session_id: Option<String>,
|
||||
pub realtime_session_id: Option<String>,
|
||||
pub version: RealtimeConversationVersion,
|
||||
}
|
||||
|
||||
@@ -6417,6 +6486,10 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
query: search.query,
|
||||
action: Some(WebSearchAction::from(search.action)),
|
||||
},
|
||||
CoreTurnItem::ImageView(image) => ThreadItem::ImageView {
|
||||
id: image.id,
|
||||
path: image.path,
|
||||
},
|
||||
CoreTurnItem::ImageGeneration(image) => ThreadItem::ImageGeneration {
|
||||
id: image.id,
|
||||
status: image.status,
|
||||
@@ -6424,6 +6497,32 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
result: image.result,
|
||||
saved_path: image.saved_path,
|
||||
},
|
||||
CoreTurnItem::FileChange(file_change) => ThreadItem::FileChange {
|
||||
id: file_change.id,
|
||||
changes: convert_patch_changes(&file_change.changes),
|
||||
status: file_change
|
||||
.status
|
||||
.as_ref()
|
||||
.map(PatchApplyStatus::from)
|
||||
.unwrap_or(PatchApplyStatus::InProgress),
|
||||
},
|
||||
CoreTurnItem::McpToolCall(mcp) => {
|
||||
let duration_ms = mcp
|
||||
.duration
|
||||
.and_then(|duration| i64::try_from(duration.as_millis()).ok());
|
||||
|
||||
ThreadItem::McpToolCall {
|
||||
id: mcp.id,
|
||||
server: mcp.server,
|
||||
tool: mcp.tool,
|
||||
status: McpToolCallStatus::from(mcp.status),
|
||||
arguments: mcp.arguments,
|
||||
mcp_app_resource_uri: mcp.mcp_app_resource_uri,
|
||||
result: mcp.result.map(McpToolCallResult::from).map(Box::new),
|
||||
error: mcp.error.map(McpToolCallError::from),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
CoreTurnItem::ContextCompaction(compaction) => {
|
||||
ThreadItem::ContextCompaction { id: compaction.id }
|
||||
}
|
||||
@@ -6533,6 +6632,16 @@ impl From<&CorePatchApplyStatus> for PatchApplyStatus {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallStatus> for McpToolCallStatus {
|
||||
fn from(value: CoreMcpToolCallStatus) -> Self {
|
||||
match value {
|
||||
CoreMcpToolCallStatus::InProgress => McpToolCallStatus::InProgress,
|
||||
CoreMcpToolCallStatus::Completed => McpToolCallStatus::Completed,
|
||||
CoreMcpToolCallStatus::Failed => McpToolCallStatus::Failed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -6992,6 +7101,9 @@ pub struct CommandExecOutputDeltaNotification {
|
||||
pub cap_reached: bool,
|
||||
}
|
||||
|
||||
/// Deprecated legacy notification for `apply_patch` textual output.
|
||||
///
|
||||
/// The server no longer emits this notification.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -8030,10 +8142,15 @@ mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::items::AgentMessageContent;
|
||||
use codex_protocol::items::AgentMessageItem;
|
||||
use codex_protocol::items::FileChangeItem;
|
||||
use codex_protocol::items::ImageViewItem;
|
||||
use codex_protocol::items::McpToolCallItem;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::ReasoningItem;
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::items::UserMessageItem;
|
||||
use codex_protocol::items::WebSearchItem;
|
||||
use codex_protocol::mcp::CallToolResult;
|
||||
use codex_protocol::models::WebSearchAction as CoreWebSearchAction;
|
||||
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
|
||||
use codex_protocol::user_input::UserInput as CoreUserInput;
|
||||
@@ -8043,6 +8160,7 @@ mod tests {
|
||||
use serde_json::json;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
fn absolute_path_string(path: &str) -> String {
|
||||
let path = format!("/{}", path.trim_start_matches('/'));
|
||||
@@ -10310,6 +10428,111 @@ mod tests {
|
||||
}),
|
||||
}
|
||||
);
|
||||
|
||||
let image_view_item = TurnItem::ImageView(ImageViewItem {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(image_view_item),
|
||||
ThreadItem::ImageView {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
}
|
||||
);
|
||||
|
||||
let file_change_item = TurnItem::FileChange(FileChangeItem {
|
||||
id: "patch-1".to_string(),
|
||||
changes: [(
|
||||
PathBuf::from("README.md"),
|
||||
codex_protocol::protocol::FileChange::Add {
|
||||
content: "hello\n".to_string(),
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
status: Some(codex_protocol::protocol::PatchApplyStatus::Completed),
|
||||
auto_approved: None,
|
||||
stdout: Some("Done!".to_string()),
|
||||
stderr: Some(String::new()),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(file_change_item),
|
||||
ThreadItem::FileChange {
|
||||
id: "patch-1".to_string(),
|
||||
changes: vec![FileUpdateChange {
|
||||
path: "README.md".to_string(),
|
||||
kind: PatchChangeKind::Add,
|
||||
diff: "hello\n".to_string(),
|
||||
}],
|
||||
status: PatchApplyStatus::Completed,
|
||||
}
|
||||
);
|
||||
|
||||
let mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
status: CoreMcpToolCallStatus::InProgress,
|
||||
result: None,
|
||||
error: None,
|
||||
duration: None,
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
}
|
||||
);
|
||||
|
||||
let completed_mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
status: CoreMcpToolCallStatus::Completed,
|
||||
result: Some(CallToolResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
is_error: Some(false),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
}),
|
||||
error: None,
|
||||
duration: Some(Duration::from_millis(42)),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(completed_mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::Completed,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: Some(Box::new(McpToolCallResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
})),
|
||||
error: None,
|
||||
duration_ms: Some(42),
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -10644,6 +10867,23 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_skill_read_params_serialization_uses_remote_plugin_id() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginSkillReadParams {
|
||||
remote_marketplace_name: "chatgpt-global".to_string(),
|
||||
remote_plugin_id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
skill_name: "plan-work".to_string(),
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"remoteMarketplaceName": "chatgpt-global",
|
||||
"remotePluginId": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"skillName": "plan-work",
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_params_and_response_serialization_use_camel_case_fields() {
|
||||
let plugin_path = if cfg!(windows) {
|
||||
@@ -10709,36 +10949,74 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_list_response_serializes_plugin_summaries() {
|
||||
fn plugin_share_list_response_serializes_share_items() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginShareListResponse {
|
||||
data: vec![PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
interface: None,
|
||||
data: vec![PluginShareListItem {
|
||||
plugin: PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
availability: PluginAvailability::Available,
|
||||
interface: None,
|
||||
},
|
||||
share_url: "https://chatgpt.example/plugins/share/share-key-1".to_string(),
|
||||
local_plugin_path: None,
|
||||
}],
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"data": [{
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"interface": null,
|
||||
"plugin": {
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"availability": "AVAILABLE",
|
||||
"interface": null,
|
||||
},
|
||||
"shareUrl": "https://chatgpt.example/plugins/share/share-key-1",
|
||||
"localPluginPath": null,
|
||||
}],
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_summary_defaults_missing_availability_to_available() {
|
||||
let summary: PluginSummary = serde_json::from_value(json!({
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"interface": null,
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(summary.availability, PluginAvailability::Available);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_availability_deserializes_enabled_alias() {
|
||||
let availability: PluginAvailability = serde_json::from_value(json!("ENABLED")).unwrap();
|
||||
|
||||
assert_eq!(availability, PluginAvailability::Available);
|
||||
assert_eq!(
|
||||
serde_json::to_value(availability).unwrap(),
|
||||
json!("AVAILABLE")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_uninstall_params_serialization_omits_force_remote_sync() {
|
||||
assert_eq!(
|
||||
|
||||
@@ -48,8 +48,8 @@ use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::LoginAccountResponse;
|
||||
use codex_app_server_protocol::ModelListParams;
|
||||
use codex_app_server_protocol::ModelListResponse;
|
||||
use codex_app_server_protocol::PermissionProfileSelectionParams;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::SandboxPolicy;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationParams;
|
||||
@@ -620,18 +620,11 @@ fn shell_quote(input: &str) -> String {
|
||||
format!("'{}'", input.replace('\'', "'\\''"))
|
||||
}
|
||||
|
||||
fn select_permission_profile(id: &str) -> PermissionProfileSelectionParams {
|
||||
PermissionProfileSelectionParams::Profile {
|
||||
id: id.to_string(),
|
||||
modifications: None,
|
||||
}
|
||||
}
|
||||
|
||||
struct SendMessagePolicies<'a> {
|
||||
command_name: &'static str,
|
||||
experimental_api: bool,
|
||||
approval_policy: Option<AskForApproval>,
|
||||
permission_profile_id: Option<&'static str>,
|
||||
sandbox_policy: Option<SandboxPolicy>,
|
||||
dynamic_tools: &'a Option<Vec<DynamicToolSpec>>,
|
||||
}
|
||||
|
||||
@@ -649,7 +642,7 @@ async fn send_message(
|
||||
command_name: "send-message",
|
||||
experimental_api: false,
|
||||
approval_policy: None,
|
||||
permission_profile_id: None,
|
||||
sandbox_policy: None,
|
||||
dynamic_tools: &dynamic_tools,
|
||||
},
|
||||
)
|
||||
@@ -692,7 +685,7 @@ async fn send_message_v2_endpoint(
|
||||
command_name: "send-message-v2",
|
||||
experimental_api,
|
||||
approval_policy: None,
|
||||
permission_profile_id: None,
|
||||
sandbox_policy: None,
|
||||
dynamic_tools,
|
||||
},
|
||||
)
|
||||
@@ -748,7 +741,9 @@ async fn trigger_zsh_fork_multi_cmd_approval(
|
||||
..Default::default()
|
||||
};
|
||||
turn_params.approval_policy = Some(AskForApproval::OnRequest);
|
||||
turn_params.permissions = Some(select_permission_profile(":read-only"));
|
||||
turn_params.sandbox_policy = Some(SandboxPolicy::ReadOnly {
|
||||
network_access: false,
|
||||
});
|
||||
|
||||
let turn_response = client.turn_start(turn_params)?;
|
||||
println!("< turn/start response: {turn_response:?}");
|
||||
@@ -887,7 +882,9 @@ async fn trigger_cmd_approval(
|
||||
command_name: "trigger-cmd-approval",
|
||||
experimental_api: true,
|
||||
approval_policy: Some(AskForApproval::OnRequest),
|
||||
permission_profile_id: Some(":read-only"),
|
||||
sandbox_policy: Some(SandboxPolicy::ReadOnly {
|
||||
network_access: false,
|
||||
}),
|
||||
dynamic_tools,
|
||||
},
|
||||
)
|
||||
@@ -911,7 +908,9 @@ async fn trigger_patch_approval(
|
||||
command_name: "trigger-patch-approval",
|
||||
experimental_api: true,
|
||||
approval_policy: Some(AskForApproval::OnRequest),
|
||||
permission_profile_id: Some(":read-only"),
|
||||
sandbox_policy: Some(SandboxPolicy::ReadOnly {
|
||||
network_access: false,
|
||||
}),
|
||||
dynamic_tools,
|
||||
},
|
||||
)
|
||||
@@ -932,7 +931,7 @@ async fn no_trigger_cmd_approval(
|
||||
command_name: "no-trigger-cmd-approval",
|
||||
experimental_api: true,
|
||||
approval_policy: None,
|
||||
permission_profile_id: None,
|
||||
sandbox_policy: None,
|
||||
dynamic_tools,
|
||||
},
|
||||
)
|
||||
@@ -968,9 +967,7 @@ async fn send_message_v2_with_policies(
|
||||
..Default::default()
|
||||
};
|
||||
turn_params.approval_policy = policies.approval_policy;
|
||||
turn_params.permissions = policies
|
||||
.permission_profile_id
|
||||
.map(select_permission_profile);
|
||||
turn_params.sandbox_policy = policies.sandbox_policy;
|
||||
|
||||
let turn_response = client.turn_start(turn_params)?;
|
||||
println!("< turn/start response: {turn_response:?}");
|
||||
@@ -1263,7 +1260,7 @@ fn live_elicitation_timeout_pause(
|
||||
text_elements: Vec::new(),
|
||||
}],
|
||||
approval_policy: Some(AskForApproval::Never),
|
||||
permissions: Some(select_permission_profile(":danger-no-sandbox")),
|
||||
sandbox_policy: Some(SandboxPolicy::DangerFullAccess),
|
||||
effort: Some(ReasoningEffort::High),
|
||||
cwd: Some(workspace),
|
||||
..Default::default()
|
||||
|
||||
codex-rs/app-server-transport/BUILD.bazel (new file, 6 lines)
@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "app-server-transport",
    crate_name = "codex_app_server_transport",
)
codex-rs/app-server-transport/Cargo.toml (new file, 58 lines)
@@ -0,0 +1,58 @@
[package]
name = "codex-app-server-transport"
version.workspace = true
edition.workspace = true
license.workspace = true

[lib]
name = "codex_app_server_transport"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
axum = { workspace = true, default-features = false, features = [
    "http1",
    "json",
    "tokio",
    "ws",
] }
base64 = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-api = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-core = { workspace = true }
codex-login = { workspace = true }
codex-model-provider = { workspace = true }
codex-state = { workspace = true }
codex-uds = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-rustls-provider = { workspace = true }
constant_time_eq = { workspace = true }
futures = { workspace = true }
gethostname = { workspace = true }
hmac = { workspace = true }
jsonwebtoken = { workspace = true }
owo-colors = { workspace = true, features = ["supports-colors"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
time = { workspace = true }
tokio = { workspace = true, features = [
    "io-std",
    "macros",
    "rt-multi-thread",
] }
tokio-tungstenite = { workspace = true }
tokio-util = { workspace = true }
tracing = { workspace = true, features = ["log"] }
url = { workspace = true }
uuid = { workspace = true, features = ["serde", "v7"] }

[dev-dependencies]
chrono = { workspace = true }
codex-config = { workspace = true }
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
codex-rs/app-server-transport/src/lib.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
mod outgoing_message;
mod transport;

pub use outgoing_message::ConnectionId;
pub use outgoing_message::OutgoingError;
pub use outgoing_message::OutgoingMessage;
pub use outgoing_message::OutgoingResponse;
pub use outgoing_message::QueuedOutgoingMessage;
pub use transport::AppServerTransport;
pub use transport::AppServerTransportParseError;
pub use transport::CHANNEL_CAPACITY;
pub use transport::ConnectionOrigin;
pub use transport::RemoteControlHandle;
pub use transport::TransportEvent;
pub use transport::app_server_control_socket_path;
pub use transport::auth;
pub use transport::start_control_socket_acceptor;
pub use transport::start_remote_control;
pub use transport::start_stdio_connection;
pub use transport::start_websocket_acceptor;
codex-rs/app-server-transport/src/outgoing_message.rs (new file, 58 lines)
@@ -0,0 +1,58 @@
use std::fmt;

use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::Result;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use serde::Serialize;
use tokio::sync::oneshot;

/// Stable identifier for a transport connection.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConnectionId(pub u64);

impl fmt::Display for ConnectionId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// Outgoing message from the server to the client.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub enum OutgoingMessage {
    Request(ServerRequest),
    /// AppServerNotification is specific to the case where this is run as an
    /// "app server" as opposed to an MCP server.
    AppServerNotification(ServerNotification),
    Response(OutgoingResponse),
    Error(OutgoingError),
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct OutgoingResponse {
    pub id: RequestId,
    pub result: Result,
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct OutgoingError {
    pub error: JSONRPCErrorError,
    pub id: RequestId,
}

#[derive(Debug)]
pub struct QueuedOutgoingMessage {
    pub message: OutgoingMessage,
    pub write_complete_tx: Option<oneshot::Sender<()>>,
}

impl QueuedOutgoingMessage {
    pub fn new(message: OutgoingMessage) -> Self {
        Self {
            message,
            write_complete_tx: None,
        }
    }
}
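// Illustrative usage sketch (added for clarity; not part of this diff). A caller
// wraps a server-to-client payload in `OutgoingMessage` and hands it to the
// transport writer task as a `QueuedOutgoingMessage`:
//
//     let queued = QueuedOutgoingMessage::new(OutgoingMessage::AppServerNotification(
//         ServerNotification::ConfigWarning(ConfigWarningNotification {
//             summary: "example".to_string(),
//             details: None,
//             path: None,
//             range: None,
//         }),
//     ));
//     // `queued.write_complete_tx` stays `None` unless the caller wants an ack
//     // that the bytes were flushed to the client.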
@@ -86,7 +86,7 @@ pub enum AppServerWebsocketCapabilityTokenSource {
}

#[derive(Clone, Debug, Default)]
pub(crate) struct WebsocketAuthPolicy {
pub struct WebsocketAuthPolicy {
    pub(crate) mode: Option<WebsocketAuthMode>,
}

@@ -219,7 +219,7 @@ impl AppServerWebsocketAuthArgs {
    }
}

pub(crate) fn policy_from_settings(
pub fn policy_from_settings(
    settings: &AppServerWebsocketAuthSettings,
) -> io::Result<WebsocketAuthPolicy> {
    let mode = match settings.config.as_ref() {

codex-rs/app-server-transport/src/transport/mod.rs (new file, 478 lines)
@@ -0,0 +1,478 @@
pub mod auth;

use crate::outgoing_message::ConnectionId;
use crate::outgoing_message::OutgoingError;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::QueuedOutgoingMessage;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_core::config::find_codex_home;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::net::SocketAddr;
use std::path::Path;
use std::str::FromStr;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::error;
use tracing::warn;

/// Size of the bounded channels used to communicate between tasks. The value
/// is a balance between throughput and memory usage - 128 messages should be
/// plenty for an interactive CLI.
pub const CHANNEL_CAPACITY: usize = 128;

mod remote_control;
mod stdio;
mod unix_socket;
#[cfg(test)]
mod unix_socket_tests;
mod websocket;

pub use remote_control::RemoteControlHandle;
pub use remote_control::start_remote_control;
pub use stdio::start_stdio_connection;
pub use unix_socket::start_control_socket_acceptor;
pub use websocket::start_websocket_acceptor;

const OVERLOADED_ERROR_CODE: i64 = -32001;

const APP_SERVER_CONTROL_SOCKET_DIR_NAME: &str = "app-server-control";
const APP_SERVER_CONTROL_SOCKET_FILE_NAME: &str = "app-server-control.sock";

pub fn app_server_control_socket_path(codex_home: &Path) -> std::io::Result<AbsolutePathBuf> {
    AbsolutePathBuf::from_absolute_path(
        codex_home
            .join(APP_SERVER_CONTROL_SOCKET_DIR_NAME)
            .join(APP_SERVER_CONTROL_SOCKET_FILE_NAME),
    )
}
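// Illustrative sketch (added for clarity; not part of this diff): the control
// socket path is always `<codex_home>/app-server-control/app-server-control.sock`.
// For example, assuming CODEX_HOME resolves to `/home/user/.codex`:
//
//     let socket = app_server_control_socket_path(Path::new("/home/user/.codex"))?;
//     // -> /home/user/.codex/app-server-control/app-server-control.sock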
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum AppServerTransport {
|
||||
Stdio,
|
||||
UnixSocket { socket_path: AbsolutePathBuf },
|
||||
WebSocket { bind_address: SocketAddr },
|
||||
Off,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum AppServerTransportParseError {
|
||||
UnsupportedListenUrl(String),
|
||||
InvalidUnixSocketPath { listen_url: String, message: String },
|
||||
InvalidWebSocketListenUrl(String),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for AppServerTransportParseError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"unsupported --listen URL `{listen_url}`; expected `stdio://`, `unix://`, `unix://PATH`, `ws://IP:PORT`, or `off`"
|
||||
),
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url,
|
||||
message,
|
||||
} => write!(
|
||||
f,
|
||||
"invalid unix socket --listen URL `{listen_url}`; failed to resolve socket path: {message}"
|
||||
),
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for AppServerTransportParseError {}
|
||||
|
||||
impl AppServerTransport {
|
||||
pub const DEFAULT_LISTEN_URL: &'static str = "stdio://";
|
||||
|
||||
pub fn from_listen_url(listen_url: &str) -> Result<Self, AppServerTransportParseError> {
|
||||
if listen_url == Self::DEFAULT_LISTEN_URL {
|
||||
return Ok(Self::Stdio);
|
||||
}
|
||||
|
||||
if let Some(raw_socket_path) = listen_url.strip_prefix("unix://") {
|
||||
let socket_path = if raw_socket_path.is_empty() {
|
||||
let codex_home = find_codex_home().map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: format!("failed to resolve CODEX_HOME: {err}"),
|
||||
}
|
||||
})?;
|
||||
app_server_control_socket_path(&codex_home).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
} else {
|
||||
AbsolutePathBuf::relative_to_current_dir(raw_socket_path).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
};
|
||||
return Ok(Self::UnixSocket { socket_path });
|
||||
}
|
||||
|
||||
if listen_url == "off" {
|
||||
return Ok(Self::Off);
|
||||
}
|
||||
|
||||
if let Some(socket_addr) = listen_url.strip_prefix("ws://") {
|
||||
let bind_address = socket_addr.parse::<SocketAddr>().map_err(|_| {
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string())
|
||||
})?;
|
||||
return Ok(Self::WebSocket { bind_address });
|
||||
}
|
||||
|
||||
Err(AppServerTransportParseError::UnsupportedListenUrl(
|
||||
listen_url.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for AppServerTransport {
|
||||
type Err = AppServerTransportParseError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Self::from_listen_url(s)
|
||||
}
|
||||
}
|
||||
|
||||
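// Illustrative usage sketch (added for clarity; not part of this diff): the
// supported `--listen` URL forms parse into `AppServerTransport` via `FromStr`.
// The address and socket path below are example values only:
//
//     assert_eq!(AppServerTransport::from_listen_url("off"), Ok(AppServerTransport::Off));
//     let ws: AppServerTransport = "ws://127.0.0.1:4500".parse()?;     // WebSocket listener
//     let sock: AppServerTransport = "unix:///tmp/app.sock".parse()?;  // explicit socket path
//     let default_sock: AppServerTransport = "unix://".parse()?;       // socket under CODEX_HOME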
#[derive(Debug)]
|
||||
pub enum TransportEvent {
|
||||
ConnectionOpened {
|
||||
connection_id: ConnectionId,
|
||||
origin: ConnectionOrigin,
|
||||
writer: mpsc::Sender<QueuedOutgoingMessage>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
},
|
||||
ConnectionClosed {
|
||||
connection_id: ConnectionId,
|
||||
},
|
||||
IncomingMessage {
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ConnectionOrigin {
|
||||
Stdio,
|
||||
InProcess,
|
||||
WebSocket,
|
||||
RemoteControl,
|
||||
}
|
||||
|
||||
impl ConnectionOrigin {
|
||||
pub fn allows_device_key_requests(self) -> bool {
|
||||
// Device-key endpoints are only for local connections that own the app-server instance.
|
||||
// Do not include remote transports such as SSH or remote-control websocket connections.
|
||||
matches!(self, Self::Stdio | Self::InProcess)
|
||||
}
|
||||
}
|
||||
|
||||
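// Behavior summary (added for clarity; not part of this diff): only transports
// that own the local app-server process may call device-key endpoints.
//
//     assert!(ConnectionOrigin::Stdio.allows_device_key_requests());
//     assert!(ConnectionOrigin::InProcess.allows_device_key_requests());
//     assert!(!ConnectionOrigin::WebSocket.allows_device_key_requests());
//     assert!(!ConnectionOrigin::RemoteControl.allows_device_key_requests());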
static CONNECTION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
fn next_connection_id() -> ConnectionId {
|
||||
ConnectionId(CONNECTION_ID_COUNTER.fetch_add(1, Ordering::Relaxed))
|
||||
}
|
||||
|
||||
async fn forward_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
payload: &str,
|
||||
) -> bool {
|
||||
match serde_json::from_str::<JSONRPCMessage>(payload) {
|
||||
Ok(message) => {
|
||||
enqueue_incoming_message(transport_event_tx, writer, connection_id, message).await
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Failed to deserialize JSONRPCMessage: {err}");
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn enqueue_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
) -> bool {
|
||||
let event = TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message,
|
||||
};
|
||||
match transport_event_tx.try_send(event) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Request(request),
|
||||
})) => {
|
||||
let overload_error = OutgoingMessage::Error(OutgoingError {
|
||||
id: request.id,
|
||||
error: JSONRPCErrorError {
|
||||
code: OVERLOADED_ERROR_CODE,
|
||||
message: "Server overloaded; retry later.".to_string(),
|
||||
data: None,
|
||||
},
|
||||
});
|
||||
match writer.try_send(QueuedOutgoingMessage::new(overload_error)) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(_overload_error)) => {
|
||||
warn!(
|
||||
"dropping overload response for connection {:?}: outbound queue is full",
|
||||
connection_id
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Full(event)) => transport_event_tx.send(event).await.is_ok(),
|
||||
}
|
||||
}
|
||||
|
||||
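// Overflow policy summary for `enqueue_incoming_message` (added for clarity; not
// part of this diff):
//   - event channel has capacity            -> forward the message, return true
//   - event channel closed                  -> return false so the connection loop stops
//   - channel full and message is a Request -> reply on the writer with
//                                              OVERLOADED_ERROR_CODE (-32001) instead of blocking
//   - channel full, any other message       -> fall back to an awaited `send`, applying
//                                              backpressure rather than dropping the message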
fn serialize_outgoing_message(outgoing_message: OutgoingMessage) -> Option<String> {
|
||||
let value = match serde_json::to_value(outgoing_message) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
error!("Failed to convert OutgoingMessage to JSON value: {err}");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
match serde_json::to_string(&value) {
|
||||
Ok(json) => Some(json),
|
||||
Err(err) => {
|
||||
error!("Failed to serialize JSONRPCMessage: {err}");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCRequest;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
#[test]
|
||||
fn listen_off_parses_as_off_transport() {
|
||||
assert_eq!(
|
||||
AppServerTransport::from_listen_url("off"),
|
||||
Ok(AppServerTransport::Off)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_returns_overload_error_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
assert!(
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request).await
|
||||
);
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should stay queued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let overload = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("request should receive overload error");
|
||||
let overload_json =
|
||||
serde_json::to_value(overload.message).expect("serialize overload error");
|
||||
assert_eq!(
|
||||
overload_json,
|
||||
json!({
|
||||
"id": 7,
|
||||
"error": {
|
||||
"code": OVERLOADED_ERROR_CODE,
|
||||
"message": "Server overloaded; retry later."
|
||||
}
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_response_waits_instead_of_dropping_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, _writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let response = JSONRPCMessage::Response(JSONRPCResponse {
|
||||
id: RequestId::Integer(7),
|
||||
result: json!({"ok": true}),
|
||||
});
|
||||
let transport_event_tx_for_enqueue = transport_event_tx.clone();
|
||||
let writer_tx_for_enqueue = writer_tx.clone();
|
||||
let enqueue_handle = tokio::spawn(async move {
|
||||
enqueue_incoming_message(
|
||||
&transport_event_tx_for_enqueue,
|
||||
&writer_tx_for_enqueue,
|
||||
connection_id,
|
||||
response,
|
||||
)
|
||||
.await
|
||||
});
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should be dequeued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let enqueue_result = enqueue_handle.await.expect("enqueue task should not panic");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let forwarded_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("response should be forwarded instead of dropped");
|
||||
match forwarded_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message: JSONRPCMessage::Response(JSONRPCResponse { id, result }),
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(id, RequestId::Integer(7));
|
||||
assert_eq!(result, json!({"ok": true}));
|
||||
}
|
||||
_ => panic!("expected forwarded response message"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_does_not_block_when_writer_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, _transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
}),
|
||||
})
|
||||
.await
|
||||
.expect("transport queue should accept first message");
|
||||
|
||||
writer_tx
|
||||
.send(QueuedOutgoingMessage::new(
|
||||
OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
|
||||
ConfigWarningNotification {
|
||||
summary: "queued".to_string(),
|
||||
details: None,
|
||||
path: None,
|
||||
range: None,
|
||||
},
|
||||
)),
|
||||
))
|
||||
.await
|
||||
.expect("writer queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
|
||||
let enqueue_result = timeout(
|
||||
Duration::from_millis(100),
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request),
|
||||
)
|
||||
.await
|
||||
.expect("enqueue should not block while writer queue is full");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let queued_outgoing = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("writer queue should still contain original message");
|
||||
let queued_json =
|
||||
serde_json::to_value(queued_outgoing.message).expect("serialize queued message");
|
||||
assert_eq!(
|
||||
queued_json,
|
||||
json!({
|
||||
"method": "configWarning",
|
||||
"params": {
|
||||
"summary": "queued",
|
||||
"details": null,
|
||||
},
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -195,7 +195,7 @@ impl ClientTracker {
|
||||
})
|
||||
.await
|
||||
}
|
||||
ClientEvent::Ack => Ok(()),
|
||||
ClientEvent::ClientMessageChunk { .. } | ClientEvent::Ack { .. } => Ok(()),
|
||||
ClientEvent::Ping => {
|
||||
if let Some(client) = self.clients.get_mut(&client_key) {
|
||||
client.last_activity_at = Instant::now();
|
||||
@@ -1,6 +1,7 @@
|
||||
mod client_tracker;
|
||||
mod enroll;
|
||||
mod protocol;
|
||||
mod segment;
|
||||
mod websocket;
|
||||
|
||||
use crate::transport::remote_control::websocket::RemoteControlChannels;
|
||||
@@ -35,14 +36,14 @@ pub(super) struct QueuedServerEnvelope {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct RemoteControlHandle {
|
||||
pub struct RemoteControlHandle {
|
||||
enabled_tx: Arc<watch::Sender<bool>>,
|
||||
status_tx: Arc<watch::Sender<RemoteControlStatusChangedNotification>>,
|
||||
state_db_available: bool,
|
||||
}
|
||||
|
||||
impl RemoteControlHandle {
|
||||
pub(crate) fn set_enabled(&self, enabled: bool) {
|
||||
pub fn set_enabled(&self, enabled: bool) {
|
||||
let requested_enabled = enabled;
|
||||
let enabled = enabled && self.state_db_available;
|
||||
if requested_enabled && !self.state_db_available {
|
||||
@@ -55,14 +56,12 @@ impl RemoteControlHandle {
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn status_receiver(
|
||||
&self,
|
||||
) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
pub fn status_receiver(&self) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
self.status_tx.subscribe()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn start_remote_control(
|
||||
pub async fn start_remote_control(
|
||||
remote_control_url: String,
|
||||
state_db: Option<Arc<StateRuntime>>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
@@ -121,5 +120,7 @@ pub(crate) async fn start_remote_control(
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod segment_tests;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
@@ -47,10 +47,20 @@ pub enum ClientEvent {
|
||||
ClientMessage {
|
||||
message: JSONRPCMessage,
|
||||
},
|
||||
ClientMessageChunk {
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
message_chunk_base64: String,
|
||||
},
|
||||
/// Backend-generated acknowledgement for all server envelopes addressed to
|
||||
/// `client_id` and `stream_id` whose envelope `seq_id` is less than or equal
|
||||
/// to this ack's `seq_id`. This cursor is stream-scoped.
|
||||
Ack,
|
||||
/// to this ack's `seq_id`. Chunk acknowledgements carry `segment_id` so the
|
||||
/// sender can retain only the still-unacked wire chunks on reconnect.
|
||||
Ack {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
segment_id: Option<usize>,
|
||||
},
|
||||
Ping,
|
||||
ClientClosed,
|
||||
}
|
||||
@@ -85,6 +95,12 @@ pub enum ServerEvent {
|
||||
ServerMessage {
|
||||
message: Box<OutgoingMessage>,
|
||||
},
|
||||
ServerMessageChunk {
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
message_chunk_base64: String,
|
||||
},
|
||||
#[allow(dead_code)]
|
||||
Ack,
|
||||
Pong {
|
||||
@@ -92,6 +108,15 @@ pub enum ServerEvent {
|
||||
},
|
||||
}
|
||||
|
||||
impl ServerEvent {
|
||||
pub(crate) fn segment_id(&self) -> Option<usize> {
|
||||
match self {
|
||||
Self::ServerMessageChunk { segment_id, .. } => Some(*segment_id),
|
||||
Self::ServerMessage { .. } | Self::Ack | Self::Pong { .. } => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub(crate) struct ServerEnvelope {
|
||||
@@ -0,0 +1,449 @@
|
||||
use super::protocol::ClientEnvelope;
|
||||
use super::protocol::ClientEvent;
|
||||
use super::protocol::ClientId;
|
||||
use super::protocol::ServerEnvelope;
|
||||
use super::protocol::ServerEvent;
|
||||
use super::protocol::StreamId;
|
||||
use base64::DecodeSliceError;
|
||||
use base64::Engine;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Write;
|
||||
use tokio::time::Instant;
|
||||
use tracing::warn;
|
||||
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_TARGET_BYTES: usize = 100 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_MAX_BYTES: usize = 150 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_REASSEMBLED_MAX_BYTES: usize = 100 * 1024 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_COUNT_MAX: usize = 1024;
|
||||
const REMOTE_CONTROL_SEGMENT_ASSEMBLY_MAX_COUNT: usize = 128;
|
||||
|
||||
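// Sizing sketch (added for clarity; not part of this diff): with a 100 KiB target
// per wire chunk, a 1 MiB reassembled message splits into roughly
// ceil(1_048_576 / 102_400) = 11 chunks, each of which must still serialize under
// the 150 KiB hard cap. Messages needing more than 1024 chunks or exceeding the
// 100 MiB reassembled limit are dropped, and at most 128 in-flight client
// assemblies are tracked before the least recently active one is evicted.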
#[derive(Debug)]
|
||||
struct ClientSegmentAssembly {
|
||||
stream_id: StreamId,
|
||||
metadata: ClientSegmentMetadata,
|
||||
raw: Vec<u8>,
|
||||
next_segment_id: usize,
|
||||
last_chunk_seen_at: Instant,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct ClientSegmentMetadata {
|
||||
seq_id: u64,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(super) struct ClientSegmentReassembler {
|
||||
assemblies: HashMap<ClientId, ClientSegmentAssembly>,
|
||||
}
|
||||
|
||||
pub(super) enum ClientSegmentObservation {
|
||||
Forward(Box<ClientEnvelope>),
|
||||
Pending,
|
||||
Dropped,
|
||||
}
|
||||
|
||||
impl ClientSegmentReassembler {
|
||||
pub(super) fn observe(&mut self, envelope: ClientEnvelope) -> ClientSegmentObservation {
|
||||
let ClientEvent::ClientMessageChunk {
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
message_chunk_base64,
|
||||
} = &envelope.event
|
||||
else {
|
||||
return ClientSegmentObservation::Forward(Box::new(envelope));
|
||||
};
|
||||
let segment_id = *segment_id;
|
||||
let segment_count = *segment_count;
|
||||
let message_size_bytes = *message_size_bytes;
|
||||
|
||||
let Some(metadata) = ClientSegmentMetadata::from_envelope(&envelope) else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without seq_id"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
let Some(stream_id) = envelope.stream_id.clone() else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without stream_id"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
if self.should_ignore_chunk(&envelope.client_id, &stream_id, metadata.seq_id, segment_id) {
|
||||
return ClientSegmentObservation::Dropped;
|
||||
}
|
||||
if segment_count == 0
|
||||
|| segment_count > REMOTE_CONTROL_SEGMENT_COUNT_MAX
|
||||
|| segment_id >= segment_count
|
||||
|| message_size_bytes == 0
|
||||
|| message_size_bytes > REMOTE_CONTROL_REASSEMBLED_MAX_BYTES
|
||||
|| message_chunk_base64.is_empty()
|
||||
{
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping invalid segmented remote-control client envelope"
|
||||
);
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
match self.assemblies.get(&envelope.client_id) {
|
||||
Some(assembly) if assembly.stream_id != stream_id => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"resetting segmented remote-control client envelope after stream change"
|
||||
);
|
||||
self.assemblies.insert(
|
||||
envelope.client_id.clone(),
|
||||
ClientSegmentAssembly {
|
||||
stream_id: stream_id.clone(),
|
||||
metadata: metadata.clone(),
|
||||
raw: Vec::new(),
|
||||
next_segment_id: 0,
|
||||
last_chunk_seen_at: now,
|
||||
},
|
||||
);
|
||||
}
|
||||
Some(_) => {}
|
||||
None => {
|
||||
self.evict_assemblies_if_full();
|
||||
self.assemblies.insert(
|
||||
envelope.client_id.clone(),
|
||||
ClientSegmentAssembly {
|
||||
stream_id: stream_id.clone(),
|
||||
metadata: metadata.clone(),
|
||||
raw: Vec::new(),
|
||||
next_segment_id: 0,
|
||||
last_chunk_seen_at: now,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
let result = {
|
||||
let Some(assembly) = self.assemblies.get_mut(&envelope.client_id) else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without assembly"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
if metadata.seq_id < assembly.metadata.seq_id {
|
||||
AssemblyUpdate::Ignore
|
||||
} else if assembly.metadata != metadata {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"resetting segmented remote-control client envelope after metadata mismatch"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else if segment_id < assembly.next_segment_id {
|
||||
AssemblyUpdate::Pending
|
||||
} else if segment_id != assembly.next_segment_id {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping out-of-order segmented remote-control client envelope"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else {
|
||||
assembly.last_chunk_seen_at = now;
|
||||
let chunk_start = assembly.raw.len();
|
||||
let decoded_chunk_len = base64::decoded_len_estimate(message_chunk_base64.len());
|
||||
let chunk_end = usize::min(
|
||||
message_size_bytes,
|
||||
chunk_start.saturating_add(decoded_chunk_len),
|
||||
);
|
||||
assembly.raw.resize(chunk_end, 0);
|
||||
match base64::engine::general_purpose::STANDARD.decode_slice(
|
||||
message_chunk_base64.as_bytes(),
|
||||
&mut assembly.raw[chunk_start..],
|
||||
) {
|
||||
Ok(decoded_chunk_len) => {
|
||||
assembly.raw.truncate(chunk_start + decoded_chunk_len);
|
||||
assembly.next_segment_id += 1;
|
||||
if assembly.next_segment_id < segment_count {
|
||||
AssemblyUpdate::Pending
|
||||
} else if assembly.raw.len() != message_size_bytes {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping reassembled remote-control client envelope with mismatched size"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else {
|
||||
match serde_json::from_slice::<JSONRPCMessage>(&assembly.raw) {
|
||||
Ok(message) => AssemblyUpdate::Complete(message),
|
||||
Err(err) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping invalid reassembled remote-control client envelope: {err}"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(DecodeSliceError::OutputSliceTooSmall) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope after size overflow"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope with invalid base64: {err}"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match result {
|
||||
AssemblyUpdate::Pending => ClientSegmentObservation::Pending,
|
||||
AssemblyUpdate::Ignore => ClientSegmentObservation::Dropped,
|
||||
AssemblyUpdate::Drop => {
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
ClientSegmentObservation::Dropped
|
||||
}
|
||||
AssemblyUpdate::Complete(message) => {
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
ClientSegmentObservation::Forward(Box::new(ClientEnvelope {
|
||||
event: ClientEvent::ClientMessage { message },
|
||||
..envelope
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn invalidate_stream(&mut self, client_id: &ClientId, stream_id: &StreamId) {
|
||||
self.remove_assembly(client_id, stream_id);
|
||||
}
|
||||
|
||||
pub(super) fn invalidate_client(&mut self, client_id: &ClientId) {
|
||||
self.assemblies.remove(client_id);
|
||||
}
|
||||
|
||||
pub(super) fn should_ignore_chunk(
|
||||
&self,
|
||||
client_id: &ClientId,
|
||||
stream_id: &StreamId,
|
||||
seq_id: u64,
|
||||
segment_id: usize,
|
||||
) -> bool {
|
||||
self.assemblies.get(client_id).is_some_and(|assembly| {
|
||||
assembly.stream_id == *stream_id
|
||||
&& (seq_id < assembly.metadata.seq_id
|
||||
|| (seq_id == assembly.metadata.seq_id
|
||||
&& segment_id < assembly.next_segment_id))
|
||||
})
|
||||
}
|
||||
|
||||
fn remove_assembly(&mut self, client_id: &ClientId, stream_id: &StreamId) {
|
||||
if self
|
||||
.assemblies
|
||||
.get(client_id)
|
||||
.is_some_and(|assembly| &assembly.stream_id == stream_id)
|
||||
{
|
||||
self.assemblies.remove(client_id);
|
||||
}
|
||||
}
|
||||
|
||||
fn evict_assemblies_if_full(&mut self) {
|
||||
while self.assemblies.len() >= REMOTE_CONTROL_SEGMENT_ASSEMBLY_MAX_COUNT {
|
||||
let Some(client_id) = self
|
||||
.assemblies
|
||||
.iter()
|
||||
.min_by_key(|(_, assembly)| assembly.last_chunk_seen_at)
|
||||
.map(|(client_id, _)| client_id.clone())
|
||||
else {
|
||||
return;
|
||||
};
|
||||
self.assemblies.remove(&client_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum AssemblyUpdate {
|
||||
Pending,
|
||||
Ignore,
|
||||
Drop,
|
||||
Complete(JSONRPCMessage),
|
||||
}
|
||||
|
||||
impl ClientSegmentMetadata {
|
||||
fn from_envelope(envelope: &ClientEnvelope) -> Option<Self> {
|
||||
let ClientEvent::ClientMessageChunk {
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
..
|
||||
} = &envelope.event
|
||||
else {
|
||||
return None;
|
||||
};
|
||||
Some(Self {
|
||||
seq_id: envelope.seq_id?,
|
||||
segment_count: *segment_count,
|
||||
message_size_bytes: *message_size_bytes,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn split_server_envelope_for_transport(
|
||||
envelope: ServerEnvelope,
|
||||
) -> io::Result<Vec<ServerEnvelope>> {
|
||||
if !matches!(envelope.event, ServerEvent::ServerMessage { .. }) {
|
||||
return Ok(vec![envelope]);
|
||||
}
|
||||
|
||||
let envelope_size_bytes = serialized_len(&envelope)?;
|
||||
if envelope_size_bytes <= REMOTE_CONTROL_SEGMENT_MAX_BYTES {
|
||||
return Ok(vec![envelope]);
|
||||
}
|
||||
|
||||
let ServerEvent::ServerMessage { message } = envelope.event.clone() else {
|
||||
unreachable!("server message variant checked above");
|
||||
};
|
||||
let raw = serde_json::to_vec(message.as_ref()).map_err(io::Error::other)?;
|
||||
let message_size_bytes = raw.len();
|
||||
if message_size_bytes > REMOTE_CONTROL_REASSEMBLED_MAX_BYTES {
|
||||
warn!("dropping remote-control server envelope that exceeds reassembled size limit");
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let minimal_segment_count =
|
||||
usize::min(message_size_bytes.max(1), REMOTE_CONTROL_SEGMENT_COUNT_MAX);
|
||||
let minimal_chunk = &raw[..usize::min(raw.len(), 1)];
|
||||
if serialized_chunk_len(
|
||||
&envelope,
|
||||
/*segment_id*/ 0,
|
||||
minimal_segment_count,
|
||||
message_size_bytes,
|
||||
minimal_chunk,
|
||||
)? > REMOTE_CONTROL_SEGMENT_MAX_BYTES
|
||||
{
|
||||
warn!("dropping remote-control server envelope that cannot fit within segment size limit");
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let mut segment_count = usize::max(
|
||||
2,
|
||||
message_size_bytes.div_ceil(REMOTE_CONTROL_SEGMENT_TARGET_BYTES),
|
||||
);
|
||||
loop {
|
||||
let chunk_size = usize::max(1, message_size_bytes.div_ceil(segment_count));
|
||||
segment_count = message_size_bytes.div_ceil(chunk_size);
|
||||
let segments_fit = raw
|
||||
.chunks(chunk_size)
|
||||
.enumerate()
|
||||
.all(|(segment_id, chunk)| {
|
||||
serialized_chunk_len(
|
||||
&envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)
|
||||
.is_ok_and(|size| size <= REMOTE_CONTROL_SEGMENT_MAX_BYTES)
|
||||
});
|
||||
if segments_fit {
|
||||
return raw
|
||||
.chunks(chunk_size)
|
||||
.enumerate()
|
||||
.map(|(segment_id, chunk)| {
|
||||
build_chunk_envelope(
|
||||
&envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
}
|
||||
if chunk_size == 1 {
|
||||
warn!(
|
||||
"dropping remote-control server envelope that cannot fit within segment size limit"
|
||||
);
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let next_segment_count = segment_count + 1;
|
||||
let next_chunk_size = usize::max(1, message_size_bytes.div_ceil(next_segment_count));
|
||||
segment_count = if next_chunk_size == chunk_size {
|
||||
message_size_bytes
|
||||
} else {
|
||||
next_segment_count
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
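// Worked example for the segment-count search above (added for clarity; not part
// of this diff): for a 1 MiB payload the first guess is
// max(2, ceil(1_048_576 / 102_400)) = 11 segments, i.e. chunks of
// ceil(1_048_576 / 11) = 95_326 raw bytes. If base64 expansion plus envelope
// overhead pushes any serialized chunk past REMOTE_CONTROL_SEGMENT_MAX_BYTES, the
// loop retries with one more segment, jumping straight to one byte per segment
// once adding a segment no longer shrinks the chunk size, and finally drops the
// envelope if even single-byte chunks cannot fit.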
fn serialized_chunk_len(
|
||||
envelope: &ServerEnvelope,
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
chunk: &[u8],
|
||||
) -> io::Result<usize> {
|
||||
serialized_len(&build_chunk_envelope(
|
||||
envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)?)
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct CountingWriter {
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl Write for CountingWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.len += buf.len();
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn serialized_len(value: &impl serde::Serialize) -> io::Result<usize> {
|
||||
let mut writer = CountingWriter::default();
|
||||
serde_json::to_writer(&mut writer, value).map_err(io::Error::other)?;
|
||||
Ok(writer.len)
|
||||
}
|
||||
|
||||
fn build_chunk_envelope(
|
||||
envelope: &ServerEnvelope,
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
chunk: &[u8],
|
||||
) -> io::Result<ServerEnvelope> {
|
||||
if segment_count > REMOTE_CONTROL_SEGMENT_COUNT_MAX {
|
||||
return Err(io::Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
"remote-control segment count exceeds maximum",
|
||||
));
|
||||
}
|
||||
Ok(ServerEnvelope {
|
||||
event: ServerEvent::ServerMessageChunk {
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
|
||||
},
|
||||
client_id: envelope.client_id.clone(),
|
||||
stream_id: envelope.stream_id.clone(),
|
||||
seq_id: envelope.seq_id,
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,386 @@
|
||||
use super::protocol::ClientEnvelope;
|
||||
use super::protocol::ClientEvent;
|
||||
use super::protocol::ClientId;
|
||||
use super::protocol::ServerEnvelope;
|
||||
use super::protocol::ServerEvent;
|
||||
use super::protocol::StreamId;
|
||||
use super::segment::ClientSegmentObservation;
|
||||
use super::segment::ClientSegmentReassembler;
|
||||
use super::segment::REMOTE_CONTROL_SEGMENT_MAX_BYTES;
|
||||
use super::segment::split_server_envelope_for_transport;
|
||||
use crate::outgoing_message::OutgoingMessage;
|
||||
use base64::Engine;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn reassembles_client_message_chunks() {
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = Some(StreamId("stream-1".to_string()));
|
||||
let mut reassembler = ClientSegmentReassembler::default();
|
||||
|
||||
assert!(matches!(
|
||||
reassembler.observe(chunk_envelope(
|
||||
client_id.clone(),
|
||||
stream_id.clone(),
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
)),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
let reassembled = match reassembler.observe(chunk_envelope(
|
||||
client_id.clone(),
|
||||
stream_id,
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
)) {
|
||||
ClientSegmentObservation::Forward(reassembled) => *reassembled,
|
||||
ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => {
|
||||
panic!("message should reassemble")
|
||||
}
|
||||
};
|
||||
assert_eq!(reassembled.client_id, client_id);
|
||||
assert_eq!(
|
||||
reassembled.stream_id,
|
||||
Some(StreamId("stream-1".to_string()))
|
||||
);
|
||||
assert_eq!(reassembled.seq_id, Some(7));
|
||||
assert_eq!(reassembled.cursor, None);
|
||||
match reassembled.event {
|
||||
ClientEvent::ClientMessage {
|
||||
message: reassembled_message,
|
||||
} => assert_eq!(reassembled_message, message),
|
||||
other => panic!("expected client message, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn splits_large_server_messages_into_wire_chunks() {
|
||||
let envelope = ServerEnvelope {
|
||||
event: ServerEvent::ServerMessage {
|
||||
            message: Box::new(OutgoingMessage::AppServerNotification(
                ServerNotification::ConfigWarning(ConfigWarningNotification {
                    summary: "x".repeat(REMOTE_CONTROL_SEGMENT_MAX_BYTES),
                    details: None,
                    path: None,
                    range: None,
                }),
            )),
        },
        client_id: ClientId("client-1".to_string()),
        stream_id: StreamId("stream-1".to_string()),
        seq_id: 9,
    };

    let segments = split_server_envelope_for_transport(envelope).expect("split should succeed");

    assert!(segments.len() > 1);
    assert!(
        segments
            .iter()
            .all(|segment| matches!(segment.event, ServerEvent::ServerMessageChunk { .. }))
    );
    assert!(segments.iter().all(|segment| segment.seq_id == 9));
    assert!(segments.iter().all(|segment| {
        serde_json::to_vec(segment)
            .expect("segment should serialize")
            .len()
            <= REMOTE_CONTROL_SEGMENT_MAX_BYTES
    }));
}

#[test]
fn invalidates_incomplete_stream_assemblies() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = StreamId("stream-1".to_string());
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            Some(stream_id.clone()),
            /*seq_id*/ 7,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    reassembler.invalidate_stream(&client_id, &stream_id);
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            Some(stream_id),
            /*seq_id*/ 7,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Dropped
    ));
}

#[test]
fn resets_incomplete_client_assembly_when_stream_changes() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let first_stream_id = StreamId("stream-1".to_string());
    let second_stream_id = StreamId("stream-2".to_string());
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            Some(first_stream_id.clone()),
            /*seq_id*/ 7,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            Some(second_stream_id.clone()),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    let reassembled = match reassembler.observe(chunk_envelope(
        client_id.clone(),
        Some(second_stream_id),
        /*seq_id*/ 8,
        /*segment_id*/ 1,
        /*segment_count*/ 2,
        raw.len(),
        &raw[split..],
    )) {
        ClientSegmentObservation::Forward(reassembled) => *reassembled,
        ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => {
            panic!("replacement stream should reassemble")
        }
    };
    assert_eq!(
        reassembled.stream_id,
        Some(StreamId("stream-2".to_string()))
    );
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            Some(first_stream_id),
            /*seq_id*/ 7,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Dropped
    ));
}

#[test]
fn ignores_stale_chunks_without_dropping_newer_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 7,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

#[test]
fn ignores_invalid_stale_chunks_without_dropping_newer_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 7,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            b"",
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

#[test]
fn ignores_invalid_duplicate_chunks_without_dropping_current_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            b"",
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

fn chunk_envelope(
    client_id: ClientId,
    stream_id: Option<StreamId>,
    seq_id: u64,
    segment_id: usize,
    segment_count: usize,
    message_size_bytes: usize,
    chunk: &[u8],
) -> ClientEnvelope {
    ClientEnvelope {
        event: ClientEvent::ClientMessageChunk {
            segment_id,
            segment_count,
            message_size_bytes,
            message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
        },
        client_id,
        stream_id,
        seq_id: Some(seq_id),
        cursor: None,
    }
}
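The tests above exercise splitting and reassembly of oversized envelopes. As a minimal, hedged sketch of the idea they verify (not the crate's actual `split_server_envelope_for_transport` or `ClientSegmentReassembler`), a sender can slice the serialized message into base64 chunks that carry `segment_id` / `segment_count` / `message_size_bytes`, and a receiver can only accept an assembly whose metadata is consistent. The `Chunk` struct and the chunk-size parameter below are illustrative assumptions only:

```rust
use base64::Engine;

#[derive(Debug, Clone)]
struct Chunk {
    segment_id: usize,
    segment_count: usize,
    message_size_bytes: usize,
    message_chunk_base64: String,
}

// `max_raw_bytes_per_chunk` must be > 0; real code would derive it from the
// serialized-envelope size limit rather than the raw payload size.
fn split_into_chunks(raw: &[u8], max_raw_bytes_per_chunk: usize) -> Vec<Chunk> {
    let segment_count = raw.len().div_ceil(max_raw_bytes_per_chunk).max(1);
    raw.chunks(max_raw_bytes_per_chunk)
        .enumerate()
        .map(|(segment_id, chunk)| Chunk {
            segment_id,
            segment_count,
            message_size_bytes: raw.len(),
            message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
        })
        .collect()
}

fn reassemble(chunks: &[Chunk]) -> Option<Vec<u8>> {
    let first = chunks.first()?;
    let mut raw = Vec::with_capacity(first.message_size_bytes);
    for (expected_id, chunk) in chunks.iter().enumerate() {
        // Out-of-order segments or inconsistent metadata invalidate the assembly.
        if chunk.segment_id != expected_id || chunk.segment_count != chunks.len() {
            return None;
        }
        let bytes = base64::engine::general_purpose::STANDARD
            .decode(&chunk.message_chunk_base64)
            .ok()?;
        raw.extend_from_slice(&bytes);
    }
    // Reject truncated or padded assemblies, mirroring the "invalid chunk" tests.
    (raw.len() == first.message_size_bytes).then_some(raw)
}
```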
@@ -831,7 +831,7 @@ async fn remote_control_transport_clears_outgoing_buffer_when_backend_acks() {
    send_client_event(
        &mut first_websocket,
        ClientEnvelope {
            event: ClientEvent::Ack,
            event: ClientEvent::Ack { segment_id: None },
            client_id: client_id.clone(),
            stream_id: Some(stream_id),
            seq_id: Some(1),
@@ -15,6 +15,10 @@ use super::protocol::ClientId;
use super::protocol::RemoteControlTarget;
use super::protocol::ServerEnvelope;
use super::protocol::StreamId;
use super::segment::ClientSegmentObservation;
use super::segment::ClientSegmentReassembler;
use super::segment::REMOTE_CONTROL_SEGMENT_MAX_BYTES;
use super::segment::split_server_envelope_for_transport;
use axum::http::HeaderValue;
use base64::Engine;
use codex_app_server_protocol::RemoteControlConnectionStatus;
@@ -49,7 +53,7 @@ use tracing::error;
use tracing::info;
use tracing::warn;

pub(super) const REMOTE_CONTROL_PROTOCOL_VERSION: &str = "2";
pub(super) const REMOTE_CONTROL_PROTOCOL_VERSION: &str = "3";
pub(super) const REMOTE_CONTROL_ACCOUNT_ID_HEADER: &str = "chatgpt-account-id";
const REMOTE_CONTROL_SUBSCRIBE_CURSOR_HEADER: &str = "x-codex-subscribe-cursor";
const REMOTE_CONTROL_WEBSOCKET_PING_INTERVAL: std::time::Duration =
@@ -85,17 +89,29 @@ impl BoundedOutboundBuffer {
        self.used_tx.send_modify(|used| *used += 1);
    }

    fn ack(&mut self, client_id: &ClientId, stream_id: &StreamId, acked_seq_id: u64) {
    fn ack(
        &mut self,
        client_id: &ClientId,
        stream_id: &StreamId,
        acked_seq_id: u64,
        acked_segment_id: Option<usize>,
    ) {
        let key = (client_id.clone(), stream_id.clone());
        let Some(buffer) = self.buffer_by_stream.get_mut(&key) else {
            return;
        };
        while let Some(server_envelope) = buffer.front()
            && server_envelope.seq_id <= acked_seq_id
        {
            buffer.pop_front();
            self.used_tx.send_modify(|used| *used -= 1);
        }
        let acked_cursor = (acked_seq_id, acked_segment_id.unwrap_or(usize::MAX));
        buffer.retain(|server_envelope| {
            let envelope_cursor = (
                server_envelope.seq_id,
                server_envelope.event.segment_id().unwrap_or_default(),
            );
            let is_acked = envelope_cursor <= acked_cursor;
            if is_acked {
                self.used_tx.send_modify(|used| *used -= 1);
            }
            !is_acked
        });
        if buffer.is_empty() {
            self.buffer_by_stream.remove(&key);
        }
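The reworked `ack` above keys released envelopes off a `(seq_id, segment_id)` pair instead of a bare `seq_id`. A short illustrative sketch (not the crate's code) of why lexicographic tuple ordering covers both whole-message acks and per-segment acks, with a missing segment id mapped to `usize::MAX` so it dominates every segment of that sequence number:

```rust
// Returns true when a buffered envelope's cursor is at or before the ack cursor.
fn is_acked(
    envelope_cursor: (u64, usize),
    acked_seq_id: u64,
    acked_segment_id: Option<usize>,
) -> bool {
    let acked_cursor = (acked_seq_id, acked_segment_id.unwrap_or(usize::MAX));
    // Lexicographic tuple ordering: earlier seq_ids are always released, and
    // within the acked seq_id only segments up to the acked segment are released.
    envelope_cursor <= acked_cursor
}

fn main() {
    assert!(is_acked((3, 5), 4, Some(1))); // older seq_id: released
    assert!(is_acked((4, 1), 4, Some(1))); // same seq, segment <= acked: released
    assert!(!is_acked((4, 2), 4, Some(1))); // later segment: still buffered
    assert!(is_acked((4, 2), 4, None)); // segment-less ack releases the whole seq
}
```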
@@ -112,6 +128,88 @@ struct WebsocketState {
    outbound_buffer: BoundedOutboundBuffer,
    subscribe_cursor: Option<String>,
    next_seq_id_by_stream: HashMap<(ClientId, StreamId), u64>,
    last_completed_client_chunk_seq_id_by_stream: HashMap<(ClientId, Option<StreamId>), u64>,
    client_segment_reassembler: ClientSegmentReassembler,
}

impl WebsocketState {
    fn observe_client_message(
        &mut self,
        client_envelope: ClientEnvelope,
        wire_size_bytes: usize,
    ) -> ClientSegmentObservation {
        let client_message_key = Self::client_message_key(&client_envelope);
        if let Some((key, seq_id)) = client_message_key.as_ref()
            && self
                .last_completed_client_chunk_seq_id_by_stream
                .get(key)
                .is_some_and(|last_seq_id| last_seq_id >= seq_id)
        {
            return ClientSegmentObservation::Dropped;
        }
        if let (
            Some((_, seq_id)),
            Some(stream_id),
            ClientEvent::ClientMessageChunk { segment_id, .. },
        ) = (
            client_message_key.as_ref(),
            client_envelope.stream_id.as_ref(),
            &client_envelope.event,
        ) && self.client_segment_reassembler.should_ignore_chunk(
            &client_envelope.client_id,
            stream_id,
            *seq_id,
            *segment_id,
        ) {
            return ClientSegmentObservation::Dropped;
        }
        if client_message_key.is_some() && wire_size_bytes > REMOTE_CONTROL_SEGMENT_MAX_BYTES {
            warn!(
                client_id = client_envelope.client_id.0.as_str(),
                "dropping oversized segmented remote-control client envelope"
            );
            if let Some(stream_id) = client_envelope.stream_id.as_ref() {
                self.client_segment_reassembler
                    .invalidate_stream(&client_envelope.client_id, stream_id);
            }
            return ClientSegmentObservation::Dropped;
        }

        let observation = self.client_segment_reassembler.observe(client_envelope);
        if matches!(observation, ClientSegmentObservation::Forward(_))
            && let Some((key, seq_id)) = client_message_key
        {
            self.last_completed_client_chunk_seq_id_by_stream
                .insert(key, seq_id);
        }
        observation
    }

    fn invalidate_client_message_stream(&mut self, client_id: &ClientId, stream_id: &StreamId) {
        self.last_completed_client_chunk_seq_id_by_stream
            .remove(&(client_id.clone(), Some(stream_id.clone())));
    }

    fn invalidate_client_message_client(&mut self, client_id: &ClientId) {
        self.last_completed_client_chunk_seq_id_by_stream
            .retain(|(cursor_client_id, _), _| cursor_client_id != client_id);
    }

    fn client_message_key(
        client_envelope: &ClientEnvelope,
    ) -> Option<((ClientId, Option<StreamId>), u64)> {
        let seq_id = match (&client_envelope.event, client_envelope.seq_id) {
            (ClientEvent::ClientMessageChunk { .. }, Some(seq_id)) => seq_id,
            _ => return None,
        };
        Some((
            (
                client_envelope.client_id.clone(),
                client_envelope.stream_id.clone(),
            ),
            seq_id,
        ))
    }
}

pub(crate) struct RemoteControlWebsocket {
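The `observe_client_message` path above layers a replay guard in front of the reassembler: it remembers, per `(client, stream)` key, the highest chunked `seq_id` that already reassembled, and drops anything at or below it. A simplified, hedged sketch of that bookkeeping (plain `String` keys stand in for the real `ClientId`/`StreamId` newtypes):

```rust
use std::collections::HashMap;

type Key = (String, Option<String>);

#[derive(Default)]
struct ReplayGuard {
    last_completed: HashMap<Key, u64>,
}

impl ReplayGuard {
    // A chunk whose seq_id is at or below the last completed assembly is a replay.
    fn should_drop(&self, key: &Key, seq_id: u64) -> bool {
        self.last_completed
            .get(key)
            .is_some_and(|last| *last >= seq_id)
    }

    // Called only once a full message has been forwarded.
    fn mark_completed(&mut self, key: Key, seq_id: u64) {
        self.last_completed.insert(key, seq_id);
    }

    // Stream teardown clears the cursor so a new stream can reuse low seq_ids.
    fn invalidate_stream(&mut self, key: &Key) {
        self.last_completed.remove(key);
    }
}
```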
@@ -231,6 +329,8 @@ impl RemoteControlWebsocket {
            outbound_buffer,
            subscribe_cursor: None,
            next_seq_id_by_stream: HashMap::new(),
            last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
            client_segment_reassembler: ClientSegmentReassembler::default(),
        })),
        server_event_rx: Arc::new(Mutex::new(server_event_rx)),
        used_rx,
@@ -556,7 +656,7 @@ impl RemoteControlWebsocket {
                }
            }
        };
        let (payload, write_complete_tx) = {
        let (payloads, write_complete_tx) = {
            let mut state = state.lock().await;
            let seq_key = (
                queued_server_envelope.client_id.clone(),
@@ -573,29 +673,42 @@ impl RemoteControlWebsocket {
                seq_id,
                stream_id: queued_server_envelope.stream_id,
            };
            let payload = match serde_json::to_string(&server_envelope) {
                Ok(payload) => payload,
            let server_envelopes = match split_server_envelope_for_transport(server_envelope) {
                Ok(server_envelopes) => server_envelopes,
                Err(err) => {
                    error!("failed to serialize remote-control server event: {err}");
                    error!("failed to split remote-control server event: {err}");
                    continue;
                }
            };
            let mut payloads = Vec::with_capacity(server_envelopes.len());
            for server_envelope in server_envelopes {
                let payload = match serde_json::to_string(&server_envelope) {
                    Ok(payload) => payload,
                    Err(err) => {
                        error!("failed to serialize remote-control server event: {err}");
                        continue;
                    }
                };
                state.outbound_buffer.insert(&server_envelope);
                payloads.push(payload);
            }
            state
                .next_seq_id_by_stream
                .insert(seq_key, seq_id.saturating_add(1));
            state.outbound_buffer.insert(&server_envelope);

            (payload, queued_server_envelope.write_complete_tx)
            (payloads, queued_server_envelope.write_complete_tx)
        };

        tokio::select! {
            _ = shutdown_token.cancelled() => return Ok(()),
            send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => {
                if let Err(err) = send_result {
                    return Err(io::Error::other(err));
        for payload in payloads {
            tokio::select! {
                _ = shutdown_token.cancelled() => return Ok(()),
                send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => {
                    if let Err(err) = send_result {
                        return Err(io::Error::other(err));
                    }
                }
            }
        };
        }
        if let Some(write_complete_tx) = write_complete_tx {
            let _ = write_complete_tx.send(());
        }
@@ -657,11 +770,30 @@ impl RemoteControlWebsocket {
            if client_tracker.close_client(&client_key).await.is_err() {
                return Ok(());
            }
            state
                .lock()
                .await
                .client_segment_reassembler
                .invalidate_stream(&client_key.0, &client_key.1);
            state
                .lock()
                .await
                .invalidate_client_message_stream(&client_key.0, &client_key.1);
            continue;
        }
        _ = idle_sweep_interval.tick() => {
            if client_tracker.close_expired_clients().await.is_err() {
                return Ok(());
            match client_tracker.close_expired_clients().await {
                Ok(client_keys) => {
                    let mut websocket_state = state.lock().await;
                    for (client_id, stream_id) in client_keys {
                        websocket_state
                            .client_segment_reassembler
                            .invalidate_stream(&client_id, &stream_id);
                        websocket_state
                            .invalidate_client_message_stream(&client_id, &stream_id);
                    }
                }
                Err(_) => return Ok(()),
            }
            continue;
        }
@@ -672,10 +804,11 @@ impl RemoteControlWebsocket {
            }
        }
    };
    let client_envelope = match incoming_message {
    let (client_envelope, wire_size_bytes) = match incoming_message {
        Ok(tungstenite::Message::Text(text)) => {
            let wire_size_bytes = text.len();
            match serde_json::from_str::<ClientEnvelope>(&text) {
                Ok(client_envelope) => client_envelope,
                Ok(client_envelope) => (client_envelope, wire_size_bytes),
                Err(err) => {
                    warn!("failed to deserialize remote-control client event: {err}");
                    continue;
@@ -707,12 +840,21 @@ impl RemoteControlWebsocket {
        }
    };

    let observation = {
        let mut websocket_state = state.lock().await;
        websocket_state.observe_client_message(client_envelope, wire_size_bytes)
    };
    let client_envelope = match observation {
        ClientSegmentObservation::Forward(client_envelope) => *client_envelope,
        ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => continue,
    };

    {
        let mut websocket_state = state.lock().await;
        if let Some(cursor) = client_envelope.cursor.as_deref() {
            websocket_state.subscribe_cursor = Some(cursor.to_string());
        }
        if let ClientEvent::Ack = &client_envelope.event
        if let ClientEvent::Ack { segment_id } = &client_envelope.event
            && let Some(acked_seq_id) = client_envelope.seq_id
            && let Some(stream_id) = client_envelope.stream_id.as_ref()
        {
@@ -720,10 +862,18 @@ impl RemoteControlWebsocket {
                &client_envelope.client_id,
                stream_id,
                acked_seq_id,
                *segment_id,
            );
        }
    }

    let closed_client =
        matches!(&client_envelope.event, ClientEvent::ClientClosed).then(|| {
            (
                client_envelope.client_id.clone(),
                client_envelope.stream_id.clone(),
            )
        });
    if client_tracker
        .handle_message(client_envelope)
        .await
@@ -731,6 +881,20 @@ impl RemoteControlWebsocket {
    {
        return Ok(());
    }
    if let Some((client_id, stream_id)) = closed_client {
        let mut websocket_state = state.lock().await;
        if let Some(stream_id) = stream_id {
            websocket_state
                .client_segment_reassembler
                .invalidate_stream(&client_id, &stream_id);
            websocket_state.invalidate_client_message_stream(&client_id, &stream_id);
        } else {
            websocket_state
                .client_segment_reassembler
                .invalidate_client(&client_id);
            websocket_state.invalidate_client_message_client(&client_id);
        }
    }
                }
            }
        }
    }
}
@@ -1052,6 +1216,8 @@ mod tests {
|
||||
use chrono::Utc;
|
||||
use codex_app_server_protocol::AuthMode;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_config::types::AuthCredentialsStoreMode;
|
||||
use codex_core::test_support::auth_manager_from_auth;
|
||||
@@ -1603,6 +1769,8 @@ mod tests {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
}));
|
||||
let (_server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
|
||||
let server_event_rx = Arc::new(Mutex::new(server_event_rx));
|
||||
@@ -1639,6 +1807,8 @@ mod tests {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
}));
|
||||
let (server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
|
||||
let server_event_rx = Arc::new(Mutex::new(server_event_rx));
|
||||
@@ -1716,6 +1886,8 @@ mod tests {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
}));
|
||||
let (server_event_tx, _server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
|
||||
let (transport_event_tx, _transport_event_rx) =
|
||||
@@ -1771,7 +1943,9 @@ mod tests {
|
||||
"first-client-new-stream",
|
||||
));
|
||||
|
||||
outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 3);
|
||||
outbound_buffer.ack(
|
||||
&client_1, &stream_1, /*acked_seq_id*/ 3, /*acked_segment_id*/ None,
|
||||
);
|
||||
|
||||
let mut retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
@@ -1814,7 +1988,9 @@ mod tests {
|
||||
&client_2, "stream-1", /*seq_id*/ 3, "second",
|
||||
));
|
||||
|
||||
outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 1);
|
||||
outbound_buffer.ack(
|
||||
&client_1, &stream_1, /*acked_seq_id*/ 1, /*acked_segment_id*/ None,
|
||||
);
|
||||
|
||||
let mut retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
@@ -1834,6 +2010,390 @@ mod tests {
|
||||
assert_eq!(*used_rx.borrow(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outbound_buffer_advances_segmented_acks_by_wire_cursor() {
|
||||
let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new();
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
));
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
));
|
||||
|
||||
outbound_buffer.ack(
|
||||
&client_id,
|
||||
&stream_id,
|
||||
/*acked_seq_id*/ 4,
|
||||
/*acked_segment_id*/ Some(1),
|
||||
);
|
||||
|
||||
let retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
.map(|server_envelope| server_envelope.event.segment_id())
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(retained, Vec::<Option<usize>>::new());
|
||||
assert_eq!(*used_rx.borrow(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outbound_buffer_treats_segmentless_acks_as_seq_level_acks() {
|
||||
let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new();
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
));
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
));
|
||||
|
||||
outbound_buffer.ack(
|
||||
&client_id, &stream_id, /*acked_seq_id*/ 4, /*acked_segment_id*/ None,
|
||||
);
|
||||
|
||||
let retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
.map(|server_envelope| server_envelope.event.segment_id())
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(retained, Vec::<Option<usize>>::new());
|
||||
assert_eq!(*used_rx.borrow(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_duplicate_client_chunks_while_pending() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"y",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_replayed_client_chunks_after_completion() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 4,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 4,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_allows_replay_after_rejected_out_of_order_chunk() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"y",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_allows_replay_after_later_chunk_drops() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let invalid_second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, invalid_second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_oversized_client_chunk_frames() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 1, /*message_size_bytes*/ 1, b"x",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
state.observe_client_message(chunk, REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_ignores_oversized_stale_chunks_without_dropping_newer_assembly() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_newer_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let oversized_stale_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_newer_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_newer_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
state.observe_client_message(
|
||||
oversized_stale_chunk,
|
||||
REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1,
|
||||
),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_newer_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_ignores_oversized_duplicate_chunks_without_dropping_current_assembly() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let oversized_duplicate_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
state.observe_client_message(
|
||||
oversized_duplicate_chunk,
|
||||
REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1,
|
||||
),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_clears_chunk_cursor_when_stream_is_invalidated() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(
|
||||
&mut state,
|
||||
client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
)
|
||||
),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
state.invalidate_client_message_stream(&client_id, &stream_id);
|
||||
state
|
||||
.client_segment_reassembler
|
||||
.invalidate_stream(&client_id, &stream_id);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(
|
||||
&mut state,
|
||||
client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 1, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
)
|
||||
),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
fn server_envelope(
|
||||
client_id: &ClientId,
|
||||
stream_id: &str,
|
||||
@@ -1857,6 +2417,58 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn server_chunk_envelope(
|
||||
client_id: &ClientId,
|
||||
stream_id: &str,
|
||||
seq_id: u64,
|
||||
segment_id: usize,
|
||||
) -> ServerEnvelope {
|
||||
ServerEnvelope {
|
||||
event: ServerEvent::ServerMessageChunk {
|
||||
segment_id,
|
||||
segment_count: 2,
|
||||
message_size_bytes: 2,
|
||||
message_chunk_base64: String::new(),
|
||||
},
|
||||
client_id: client_id.clone(),
|
||||
stream_id: StreamId(stream_id.to_string()),
|
||||
seq_id,
|
||||
}
|
||||
}
|
||||
|
||||
fn client_chunk_envelope(
|
||||
client_id: &str,
|
||||
stream_id: &str,
|
||||
seq_id: u64,
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
chunk: &[u8],
|
||||
) -> ClientEnvelope {
|
||||
ClientEnvelope {
|
||||
event: ClientEvent::ClientMessageChunk {
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
|
||||
},
|
||||
client_id: ClientId(client_id.to_string()),
|
||||
stream_id: Some(StreamId(stream_id.to_string())),
|
||||
seq_id: Some(seq_id),
|
||||
cursor: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn observe_client_message(
|
||||
state: &mut WebsocketState,
|
||||
envelope: ClientEnvelope,
|
||||
) -> ClientSegmentObservation {
|
||||
let wire_size_bytes = serde_json::to_vec(&envelope)
|
||||
.expect("client envelope should serialize")
|
||||
.len();
|
||||
state.observe_client_message(envelope, wire_size_bytes)
|
||||
}
|
||||
|
||||
async fn accept_http_request(listener: &TcpListener) -> (TcpStream, String) {
|
||||
let (stream, _) = timeout(TEST_HTTP_ACCEPT_TIMEOUT, listener.accept())
|
||||
.await
|
||||
@@ -21,7 +21,7 @@ use tracing::debug;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
|
||||
pub(crate) async fn start_stdio_connection(
|
||||
pub async fn start_stdio_connection(
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
stdio_handles: &mut Vec<JoinHandle<()>>,
|
||||
initialize_client_name_tx: oneshot::Sender<String>,
|
||||
@@ -20,7 +20,7 @@ use tracing::warn;
|
||||
#[cfg(unix)]
|
||||
const CONTROL_SOCKET_MODE: u32 = 0o600;
|
||||
|
||||
pub(crate) async fn start_control_socket_acceptor(
|
||||
pub async fn start_control_socket_acceptor(
|
||||
socket_path: AbsolutePathBuf,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
shutdown_token: CancellationToken,
|
||||
@@ -128,7 +128,7 @@ async fn websocket_upgrade_handler(
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) async fn start_websocket_acceptor(
|
||||
pub async fn start_websocket_acceptor(
|
||||
bind_address: SocketAddr,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
shutdown_token: CancellationToken,
|
||||
@@ -30,7 +30,6 @@ axum = { workspace = true, default-features = false, features = [
|
||||
"ws",
|
||||
] }
|
||||
codex-analytics = { workspace = true }
|
||||
codex-api = { workspace = true }
|
||||
codex-arg0 = { workspace = true }
|
||||
codex-cloud-requirements = { workspace = true }
|
||||
codex-config = { workspace = true }
|
||||
@@ -44,6 +43,7 @@ codex-features = { workspace = true }
|
||||
codex-git-utils = { workspace = true }
|
||||
codex-hooks = { workspace = true }
|
||||
codex-otel = { workspace = true }
|
||||
codex-plugin = { workspace = true }
|
||||
codex-shell-command = { workspace = true }
|
||||
codex-utils-cli = { workspace = true }
|
||||
codex-utils-pty = { workspace = true }
|
||||
@@ -57,6 +57,7 @@ codex-model-provider = { workspace = true }
|
||||
codex-models-manager = { workspace = true }
|
||||
codex-protocol = { workspace = true }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-app-server-transport = { workspace = true }
|
||||
codex-feedback = { workspace = true }
|
||||
codex-rmcp-client = { workspace = true }
|
||||
codex-rollout = { workspace = true }
|
||||
@@ -64,18 +65,11 @@ codex-sandboxing = { workspace = true }
|
||||
codex-state = { workspace = true }
|
||||
codex-thread-store = { workspace = true }
|
||||
codex-tools = { workspace = true }
|
||||
codex-uds = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
codex-utils-json-to-toml = { workspace = true }
|
||||
codex-utils-rustls-provider = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
constant_time_eq = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
gethostname = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
owo-colors = { workspace = true, features = ["supports-colors"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
@@ -92,7 +86,6 @@ tokio = { workspace = true, features = [
|
||||
"signal",
|
||||
] }
|
||||
tokio-util = { workspace = true }
|
||||
tokio-tungstenite = { workspace = true }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] }
|
||||
url = { workspace = true }
|
||||
@@ -110,6 +103,7 @@ core_test_support = { workspace = true }
|
||||
codex-model-provider-info = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
flate2 = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
opentelemetry = { workspace = true }
|
||||
opentelemetry_sdk = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
|
||||
@@ -144,12 +144,12 @@ Example with notification opt-out:
|
||||
|
||||
- `thread/start` — create a new thread; emits `thread/started` (including the current `thread.status`) and auto-subscribes you to turn/item events for that thread. When the request includes a `cwd` and the resolved sandbox is `workspace-write` or full access, app-server also marks that project as trusted in the user `config.toml`. Pass `sessionStartSource: "clear"` when starting a replacement thread after clearing the current session so `SessionStart` hooks receive `source: "clear"` instead of the default `"startup"`. For permissions, prefer experimental `permissions` profile selection; the legacy `sandbox` shorthand is still accepted but cannot be combined with `permissions`. Experimental `environments` selects the sticky execution environments for turns on the thread; omit it to use the server default, pass `[]` to disable environments, or pass explicit environment ids with per-environment `cwd`.
|
||||
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it. Accepts the same permission override rules as `thread/start`.
|
||||
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; if the source thread is currently mid-turn, the fork records the same interruption marker as `turn/interrupt` instead of inheriting an unmarked partial turn suffix. The returned `thread.forkedFromId` points at the source thread when known. Accepts `ephemeral: true` for an in-memory temporary fork, emits `thread/started` (including the current `thread.status`), and auto-subscribes you to turn/item events for the new thread. Pass `excludeTurns: true` when the client plans to page fork history via `thread/turns/list` instead of receiving the full turn array immediately. Accepts the same permission override rules as `thread/start`.
|
||||
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; if the source thread is currently mid-turn, the fork records the same interruption marker as `turn/interrupt` instead of inheriting an unmarked partial turn suffix. The returned `thread.forkedFromId` points at the source thread when known. Accepts `ephemeral: true` for an in-memory temporary fork, emits `thread/started` (including the current `thread.status`), and auto-subscribes you to turn/item events for the new thread. Experimental clients can pass `excludeTurns: true` when they plan to page fork history via `thread/turns/list` instead of receiving the full turn array immediately. Accepts the same permission override rules as `thread/start`.
|
||||
- `thread/start`, `thread/resume`, and `thread/fork` responses include the legacy `sandbox` compatibility projection. Experimental clients can read response `permissionProfile` for the exact active runtime permissions and `activePermissionProfile` for the named or implicit built-in profile identity/provenance when known.
|
||||
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders`, `sourceKinds`, `archived`, `cwd`, and `searchTerm` filters. Each returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
|
||||
- `thread/loaded/list` — list the thread ids currently loaded in memory.
|
||||
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`. The returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
|
||||
- `thread/turns/list` — page through a stored thread’s turn history without resuming it; supports cursor-based pagination with `sortDirection`, `nextCursor`, and `backwardsCursor`.
|
||||
- `thread/turns/list` — experimental; page through a stored thread’s turn history without resuming it; supports cursor-based pagination with `sortDirection`, `nextCursor`, and `backwardsCursor`.
|
||||
- `thread/metadata/update` — patch stored thread metadata in sqlite; currently supports updating persisted `gitInfo` fields and returns the refreshed `thread`.
|
||||
- `thread/memoryMode/set` — experimental; set a thread’s persisted memory eligibility to `"enabled"` or `"disabled"` for either a loaded thread or a stored rollout; returns `{}` on success.
|
||||
- `memory/reset` — experimental; clear the current `CODEX_HOME/memories` directory and reset persisted memory stage data in sqlite while preserving existing thread memory modes; returns `{}` on success.
|
||||
@@ -201,8 +201,9 @@ Example with notification opt-out:
|
||||
- `marketplace/add` — add a remote plugin marketplace from an HTTP(S) Git URL, SSH Git URL, or GitHub `owner/repo` shorthand, then persist it into the user marketplace config. Returns the installed root path plus whether the marketplace was already present.
|
||||
- `marketplace/remove` — remove a configured marketplace by name from the user marketplace config, and delete its installed marketplace root when one exists.
|
||||
- `marketplace/upgrade` — upgrade all configured Git plugin marketplaces, or one named marketplace when `marketplaceName` is provided. Returns selected marketplace names, upgraded roots, and per-marketplace errors.
|
||||
- `plugin/list` — list discovered plugin marketplaces and plugin state, including effective marketplace install/auth policy metadata, fail-open `marketplaceLoadErrors` entries for marketplace files that could not be parsed or loaded, and best-effort `featuredPluginIds` for the official curated marketplace. `interface.category` uses the marketplace category when present; otherwise it falls back to the plugin manifest category (**under development; do not call from production clients yet**).
|
||||
- `plugin/list` — list discovered plugin marketplaces and plugin state, including effective marketplace install/auth policy metadata, plugin `availability` (`AVAILABLE` by default or `DISABLED_BY_ADMIN` for remote plugins blocked upstream), fail-open `marketplaceLoadErrors` entries for marketplace files that could not be parsed or loaded, and best-effort `featuredPluginIds` for the official curated marketplace. `interface.category` uses the marketplace category when present; otherwise it falls back to the plugin manifest category (**under development; do not call from production clients yet**).
|
||||
- `plugin/read` — read one plugin by `marketplacePath` plus `pluginName`, returning marketplace info, a list-style `summary`, manifest descriptions/interface metadata, and bundled skills/apps/MCP server names. Returned plugin skills include their current `enabled` state after local config filtering. Plugin app summaries also include `needsAuth` when the server can determine connector accessibility (**under development; do not call from production clients yet**).
|
||||
- `plugin/skill/read` — read remote plugin skill markdown on demand by `remoteMarketplaceName`, `remotePluginId`, and `skillName`. This lets clients preview uninstalled remote plugin skills without downloading the plugin bundle.
|
||||
- `skills/changed` — notification emitted when watched local skill files change.
|
||||
- `app/list` — list available apps.
|
||||
- `device/key/create` — create or load a controller-local device signing key for an account/client binding. This local-key API is available only over local transports such as stdio and in-process; remote transports reject it. Hardware-backed providers are the target protection class; an OS-protected non-extractable fallback is allowed only with `protectionPolicy: "allow_os_protected_nonextractable"` and returns the reported `protectionClass`.
|
||||
@@ -222,7 +223,7 @@ Example with notification opt-out:
|
||||
- `feedback/upload` — submit a feedback report (classification + optional reason/logs, conversation_id, and optional `extraLogFiles` attachments array); returns the tracking thread id.
|
||||
- `config/read` — fetch the effective config on disk after resolving config layering.
|
||||
- `externalAgentConfig/detect` — detect migratable external-agent artifacts with `includeHome` and optional `cwds`; each detected item includes `cwd` (`null` for home), and plugin/session migration items may additionally include structured `details` grouping plugin ids or session metadata.
|
||||
- `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home) and any plugin/session `details` returned by detect. When a request includes plugin or session imports, the server emits `externalAgentConfig/import/completed` after the full import finishes (immediately after the response when everything completed synchronously, or after background imports finish).
|
||||
- `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home) and any plugin/session `details` returned by detect. When a request includes migration items, the server emits `externalAgentConfig/import/completed` once after the full import finishes (immediately after the response when everything completed synchronously, or after background imports finish).
|
||||
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
|
||||
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk, with optional `reloadUserConfig: true` to hot-reload loaded threads.
|
||||
- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), pinned feature values (`featureRequirements`), managed lifecycle hooks (`hooks`), `enforceResidency`, and `network` constraints such as canonical domain/socket permissions plus `managedAllowedDomainsOnly` and `dangerFullAccessDenylistOnly`.
|
||||
@@ -276,7 +277,7 @@ Valid `personality` values are `"friendly"`, `"pragmatic"`, and `"none"`. When `
|
||||
|
||||
To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`. When the stored session includes persisted token usage, the server emits `thread/tokenUsage/updated` immediately after the response so clients can render restored usage before the next turn starts. You can also pass the same configuration overrides supported by `thread/start`, including `approvalsReviewer`.
|
||||
|
||||
By default, `thread/resume` includes the reconstructed turn history in `thread.turns`. Pass `excludeTurns: true` to return only thread metadata and live resume state, then call `thread/turns/list` separately if you want to page the turn history over the network. In that mode the server also skips replaying restored `thread/tokenUsage/updated`, which avoids rebuilding turns just to attribute historical usage.
|
||||
By default, `thread/resume` includes the reconstructed turn history in `thread.turns`. Experimental clients can pass `excludeTurns: true` to return only thread metadata and live resume state, then call `thread/turns/list` separately if they want to page the turn history over the network. In that mode the server also skips replaying restored `thread/tokenUsage/updated`, which avoids rebuilding turns just to attribute historical usage.
|
||||
|
||||
By default, resume uses the latest persisted `model` and `reasoningEffort` values associated with the thread. Supplying any of `model`, `modelProvider`, `config.model`, or `config.model_reasoning_effort` disables that persisted fallback and uses the explicit overrides plus normal config resolution instead.
|
||||
|
||||
@@ -304,9 +305,7 @@ To branch from a stored session, call `thread/fork` with the `thread.id`. This c
|
||||
{ "method": "thread/started", "params": { "thread": { … } } }
|
||||
```
|
||||
|
||||
Like `thread/resume`, `thread/fork` also accepts `excludeTurns: true` to return only thread metadata in `thread.turns` and let the client page history with `thread/turns/list`. In that mode the server skips replaying restored `thread/tokenUsage/updated`, which keeps the fork path from rebuilding turns just to attribute historical usage.
|
||||
|
||||
Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `persistExtendedHistory: true` to persist a richer subset of ThreadItems for non-lossy history when calling `thread/read`, `thread/resume`, and `thread/fork` later. This does not backfill events that were not persisted previously.
|
||||
Like `thread/resume`, experimental clients can pass `excludeTurns: true` to `thread/fork` to return only thread metadata in `thread.turns` and page history with `thread/turns/list`. In that mode the server skips replaying restored `thread/tokenUsage/updated`, which keeps the fork path from rebuilding turns just to attribute historical usage.
|
||||
|
||||
### Example: List threads (with pagination & filters)
|
||||
|
||||
@@ -402,7 +401,7 @@ Later, after the idle unload timeout:
|
||||
|
||||
### Example: Read a thread
|
||||
|
||||
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want the full rollout history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
|
||||
Use `thread/read` to fetch a stored thread by id without resuming it. Pass `includeTurns` when you want thread history loaded into `thread.turns`. The returned thread includes `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
|
||||
|
||||
```json
|
||||
{ "method": "thread/read", "id": 22, "params": { "threadId": "thr_123" } }
|
||||
@@ -418,9 +417,9 @@ Use `thread/read` to fetch a stored thread by id without resuming it. Pass `incl
|
||||
} }
|
||||
```
|
||||
|
||||
### Example: List thread turns
|
||||
### Example: List thread turns (experimental)
|
||||
|
||||
Use `thread/turns/list` to page a stored thread’s turn history without resuming it. By default, results are sorted descending so clients can start at the present and fetch older turns with `nextCursor`. The response also includes `backwardsCursor`; pass it as `cursor` on a later request with `sortDirection: "asc"` to fetch turns newer than the first item from the earlier page.
|
||||
Use `thread/turns/list` with `capabilities.experimentalApi = true` to page a stored thread’s turn history without resuming it. By default, results are sorted descending so clients can start at the present and fetch older turns with `nextCursor`. The response also includes `backwardsCursor`; pass it as `cursor` on a later request with `sortDirection: "asc"` to fetch turns newer than the first item from the earlier page.
|
||||
|
||||
```json
|
||||
{ "method": "thread/turns/list", "id": 24, "params": {
|
||||
@@ -761,14 +760,14 @@ const offer = await pc.createOffer();
|
||||
await pc.setLocalDescription(offer);
|
||||
```
|
||||
|
||||
Then send `offer.sdp` to app-server. Core uses `experimental_realtime_ws_backend_prompt` for the backend instructions and the thread conversation id for the realtime session id. The start response is `{}`; the remote answer SDP arrives later as `thread/realtime/sdp` and should be passed to `setRemoteDescription()`:
|
||||
Then send `offer.sdp` to app-server. Core uses `experimental_realtime_ws_backend_prompt` for the backend instructions and the thread conversation id as the default Realtime API session identifier. This `realtimeSessionId` value refers to the upstream Realtime API session, not a Codex session/thread-group id. The start response is `{}`; the remote answer SDP arrives later as `thread/realtime/sdp` and should be passed to `setRemoteDescription()`:
|
||||
|
||||
```json
|
||||
{ "method": "thread/realtime/start", "id": 40, "params": {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": "You are on a call.",
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": { "type": "webrtc", "sdp": "v=0\r\no=..." }
|
||||
} }
|
||||
{ "id": 40, "result": {} }
|
||||
@@ -1100,7 +1099,7 @@ The fuzzy file search session API emits per-query notifications:
|
||||
|
||||
The thread realtime API emits thread-scoped notifications for session lifecycle and streaming media:
|
||||
|
||||
- `thread/realtime/started` — `{ threadId, sessionId }` once realtime starts for the thread (experimental).
|
||||
- `thread/realtime/started` — `{ threadId, realtimeSessionId }` once realtime starts for the thread (experimental). `realtimeSessionId` is the upstream Realtime API session identifier, not a Codex session/thread-group id.
|
||||
- `thread/realtime/itemAdded` — `{ threadId, item }` for raw non-audio realtime items that do not have a dedicated typed app-server notification, including `handoff_request` (experimental). `item` is forwarded as raw JSON while the upstream websocket item schema remains unstable.
|
||||
- `thread/realtime/transcript/delta` — `{ threadId, role, delta }` for live realtime transcript deltas (experimental).
|
||||
- `thread/realtime/transcript/done` — `{ threadId, role, text }` when realtime emits the final full text for a transcript part (experimental).
|
||||
@@ -1183,7 +1182,7 @@ There are additional item-specific events:
#### fileChange

- `item/fileChange/patchUpdated` - when `features.apply_patch_streaming_events` is enabled, streams structured file-change snapshots parsed from the model-generated patch before it is executed.
- `item/fileChange/outputDelta` - contains the tool call response of the underlying `apply_patch` tool call.
- `item/fileChange/outputDelta` - deprecated legacy protocol entry for `apply_patch` text output; retained for compatibility but no longer emitted by the server.
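
As a rough illustration of consuming `item/fileChange/patchUpdated`, the sketch below keeps only the latest snapshot per item and re-renders on each update. The snapshot shape used here (`itemId`, `files` with `path`/`kind`) is a hypothetical placeholder for illustration; the real payload schema is defined by the generated app-server protocol types, not by this sketch.

```typescript
// Sketch only: the snapshot shape below is hypothetical; consult the generated
// protocol types for the actual `patchUpdated` payload.
interface FileChangeSnapshot {
  itemId: string;
  files: { path: string; kind: "add" | "update" | "delete" }[];
}

const latestSnapshots = new Map<string, FileChangeSnapshot>();

function onPatchUpdated(snapshot: FileChangeSnapshot, rerender: () => void): void {
  // Later snapshots supersede earlier ones for the same item, so keep only the
  // most recent one and redraw the pending-change view.
  latestSnapshots.set(snapshot.itemId, snapshot);
  rerender();
}
```
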
### Errors

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,66 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppListUpdatedNotification;
|
||||
use codex_app_server_protocol::AppsListResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_chatgpt::connectors;
|
||||
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
|
||||
pub(super) fn merge_loaded_apps(
|
||||
all_connectors: Option<&[AppInfo]>,
|
||||
accessible_connectors: Option<&[AppInfo]>,
|
||||
) -> Vec<AppInfo> {
|
||||
let all_connectors_loaded = all_connectors.is_some();
|
||||
let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
|
||||
}
|
||||
|
||||
pub(super) fn should_send_app_list_updated_notification(
|
||||
connectors: &[AppInfo],
|
||||
accessible_loaded: bool,
|
||||
all_loaded: bool,
|
||||
) -> bool {
|
||||
connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
|
||||
}
|
||||
|
||||
pub(super) fn paginate_apps(
|
||||
connectors: &[AppInfo],
|
||||
start: usize,
|
||||
limit: Option<u32>,
|
||||
) -> Result<AppsListResponse, JSONRPCErrorError> {
|
||||
let total = connectors.len();
|
||||
if start > total {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("cursor {start} exceeds total apps {total}"),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let data = connectors[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(AppsListResponse { data, next_cursor })
|
||||
}
|
||||
|
||||
pub(super) async fn send_app_list_updated_notification(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
data: Vec<AppInfo>,
|
||||
) {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::AppListUpdated(
|
||||
AppListUpdatedNotification { data },
|
||||
))
|
||||
.await;
|
||||
}
|
||||
@@ -1,149 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppSummary;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::plugins::AppConnectorId;
|
||||
use codex_exec_server::EnvironmentManager;
|
||||
use tracing::warn;
|
||||
|
||||
pub(super) async fn load_plugin_app_summaries(
|
||||
config: &Config,
|
||||
plugin_apps: &[AppConnectorId],
|
||||
environment_manager: &EnvironmentManager,
|
||||
) -> Vec<AppSummary> {
|
||||
if plugin_apps.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let connectors =
|
||||
match connectors::list_all_connectors_with_options(config, /*force_refetch*/ false).await {
|
||||
Ok(connectors) => connectors,
|
||||
Err(err) => {
|
||||
warn!("failed to load app metadata for plugin/read: {err:#}");
|
||||
connectors::list_cached_all_connectors(config)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
let plugin_connectors = connectors::connectors_for_plugin_apps(connectors, plugin_apps);
|
||||
|
||||
let accessible_connectors =
|
||||
match connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
|
||||
config,
|
||||
/*force_refetch*/ false,
|
||||
environment_manager,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(status) if status.codex_apps_ready => status.connectors,
|
||||
Ok(_) => {
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("failed to load app auth state for plugin/read: {err:#}");
|
||||
return plugin_connectors
|
||||
.into_iter()
|
||||
.map(AppSummary::from)
|
||||
.collect();
|
||||
}
|
||||
};
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
plugin_connectors
|
||||
.into_iter()
|
||||
.map(|connector| {
|
||||
let needs_auth = !accessible_ids.contains(connector.id.as_str());
|
||||
AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(super) fn plugin_apps_needing_auth(
|
||||
all_connectors: &[AppInfo],
|
||||
accessible_connectors: &[AppInfo],
|
||||
plugin_apps: &[AppConnectorId],
|
||||
codex_apps_ready: bool,
|
||||
) -> Vec<AppSummary> {
|
||||
if !codex_apps_ready {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let accessible_ids = accessible_connectors
|
||||
.iter()
|
||||
.map(|connector| connector.id.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
let plugin_app_ids = plugin_apps
|
||||
.iter()
|
||||
.map(|connector_id| connector_id.0.as_str())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
all_connectors
|
||||
.iter()
|
||||
.filter(|connector| {
|
||||
plugin_app_ids.contains(connector.id.as_str())
|
||||
&& !accessible_ids.contains(connector.id.as_str())
|
||||
})
|
||||
.cloned()
|
||||
.map(|connector| AppSummary {
|
||||
id: connector.id,
|
||||
name: connector.name,
|
||||
description: connector.description,
|
||||
install_url: connector.install_url,
|
||||
needs_auth: true,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_core::plugins::AppConnectorId;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::plugin_apps_needing_auth;
|
||||
|
||||
#[test]
|
||||
fn plugin_apps_needing_auth_returns_empty_when_codex_apps_is_not_ready() {
|
||||
let all_connectors = vec![AppInfo {
|
||||
id: "alpha".to_string(),
|
||||
name: "Alpha".to_string(),
|
||||
description: Some("Alpha connector".to_string()),
|
||||
logo_url: None,
|
||||
logo_url_dark: None,
|
||||
distribution_channel: None,
|
||||
branding: None,
|
||||
app_metadata: None,
|
||||
labels: None,
|
||||
install_url: Some("https://chatgpt.com/apps/alpha/alpha".to_string()),
|
||||
is_accessible: false,
|
||||
is_enabled: true,
|
||||
plugin_display_names: Vec::new(),
|
||||
}];
|
||||
|
||||
assert_eq!(
|
||||
plugin_apps_needing_auth(
|
||||
&all_connectors,
|
||||
&[],
|
||||
&[AppConnectorId("alpha".to_string())],
|
||||
/*codex_apps_ready*/ false,
|
||||
),
|
||||
Vec::new()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_config::types::McpServerConfig;
|
||||
use codex_core::config::Config;
|
||||
use codex_mcp::McpOAuthLoginSupport;
|
||||
use codex_mcp::oauth_login_support;
|
||||
use codex_mcp::resolve_oauth_scopes;
|
||||
use codex_mcp::should_retry_without_scopes;
|
||||
use codex_rmcp_client::perform_oauth_login_silent;
|
||||
use tracing::warn;
|
||||
|
||||
use super::CodexMessageProcessor;
|
||||
|
||||
impl CodexMessageProcessor {
|
||||
pub(super) async fn start_plugin_mcp_oauth_logins(
|
||||
&self,
|
||||
config: &Config,
|
||||
plugin_mcp_servers: HashMap<String, McpServerConfig>,
|
||||
) {
|
||||
for (name, server) in plugin_mcp_servers {
|
||||
let oauth_config = match oauth_login_support(&server.transport).await {
|
||||
McpOAuthLoginSupport::Supported(config) => config,
|
||||
McpOAuthLoginSupport::Unsupported => continue,
|
||||
McpOAuthLoginSupport::Unknown(err) => {
|
||||
warn!(
|
||||
"MCP server may or may not require login for plugin install {name}: {err}"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let resolved_scopes = resolve_oauth_scopes(
|
||||
/*explicit_scopes*/ None,
|
||||
server.scopes.clone(),
|
||||
oauth_config.discovered_scopes.clone(),
|
||||
);
|
||||
|
||||
let store_mode = config.mcp_oauth_credentials_store_mode;
|
||||
let callback_port = config.mcp_oauth_callback_port;
|
||||
let callback_url = config.mcp_oauth_callback_url.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let notification_name = name.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let first_attempt = perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers.clone(),
|
||||
oauth_config.env_http_headers.clone(),
|
||||
&resolved_scopes.scopes,
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let final_result = match first_attempt {
|
||||
Err(err) if should_retry_without_scopes(&resolved_scopes, &err) => {
|
||||
perform_oauth_login_silent(
|
||||
&name,
|
||||
&oauth_config.url,
|
||||
store_mode,
|
||||
oauth_config.http_headers,
|
||||
oauth_config.env_http_headers,
|
||||
&[],
|
||||
server.oauth_resource.as_deref(),
|
||||
callback_port,
|
||||
callback_url.as_deref(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
result => result,
|
||||
};
|
||||
|
||||
let (success, error) = match final_result {
|
||||
Ok(()) => (true, None),
|
||||
Err(err) => (false, Some(err.to_string())),
|
||||
};
|
||||
|
||||
let notification = ServerNotification::McpServerOauthLoginCompleted(
|
||||
McpServerOauthLoginCompletedNotification {
|
||||
name: notification_name,
|
||||
success,
|
||||
error,
|
||||
},
|
||||
);
|
||||
outgoing.send_server_notification(notification).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,8 @@
use codex_config::types::PluginConfig;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::plugins::PluginId;
use codex_core::plugins::PluginInstallRequest;
use codex_core::plugins::PluginsManager;
use codex_core_plugins::PluginInstallRequest;
use codex_core_plugins::PluginsManager;
use codex_core_plugins::marketplace::MarketplacePluginInstallPolicy;
use codex_core_plugins::marketplace::find_marketplace_manifest_path;
use codex_core_plugins::marketplace_add::MarketplaceAddRequest;
@@ -20,6 +19,7 @@ use codex_external_agent_migration::missing_command_names;
use codex_external_agent_migration::missing_subagent_names;
use codex_external_agent_sessions::ExternalAgentSessionMigration;
use codex_external_agent_sessions::detect_recent_sessions;
use codex_plugin::PluginId;
use codex_protocol::protocol::Product;
use serde_json::Value as JsonValue;
use std::collections::BTreeMap;
@@ -1146,8 +1146,9 @@ fn configured_marketplace_plugins(
    config: &Config,
    plugins_manager: &PluginsManager,
) -> io::Result<BTreeMap<String, HashSet<String>>> {
    let plugins_input = config.plugins_config_input();
    let marketplaces = plugins_manager
        .list_marketplaces_for_config(config, &[])
        .list_marketplaces_for_config(&plugins_input, &[])
        .map_err(|err| {
            invalid_data_error(format!("failed to list configured marketplaces: {err}"))
        })?;

@@ -1,874 +0,0 @@
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::config_manager_service::ConfigManagerError;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use async_trait::async_trait;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_app_server_protocol::ConfigBatchWriteParams;
|
||||
use codex_app_server_protocol::ConfigReadParams;
|
||||
use codex_app_server_protocol::ConfigReadResponse;
|
||||
use codex_app_server_protocol::ConfigRequirements;
|
||||
use codex_app_server_protocol::ConfigRequirementsReadResponse;
|
||||
use codex_app_server_protocol::ConfigValueWriteParams;
|
||||
use codex_app_server_protocol::ConfigWriteErrorCode;
|
||||
use codex_app_server_protocol::ConfigWriteResponse;
|
||||
use codex_app_server_protocol::ConfiguredHookHandler;
|
||||
use codex_app_server_protocol::ConfiguredHookMatcherGroup;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetParams;
|
||||
use codex_app_server_protocol::ExperimentalFeatureEnablementSetResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ManagedHooksRequirements;
|
||||
use codex_app_server_protocol::NetworkDomainPermission;
|
||||
use codex_app_server_protocol::NetworkRequirements;
|
||||
use codex_app_server_protocol::NetworkUnixSocketPermission;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_config::ConfigRequirementsToml;
|
||||
use codex_config::HookEventsToml;
|
||||
use codex_config::HookHandlerConfig as CoreHookHandlerConfig;
|
||||
use codex_config::ManagedHooksRequirementsToml;
|
||||
use codex_config::MatcherGroup as CoreMatcherGroup;
|
||||
use codex_config::ResidencyRequirement as CoreResidencyRequirement;
|
||||
use codex_config::SandboxModeRequirement as CoreSandboxModeRequirement;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::plugins::PluginId;
|
||||
use codex_core_plugins::loader::installed_plugin_telemetry_metadata;
|
||||
use codex_core_plugins::toggles::collect_plugin_enabled_candidates;
|
||||
use codex_features::canonical_feature_for_key;
|
||||
use codex_features::feature_for_key;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::protocol::Op;
|
||||
use serde_json::json;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tracing::warn;
|
||||
|
||||
const SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT: &[&str] = &[
|
||||
"apps",
|
||||
"memories",
|
||||
"plugins",
|
||||
"remote_control",
|
||||
"tool_search",
|
||||
"tool_suggest",
|
||||
"tool_call_mcp_elicitation",
|
||||
];
|
||||
|
||||
#[async_trait]
|
||||
pub(crate) trait UserConfigReloader: Send + Sync {
|
||||
async fn reload_user_config(&self);
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserConfigReloader for ThreadManager {
|
||||
async fn reload_user_config(&self) {
|
||||
let thread_ids = self.list_thread_ids().await;
|
||||
for thread_id in thread_ids {
|
||||
let Ok(thread) = self.get_thread(thread_id).await else {
|
||||
continue;
|
||||
};
|
||||
if let Err(err) = thread.submit(Op::ReloadUserConfig).await {
|
||||
warn!("failed to request user config reload: {err}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ConfigApi {
|
||||
config_manager: ConfigManager,
|
||||
user_config_reloader: Arc<dyn UserConfigReloader>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
}
|
||||
|
||||
impl ConfigApi {
|
||||
pub(crate) fn new(
|
||||
config_manager: ConfigManager,
|
||||
user_config_reloader: Arc<dyn UserConfigReloader>,
|
||||
analytics_events_client: AnalyticsEventsClient,
|
||||
) -> Self {
|
||||
Self {
|
||||
config_manager,
|
||||
user_config_reloader,
|
||||
analytics_events_client,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
internal_error(format!(
|
||||
"failed to resolve feature override precedence: {err}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn read(
|
||||
&self,
|
||||
params: ConfigReadParams,
|
||||
) -> Result<ConfigReadResponse, JSONRPCErrorError> {
|
||||
let fallback_cwd = params.cwd.as_ref().map(PathBuf::from);
|
||||
let mut response = self.config_manager.read(params).await.map_err(map_error)?;
|
||||
let config = self.load_latest_config(fallback_cwd).await?;
|
||||
for feature_key in SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT {
|
||||
let Some(feature) = feature_for_key(feature_key) else {
|
||||
continue;
|
||||
};
|
||||
let features = response
|
||||
.config
|
||||
.additional
|
||||
.entry("features".to_string())
|
||||
.or_insert_with(|| json!({}));
|
||||
if !features.is_object() {
|
||||
*features = json!({});
|
||||
}
|
||||
if let Some(features) = features.as_object_mut() {
|
||||
features.insert(
|
||||
(*feature_key).to_string(),
|
||||
json!(config.features.enabled(feature)),
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn config_requirements_read(
|
||||
&self,
|
||||
) -> Result<ConfigRequirementsReadResponse, JSONRPCErrorError> {
|
||||
let requirements = self
|
||||
.config_manager
|
||||
.read_requirements()
|
||||
.await
|
||||
.map_err(map_error)?
|
||||
.map(map_requirements_toml_to_api);
|
||||
|
||||
Ok(ConfigRequirementsReadResponse { requirements })
|
||||
}
|
||||
|
||||
pub(crate) async fn write_value(
|
||||
&self,
|
||||
params: ConfigValueWriteParams,
|
||||
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
|
||||
let pending_changes =
|
||||
collect_plugin_enabled_candidates([(¶ms.key_path, ¶ms.value)].into_iter());
|
||||
let response = self
|
||||
.config_manager
|
||||
.write_value(params)
|
||||
.await
|
||||
.map_err(map_error)?;
|
||||
self.emit_plugin_toggle_events(pending_changes).await;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn batch_write(
|
||||
&self,
|
||||
params: ConfigBatchWriteParams,
|
||||
) -> Result<ConfigWriteResponse, JSONRPCErrorError> {
|
||||
let reload_user_config = params.reload_user_config;
|
||||
let pending_changes = collect_plugin_enabled_candidates(
|
||||
params
|
||||
.edits
|
||||
.iter()
|
||||
.map(|edit| (&edit.key_path, &edit.value)),
|
||||
);
|
||||
let response = self
|
||||
.config_manager
|
||||
.batch_write(params)
|
||||
.await
|
||||
.map_err(map_error)?;
|
||||
self.emit_plugin_toggle_events(pending_changes).await;
|
||||
if reload_user_config {
|
||||
self.user_config_reloader.reload_user_config().await;
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub(crate) async fn set_experimental_feature_enablement(
|
||||
&self,
|
||||
params: ExperimentalFeatureEnablementSetParams,
|
||||
) -> Result<ExperimentalFeatureEnablementSetResponse, JSONRPCErrorError> {
|
||||
let ExperimentalFeatureEnablementSetParams { enablement } = params;
|
||||
for key in enablement.keys() {
|
||||
if canonical_feature_for_key(key).is_some() {
|
||||
if SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.contains(&key.as_str()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
return Err(invalid_request(format!(
|
||||
"unsupported feature enablement `{key}`: currently supported features are {}",
|
||||
SUPPORTED_EXPERIMENTAL_FEATURE_ENABLEMENT.join(", ")
|
||||
)));
|
||||
}
|
||||
|
||||
let message = if let Some(feature) = feature_for_key(key) {
|
||||
format!(
|
||||
"invalid feature enablement `{key}`: use canonical feature key `{}`",
|
||||
feature.key()
|
||||
)
|
||||
} else {
|
||||
format!("invalid feature enablement `{key}`")
|
||||
};
|
||||
return Err(invalid_request(message));
|
||||
}
|
||||
|
||||
if enablement.is_empty() {
|
||||
return Ok(ExperimentalFeatureEnablementSetResponse { enablement });
|
||||
}
|
||||
|
||||
self.config_manager
|
||||
.extend_runtime_feature_enablement(
|
||||
enablement
|
||||
.iter()
|
||||
.map(|(name, enabled)| (name.clone(), *enabled)),
|
||||
)
|
||||
.map_err(|_| internal_error("failed to update feature enablement"))?;
|
||||
|
||||
self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
self.user_config_reloader.reload_user_config().await;
|
||||
|
||||
Ok(ExperimentalFeatureEnablementSetResponse { enablement })
|
||||
}
|
||||
|
||||
async fn emit_plugin_toggle_events(
|
||||
&self,
|
||||
pending_changes: std::collections::BTreeMap<String, bool>,
|
||||
) {
|
||||
for (plugin_id, enabled) in pending_changes {
|
||||
let Ok(plugin_id) = PluginId::parse(&plugin_id) else {
|
||||
continue;
|
||||
};
|
||||
let metadata =
|
||||
installed_plugin_telemetry_metadata(self.config_manager.codex_home(), &plugin_id)
|
||||
.await;
|
||||
if enabled {
|
||||
self.analytics_events_client.track_plugin_enabled(metadata);
|
||||
} else {
|
||||
self.analytics_events_client.track_plugin_disabled(metadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigRequirements {
|
||||
ConfigRequirements {
|
||||
allowed_approval_policies: requirements.allowed_approval_policies.map(|policies| {
|
||||
policies
|
||||
.into_iter()
|
||||
.map(codex_app_server_protocol::AskForApproval::from)
|
||||
.collect()
|
||||
}),
|
||||
allowed_approvals_reviewers: requirements.allowed_approvals_reviewers.map(|reviewers| {
|
||||
reviewers
|
||||
.into_iter()
|
||||
.map(codex_app_server_protocol::ApprovalsReviewer::from)
|
||||
.collect()
|
||||
}),
|
||||
allowed_sandbox_modes: requirements.allowed_sandbox_modes.map(|modes| {
|
||||
modes
|
||||
.into_iter()
|
||||
.filter_map(map_sandbox_mode_requirement_to_api)
|
||||
.collect()
|
||||
}),
|
||||
allowed_web_search_modes: requirements.allowed_web_search_modes.map(|modes| {
|
||||
let mut normalized = modes
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect::<Vec<WebSearchMode>>();
|
||||
if !normalized.contains(&WebSearchMode::Disabled) {
|
||||
normalized.push(WebSearchMode::Disabled);
|
||||
}
|
||||
normalized
|
||||
}),
|
||||
feature_requirements: requirements
|
||||
.feature_requirements
|
||||
.map(|requirements| requirements.entries),
|
||||
hooks: requirements.hooks.map(map_hooks_requirements_to_api),
|
||||
enforce_residency: requirements
|
||||
.enforce_residency
|
||||
.map(map_residency_requirement_to_api),
|
||||
network: requirements.network.map(map_network_requirements_to_api),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hooks_requirements_to_api(hooks: ManagedHooksRequirementsToml) -> ManagedHooksRequirements {
|
||||
let ManagedHooksRequirementsToml {
|
||||
managed_dir,
|
||||
windows_managed_dir,
|
||||
hooks,
|
||||
} = hooks;
|
||||
let HookEventsToml {
|
||||
pre_tool_use,
|
||||
permission_request,
|
||||
post_tool_use,
|
||||
session_start,
|
||||
user_prompt_submit,
|
||||
stop,
|
||||
} = hooks;
|
||||
|
||||
ManagedHooksRequirements {
|
||||
managed_dir,
|
||||
windows_managed_dir,
|
||||
pre_tool_use: map_hook_matcher_groups_to_api(pre_tool_use),
|
||||
permission_request: map_hook_matcher_groups_to_api(permission_request),
|
||||
post_tool_use: map_hook_matcher_groups_to_api(post_tool_use),
|
||||
session_start: map_hook_matcher_groups_to_api(session_start),
|
||||
user_prompt_submit: map_hook_matcher_groups_to_api(user_prompt_submit),
|
||||
stop: map_hook_matcher_groups_to_api(stop),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hook_matcher_groups_to_api(
|
||||
groups: Vec<CoreMatcherGroup>,
|
||||
) -> Vec<ConfiguredHookMatcherGroup> {
|
||||
groups
|
||||
.into_iter()
|
||||
.map(map_hook_matcher_group_to_api)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn map_hook_matcher_group_to_api(group: CoreMatcherGroup) -> ConfiguredHookMatcherGroup {
|
||||
ConfiguredHookMatcherGroup {
|
||||
matcher: group.matcher,
|
||||
hooks: group
|
||||
.hooks
|
||||
.into_iter()
|
||||
.map(map_hook_handler_to_api)
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_hook_handler_to_api(handler: CoreHookHandlerConfig) -> ConfiguredHookHandler {
|
||||
match handler {
|
||||
CoreHookHandlerConfig::Command {
|
||||
command,
|
||||
timeout_sec,
|
||||
r#async,
|
||||
status_message,
|
||||
} => ConfiguredHookHandler::Command {
|
||||
command,
|
||||
timeout_sec,
|
||||
r#async,
|
||||
status_message,
|
||||
},
|
||||
CoreHookHandlerConfig::Prompt {} => ConfiguredHookHandler::Prompt {},
|
||||
CoreHookHandlerConfig::Agent {} => ConfiguredHookHandler::Agent {},
|
||||
}
|
||||
}
|
||||
|
||||
fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Option<SandboxMode> {
|
||||
match mode {
|
||||
CoreSandboxModeRequirement::ReadOnly => Some(SandboxMode::ReadOnly),
|
||||
CoreSandboxModeRequirement::WorkspaceWrite => Some(SandboxMode::WorkspaceWrite),
|
||||
CoreSandboxModeRequirement::DangerFullAccess => Some(SandboxMode::DangerFullAccess),
|
||||
CoreSandboxModeRequirement::ExternalSandbox => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_residency_requirement_to_api(
|
||||
residency: CoreResidencyRequirement,
|
||||
) -> codex_app_server_protocol::ResidencyRequirement {
|
||||
match residency {
|
||||
CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_requirements_to_api(
|
||||
network: codex_config::NetworkRequirementsToml,
|
||||
) -> NetworkRequirements {
|
||||
let allowed_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains);
|
||||
let denied_domains = network
|
||||
.domains
|
||||
.as_ref()
|
||||
.and_then(codex_config::NetworkDomainPermissionsToml::denied_domains);
|
||||
let allow_unix_sockets = network
|
||||
.unix_sockets
|
||||
.as_ref()
|
||||
.map(codex_config::NetworkUnixSocketPermissionsToml::allow_unix_sockets)
|
||||
.filter(|entries| !entries.is_empty());
|
||||
|
||||
NetworkRequirements {
|
||||
enabled: network.enabled,
|
||||
http_port: network.http_port,
|
||||
socks_port: network.socks_port,
|
||||
allow_upstream_proxy: network.allow_upstream_proxy,
|
||||
dangerously_allow_non_loopback_proxy: network.dangerously_allow_non_loopback_proxy,
|
||||
dangerously_allow_all_unix_sockets: network.dangerously_allow_all_unix_sockets,
|
||||
domains: network.domains.map(|domains| {
|
||||
domains
|
||||
.entries
|
||||
.into_iter()
|
||||
.map(|(pattern, permission)| {
|
||||
(pattern, map_network_domain_permission_to_api(permission))
|
||||
})
|
||||
.collect()
|
||||
}),
|
||||
managed_allowed_domains_only: network.managed_allowed_domains_only,
|
||||
allowed_domains,
|
||||
denied_domains,
|
||||
unix_sockets: network.unix_sockets.map(|unix_sockets| {
|
||||
unix_sockets
|
||||
.entries
|
||||
.into_iter()
|
||||
.map(|(path, permission)| {
|
||||
(path, map_network_unix_socket_permission_to_api(permission))
|
||||
})
|
||||
.collect()
|
||||
}),
|
||||
allow_unix_sockets,
|
||||
allow_local_binding: network.allow_local_binding,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_domain_permission_to_api(
|
||||
permission: codex_config::NetworkDomainPermissionToml,
|
||||
) -> NetworkDomainPermission {
|
||||
match permission {
|
||||
codex_config::NetworkDomainPermissionToml::Allow => NetworkDomainPermission::Allow,
|
||||
codex_config::NetworkDomainPermissionToml::Deny => NetworkDomainPermission::Deny,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_network_unix_socket_permission_to_api(
|
||||
permission: codex_config::NetworkUnixSocketPermissionToml,
|
||||
) -> NetworkUnixSocketPermission {
|
||||
match permission {
|
||||
codex_config::NetworkUnixSocketPermissionToml::Allow => NetworkUnixSocketPermission::Allow,
|
||||
codex_config::NetworkUnixSocketPermissionToml::None => NetworkUnixSocketPermission::None,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_error(err: ConfigManagerError) -> JSONRPCErrorError {
|
||||
if let Some(code) = err.write_error_code() {
|
||||
return config_write_error(code, err.to_string());
|
||||
}
|
||||
|
||||
internal_error(err.to_string())
|
||||
}
|
||||
|
||||
fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) -> JSONRPCErrorError {
|
||||
JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: message.into(),
|
||||
data: Some(json!({
|
||||
"config_write_error_code": code,
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config_manager::apply_runtime_feature_enablement;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_config::CloudRequirementsLoader;
|
||||
use codex_config::LoaderOverrides;
|
||||
use codex_config::NetworkDomainPermissionToml as CoreNetworkDomainPermissionToml;
|
||||
use codex_config::NetworkDomainPermissionsToml as CoreNetworkDomainPermissionsToml;
|
||||
use codex_config::NetworkRequirementsToml as CoreNetworkRequirementsToml;
|
||||
use codex_config::NetworkUnixSocketPermissionToml as CoreNetworkUnixSocketPermissionToml;
|
||||
use codex_config::NetworkUnixSocketPermissionsToml as CoreNetworkUnixSocketPermissionsToml;
|
||||
use codex_features::Feature;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_protocol::config_types::ApprovalsReviewer as CoreApprovalsReviewer;
|
||||
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tempfile::TempDir;
|
||||
use toml::Value as TomlValue;
|
||||
|
||||
#[derive(Default)]
|
||||
struct RecordingUserConfigReloader {
|
||||
call_count: AtomicUsize,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl UserConfigReloader for RecordingUserConfigReloader {
|
||||
async fn reload_user_config(&self) {
|
||||
self.call_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_converts_core_enums() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: Some(vec![
|
||||
CoreAskForApproval::Never,
|
||||
CoreAskForApproval::OnRequest,
|
||||
]),
|
||||
allowed_approvals_reviewers: Some(vec![
|
||||
CoreApprovalsReviewer::User,
|
||||
CoreApprovalsReviewer::AutoReview,
|
||||
]),
|
||||
allowed_sandbox_modes: Some(vec![
|
||||
CoreSandboxModeRequirement::ReadOnly,
|
||||
CoreSandboxModeRequirement::ExternalSandbox,
|
||||
]),
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: Some(vec![codex_config::WebSearchModeRequirement::Cached]),
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: Some(codex_config::FeatureRequirementsToml {
|
||||
entries: std::collections::BTreeMap::from([
|
||||
("apps".to_string(), false),
|
||||
("personality".to_string(), true),
|
||||
]),
|
||||
}),
|
||||
hooks: Some(ManagedHooksRequirementsToml {
|
||||
managed_dir: Some(PathBuf::from("/enterprise/hooks")),
|
||||
windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
|
||||
hooks: HookEventsToml {
|
||||
pre_tool_use: vec![CoreMatcherGroup {
|
||||
matcher: Some("^Bash$".to_string()),
|
||||
hooks: vec![CoreHookHandlerConfig::Command {
|
||||
command: "python3 /enterprise/hooks/pre.py".to_string(),
|
||||
timeout_sec: Some(10),
|
||||
r#async: false,
|
||||
status_message: Some("checking".to_string()),
|
||||
}],
|
||||
}],
|
||||
..Default::default()
|
||||
},
|
||||
}),
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: Some(CoreResidencyRequirement::Us),
|
||||
network: Some(CoreNetworkRequirementsToml {
|
||||
enabled: Some(true),
|
||||
http_port: Some(8080),
|
||||
socks_port: Some(1080),
|
||||
allow_upstream_proxy: Some(false),
|
||||
dangerously_allow_non_loopback_proxy: Some(false),
|
||||
dangerously_allow_all_unix_sockets: Some(true),
|
||||
domains: Some(CoreNetworkDomainPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([
|
||||
(
|
||||
"api.openai.com".to_string(),
|
||||
CoreNetworkDomainPermissionToml::Allow,
|
||||
),
|
||||
(
|
||||
"example.com".to_string(),
|
||||
CoreNetworkDomainPermissionToml::Deny,
|
||||
),
|
||||
]),
|
||||
}),
|
||||
managed_allowed_domains_only: Some(false),
|
||||
unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([(
|
||||
"/tmp/proxy.sock".to_string(),
|
||||
CoreNetworkUnixSocketPermissionToml::Allow,
|
||||
)]),
|
||||
}),
|
||||
allow_local_binding: Some(true),
|
||||
}),
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.allowed_approval_policies,
|
||||
Some(vec![
|
||||
codex_app_server_protocol::AskForApproval::Never,
|
||||
codex_app_server_protocol::AskForApproval::OnRequest,
|
||||
])
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_approvals_reviewers,
|
||||
Some(vec![
|
||||
codex_app_server_protocol::ApprovalsReviewer::User,
|
||||
codex_app_server_protocol::ApprovalsReviewer::AutoReview,
|
||||
])
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_sandbox_modes,
|
||||
Some(vec![SandboxMode::ReadOnly]),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.allowed_web_search_modes,
|
||||
Some(vec![WebSearchMode::Cached, WebSearchMode::Disabled]),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.feature_requirements,
|
||||
Some(std::collections::BTreeMap::from([
|
||||
("apps".to_string(), false),
|
||||
("personality".to_string(), true),
|
||||
])),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.hooks,
|
||||
Some(ManagedHooksRequirements {
|
||||
managed_dir: Some(PathBuf::from("/enterprise/hooks")),
|
||||
windows_managed_dir: Some(PathBuf::from(r"C:\enterprise\hooks")),
|
||||
pre_tool_use: vec![ConfiguredHookMatcherGroup {
|
||||
matcher: Some("^Bash$".to_string()),
|
||||
hooks: vec![ConfiguredHookHandler::Command {
|
||||
command: "python3 /enterprise/hooks/pre.py".to_string(),
|
||||
timeout_sec: Some(10),
|
||||
r#async: false,
|
||||
status_message: Some("checking".to_string()),
|
||||
}],
|
||||
}],
|
||||
permission_request: Vec::new(),
|
||||
post_tool_use: Vec::new(),
|
||||
session_start: Vec::new(),
|
||||
user_prompt_submit: Vec::new(),
|
||||
stop: Vec::new(),
|
||||
}),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.enforce_residency,
|
||||
Some(codex_app_server_protocol::ResidencyRequirement::Us),
|
||||
);
|
||||
assert_eq!(
|
||||
mapped.network,
|
||||
Some(NetworkRequirements {
|
||||
enabled: Some(true),
|
||||
http_port: Some(8080),
|
||||
socks_port: Some(1080),
|
||||
allow_upstream_proxy: Some(false),
|
||||
dangerously_allow_non_loopback_proxy: Some(false),
|
||||
dangerously_allow_all_unix_sockets: Some(true),
|
||||
domains: Some(std::collections::BTreeMap::from([
|
||||
("api.openai.com".to_string(), NetworkDomainPermission::Allow,),
|
||||
("example.com".to_string(), NetworkDomainPermission::Deny),
|
||||
])),
|
||||
managed_allowed_domains_only: Some(false),
|
||||
allowed_domains: Some(vec!["api.openai.com".to_string()]),
|
||||
denied_domains: Some(vec!["example.com".to_string()]),
|
||||
unix_sockets: Some(std::collections::BTreeMap::from([(
|
||||
"/tmp/proxy.sock".to_string(),
|
||||
NetworkUnixSocketPermission::Allow,
|
||||
)])),
|
||||
allow_unix_sockets: Some(vec!["/tmp/proxy.sock".to_string()]),
|
||||
allow_local_binding: Some(true),
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_omits_unix_socket_none_entries_from_legacy_network_fields() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: None,
|
||||
allowed_approvals_reviewers: None,
|
||||
allowed_sandbox_modes: None,
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: None,
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: None,
|
||||
hooks: None,
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: None,
|
||||
network: Some(CoreNetworkRequirementsToml {
|
||||
enabled: None,
|
||||
http_port: None,
|
||||
socks_port: None,
|
||||
allow_upstream_proxy: None,
|
||||
dangerously_allow_non_loopback_proxy: None,
|
||||
dangerously_allow_all_unix_sockets: None,
|
||||
domains: None,
|
||||
managed_allowed_domains_only: None,
|
||||
unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml {
|
||||
entries: std::collections::BTreeMap::from([(
|
||||
"/tmp/ignored.sock".to_string(),
|
||||
CoreNetworkUnixSocketPermissionToml::None,
|
||||
)]),
|
||||
}),
|
||||
allow_local_binding: None,
|
||||
}),
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.network,
|
||||
Some(NetworkRequirements {
|
||||
enabled: None,
|
||||
http_port: None,
|
||||
socks_port: None,
|
||||
allow_upstream_proxy: None,
|
||||
dangerously_allow_non_loopback_proxy: None,
|
||||
dangerously_allow_all_unix_sockets: None,
|
||||
domains: None,
|
||||
managed_allowed_domains_only: None,
|
||||
allowed_domains: None,
|
||||
denied_domains: None,
|
||||
unix_sockets: Some(std::collections::BTreeMap::from([(
|
||||
"/tmp/ignored.sock".to_string(),
|
||||
NetworkUnixSocketPermission::None,
|
||||
)])),
|
||||
allow_unix_sockets: None,
|
||||
allow_local_binding: None,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn map_requirements_toml_to_api_normalizes_allowed_web_search_modes() {
|
||||
let requirements = ConfigRequirementsToml {
|
||||
allowed_approval_policies: None,
|
||||
allowed_approvals_reviewers: None,
|
||||
allowed_sandbox_modes: None,
|
||||
remote_sandbox_config: None,
|
||||
allowed_web_search_modes: Some(Vec::new()),
|
||||
guardian_policy_config: None,
|
||||
feature_requirements: None,
|
||||
hooks: None,
|
||||
mcp_servers: None,
|
||||
plugins: None,
|
||||
apps: None,
|
||||
rules: None,
|
||||
enforce_residency: None,
|
||||
network: None,
|
||||
permissions: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
assert_eq!(
|
||||
mapped.allowed_web_search_modes,
|
||||
Some(vec![WebSearchMode::Disabled])
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_runtime_feature_enablement_keeps_cli_overrides_above_config_and_runtime() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
std::fs::write(
|
||||
codex_home.path().join("config.toml"),
|
||||
"[features]\napps = false\n",
|
||||
)
|
||||
.expect("write config");
|
||||
|
||||
let mut config = codex_core::config::ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.fallback_cwd(Some(codex_home.path().to_path_buf()))
|
||||
.cli_overrides(vec![(
|
||||
"features.apps".to_string(),
|
||||
TomlValue::Boolean(true),
|
||||
)])
|
||||
.build()
|
||||
.await
|
||||
.expect("load config");
|
||||
|
||||
apply_runtime_feature_enablement(
|
||||
&mut config,
|
||||
&BTreeMap::from([("apps".to_string(), false)]),
|
||||
);
|
||||
|
||||
assert!(config.features.enabled(Feature::Apps));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn apply_runtime_feature_enablement_keeps_cloud_pins_above_cli_and_runtime() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
|
||||
let mut config = codex_core::config::ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.cli_overrides(vec![(
|
||||
"features.apps".to_string(),
|
||||
TomlValue::Boolean(true),
|
||||
)])
|
||||
.cloud_requirements(CloudRequirementsLoader::new(async {
|
||||
Ok(Some(ConfigRequirementsToml {
|
||||
feature_requirements: Some(codex_config::FeatureRequirementsToml {
|
||||
entries: BTreeMap::from([("apps".to_string(), false)]),
|
||||
}),
|
||||
..Default::default()
|
||||
}))
|
||||
}))
|
||||
.build()
|
||||
.await
|
||||
.expect("load config");
|
||||
|
||||
apply_runtime_feature_enablement(
|
||||
&mut config,
|
||||
&BTreeMap::from([("apps".to_string(), true)]),
|
||||
);
|
||||
|
||||
assert!(!config.features.enabled(Feature::Apps));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn batch_write_reloads_user_config_when_requested() {
|
||||
let codex_home = TempDir::new().expect("create temp dir");
|
||||
let user_config_path = codex_home.path().join("config.toml");
|
||||
std::fs::write(&user_config_path, "").expect("write config");
|
||||
let reloader = Arc::new(RecordingUserConfigReloader::default());
|
||||
let analytics_config = Arc::new(
|
||||
codex_core::config::ConfigBuilder::default()
|
||||
.build()
|
||||
.await
|
||||
.expect("load analytics config"),
|
||||
);
|
||||
let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test"));
|
||||
let config_api = ConfigApi::new(
|
||||
ConfigManager::new(
|
||||
codex_home.path().to_path_buf(),
|
||||
Vec::new(),
|
||||
LoaderOverrides::default(),
|
||||
CloudRequirementsLoader::default(),
|
||||
Arg0DispatchPaths::default(),
|
||||
Arc::new(codex_config::NoopThreadConfigLoader),
|
||||
),
|
||||
reloader.clone(),
|
||||
AnalyticsEventsClient::new(
|
||||
auth_manager,
|
||||
analytics_config
|
||||
.chatgpt_base_url
|
||||
.trim_end_matches('/')
|
||||
.to_string(),
|
||||
analytics_config.analytics_enabled,
|
||||
),
|
||||
);
|
||||
|
||||
let response = config_api
|
||||
.batch_write(ConfigBatchWriteParams {
|
||||
edits: vec![codex_app_server_protocol::ConfigEdit {
|
||||
key_path: "model".to_string(),
|
||||
value: json!("gpt-5"),
|
||||
merge_strategy: codex_app_server_protocol::MergeStrategy::Replace,
|
||||
}],
|
||||
file_path: Some(user_config_path.display().to_string()),
|
||||
expected_version: None,
|
||||
reload_user_config: true,
|
||||
})
|
||||
.await
|
||||
.expect("batch write should succeed");
|
||||
|
||||
assert_eq!(
|
||||
response,
|
||||
ConfigWriteResponse {
|
||||
status: codex_app_server_protocol::WriteStatus::Ok,
|
||||
version: response.version.clone(),
|
||||
file_path: codex_utils_absolute_path::AbsolutePathBuf::try_from(
|
||||
user_config_path.clone()
|
||||
)
|
||||
.expect("absolute config path"),
|
||||
overridden_metadata: None,
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_to_string(user_config_path).unwrap(),
|
||||
"model = \"gpt-5\"\n"
|
||||
);
|
||||
assert_eq!(reloader.call_count.load(Ordering::Relaxed), 1);
|
||||
}
|
||||
}
|
||||
@@ -86,6 +86,7 @@ use codex_exec_server::EnvironmentManager;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_login::AuthManager;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
pub use codex_rollout::StateDbHandle;
|
||||
pub use codex_state::log_db::LogDbLayer;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
@@ -126,6 +127,8 @@ pub struct InProcessStartArgs {
|
||||
pub feedback: CodexFeedback,
|
||||
/// SQLite tracing layer used to flush recently emitted logs before feedback upload.
|
||||
pub log_db: Option<LogDbLayer>,
|
||||
/// Process-wide SQLite state handle shared with embedded app-server consumers.
|
||||
pub state_db: Option<StateDbHandle>,
|
||||
/// Environment manager used by core execution and filesystem operations.
|
||||
pub environment_manager: Arc<EnvironmentManager>,
|
||||
/// Startup warnings emitted after initialize succeeds.
|
||||
@@ -251,6 +254,8 @@ pub struct InProcessClientHandle {
|
||||
client: InProcessClientSender,
|
||||
event_rx: mpsc::Receiver<InProcessServerEvent>,
|
||||
runtime_handle: tokio::task::JoinHandle<()>,
|
||||
#[cfg(test)]
|
||||
_test_codex_home: Option<tempfile::TempDir>,
|
||||
}
|
||||
|
||||
impl InProcessClientHandle {
|
||||
@@ -418,6 +423,7 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle {
|
||||
environment_manager: args.environment_manager,
|
||||
feedback: args.feedback,
|
||||
log_db: args.log_db,
|
||||
state_db: args.state_db,
|
||||
config_warnings: args.config_warnings,
|
||||
session_source: args.session_source,
|
||||
auth_manager,
|
||||
@@ -717,6 +723,8 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle {
|
||||
client: InProcessClientSender { client_tx },
|
||||
event_rx,
|
||||
runtime_handle,
|
||||
#[cfg(test)]
|
||||
_test_codex_home: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -738,13 +746,22 @@ mod tests {
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_core::config::ConfigBuilder;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::path::Path;
|
||||
use tempfile::TempDir;
|
||||
|
||||
async fn build_test_config() -> Config {
|
||||
match ConfigBuilder::default().build().await {
|
||||
async fn build_test_config(codex_home: &Path) -> Config {
|
||||
match ConfigBuilder::default()
|
||||
.codex_home(codex_home.to_path_buf())
|
||||
.build()
|
||||
.await
|
||||
{
|
||||
Ok(config) => config,
|
||||
Err(_) => Config::load_default_with_cli_overrides(Vec::new())
|
||||
.await
|
||||
.expect("default config should load"),
|
||||
Err(_) => Config::load_default_with_cli_overrides_for_codex_home(
|
||||
codex_home.to_path_buf(),
|
||||
Vec::new(),
|
||||
)
|
||||
.await
|
||||
.expect("default config should load"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -752,15 +769,21 @@ mod tests {
|
||||
session_source: SessionSource,
|
||||
channel_capacity: usize,
|
||||
) -> InProcessClientHandle {
|
||||
let codex_home = TempDir::new().expect("temp dir");
|
||||
let config = Arc::new(build_test_config(codex_home.path()).await);
|
||||
let state_db = codex_rollout::state_db::try_init(config.as_ref())
|
||||
.await
|
||||
.expect("state db should initialize for in-process test");
|
||||
let args = InProcessStartArgs {
|
||||
arg0_paths: Arg0DispatchPaths::default(),
|
||||
config: Arc::new(build_test_config().await),
|
||||
config,
|
||||
cli_overrides: Vec::new(),
|
||||
loader_overrides: LoaderOverrides::default(),
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
thread_config_loader: Arc::new(codex_config::NoopThreadConfigLoader),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: Some(state_db),
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
config_warnings: Vec::new(),
|
||||
session_source,
|
||||
@@ -775,7 +798,9 @@ mod tests {
|
||||
},
|
||||
channel_capacity,
|
||||
};
|
||||
start(args).await.expect("in-process runtime should start")
|
||||
let mut client = start(args).await.expect("in-process runtime should start");
|
||||
client._test_codex_home = Some(codex_home);
|
||||
client
|
||||
}
|
||||
|
||||
async fn start_test_client(session_source: SessionSource) -> InProcessClientHandle {
|
||||
@@ -804,7 +829,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn in_process_allows_device_key_requests_to_reach_device_key_api() {
|
||||
async fn in_process_allows_device_key_requests_to_reach_device_key_processor() {
|
||||
let client = start_test_client(SessionSource::Cli).await;
|
||||
const MALFORMED_KEY_ID_MESSAGE: &str = concat!(
|
||||
"invalid device key payload: keyId must be dk_hse_, dk_tpm_, or dk_osn_ ",
|
||||
|
||||
@@ -54,6 +54,7 @@ use codex_exec_server::EnvironmentManager;
|
||||
use codex_exec_server::ExecServerRuntimePaths;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use codex_rollout::state_db as rollout_state_db;
|
||||
use codex_state::log_db;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
@@ -73,25 +74,21 @@ use tracing_subscriber::util::SubscriberInitExt;
|
||||
mod analytics_utils;
|
||||
mod app_server_tracing;
|
||||
mod bespoke_event_handling;
|
||||
mod codex_message_processor;
|
||||
mod command_exec;
|
||||
mod config;
|
||||
mod config_api;
|
||||
mod config_manager;
|
||||
mod config_manager_service;
|
||||
mod connection_rpc_gate;
|
||||
mod device_key_api;
|
||||
mod dynamic_tools;
|
||||
mod error_code;
|
||||
mod external_agent_config_api;
|
||||
mod filters;
|
||||
mod fs_api;
|
||||
mod fs_watch;
|
||||
mod fuzzy_file_search;
|
||||
pub mod in_process;
|
||||
mod message_processor;
|
||||
mod models;
|
||||
mod outgoing_message;
|
||||
mod request_processors;
|
||||
mod request_serialization;
|
||||
mod server_request_error;
|
||||
mod thread_state;
|
||||
@@ -457,23 +454,6 @@ pub async fn run_main_with_transport_options(
|
||||
.await
|
||||
{
|
||||
Ok(config) => {
|
||||
let effective_toml = config.config_layer_stack.effective_config();
|
||||
match effective_toml.try_into() {
|
||||
Ok(config_toml) => {
|
||||
if let Err(err) = codex_core::personality_migration::maybe_migrate_personality(
|
||||
&config.codex_home,
|
||||
&config_toml,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(error = %err, "Failed to run personality migration");
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(error = %err, "Failed to deserialize config for personality migration");
|
||||
}
|
||||
}
|
||||
|
||||
let discovered_thread_config_loader = configured_thread_config_loader(&config);
|
||||
config_manager
|
||||
.replace_thread_config_loader(Arc::clone(&discovered_thread_config_loader));
|
||||
@@ -487,23 +467,70 @@ pub async fn run_main_with_transport_options(
|
||||
}
|
||||
};
|
||||
let mut config_warnings = Vec::new();
|
||||
let config = match config_manager
|
||||
let (mut config, should_run_personality_migration) = match config_manager
|
||||
.load_latest_config(/*fallback_cwd*/ None)
|
||||
.await
|
||||
{
|
||||
Ok(config) => config,
|
||||
Ok(config) => (config, true),
|
||||
Err(err) => {
|
||||
let message = config_warning_from_error("Invalid configuration; using defaults.", &err);
|
||||
config_warnings.push(message);
|
||||
config_manager.load_default_config().await.map_err(|e| {
|
||||
std::io::Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("error loading default config after config error: {e}"),
|
||||
)
|
||||
})?
|
||||
(
|
||||
config_manager.load_default_config().await.map_err(|e| {
|
||||
std::io::Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("error loading default config after config error: {e}"),
|
||||
)
|
||||
})?,
|
||||
false,
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
let state_db_result = rollout_state_db::try_init(&config).await;
|
||||
let state_db_init_error = state_db_result.as_ref().err().map(ToString::to_string);
|
||||
let state_db = state_db_result.ok();
|
||||
|
||||
if should_run_personality_migration {
|
||||
let effective_toml = config.config_layer_stack.effective_config();
|
||||
match effective_toml.try_into() {
|
||||
Ok(config_toml) => {
|
||||
match codex_core::personality_migration::maybe_migrate_personality(
|
||||
&config.codex_home,
|
||||
&config_toml,
|
||||
state_db.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(codex_core::personality_migration::PersonalityMigrationStatus::Applied) => {
|
||||
config = config_manager
|
||||
.load_latest_config(/*fallback_cwd*/ None)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
std::io::Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!(
|
||||
"error reloading config after personality migration: {err}"
|
||||
),
|
||||
)
|
||||
})?;
|
||||
}
|
||||
Ok(
|
||||
codex_core::personality_migration::PersonalityMigrationStatus::SkippedMarker
|
||||
| codex_core::personality_migration::PersonalityMigrationStatus::SkippedExplicitPersonality
|
||||
| codex_core::personality_migration::PersonalityMigrationStatus::SkippedNoSessions,
|
||||
) => {}
|
||||
Err(err) => {
|
||||
warn!(error = %err, "Failed to run personality migration");
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(error = %err, "Failed to deserialize config for personality migration");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(Some(err)) = check_execpolicy_for_warnings(&config.config_layer_stack).await {
|
||||
let (path, range) = exec_policy_warning_location(&err);
|
||||
let message = ConfigWarningNotification {
|
||||
@@ -571,13 +598,6 @@ pub async fn run_main_with_transport_options(
|
||||
|
||||
let feedback_layer = feedback.logger_layer();
|
||||
let feedback_metadata_layer = feedback.metadata_layer();
|
||||
let state_db_result = codex_state::StateRuntime::init(
|
||||
config.sqlite_home.clone(),
|
||||
config.model_provider_id.clone(),
|
||||
)
|
||||
.await;
|
||||
let state_db_init_error = state_db_result.as_ref().err().map(ToString::to_string);
|
||||
let state_db = state_db_result.ok();
|
||||
let log_db = state_db.clone().map(log_db::start);
|
||||
let log_db_layer = log_db
|
||||
.clone()
|
||||
@@ -749,6 +769,7 @@ pub async fn run_main_with_transport_options(
|
||||
environment_manager,
|
||||
feedback: feedback.clone(),
|
||||
log_db,
|
||||
state_db: state_db.clone(),
|
||||
config_warnings,
|
||||
session_source,
|
||||
auth_manager,
|
||||
|
||||
File diff suppressed because it is too large
@@ -290,6 +290,7 @@ async fn build_test_processor(
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
config_warnings: Vec::new(),
|
||||
session_source: SessionSource::VSCode,
|
||||
auth_manager,
|
||||
@@ -1,5 +1,4 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::atomic::Ordering;
|
||||
@@ -15,7 +14,6 @@ use codex_app_server_protocol::ServerRequestPayload;
|
||||
use codex_otel::span_w3c_trace_context;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::W3cTraceContext;
|
||||
use serde::Serialize;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::oneshot;
|
||||
@@ -26,22 +24,17 @@ use tracing::warn;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::internal_error;
|
||||
use crate::server_request_error::TURN_TRANSITION_PENDING_REQUEST_ERROR_REASON;
|
||||
pub(crate) use codex_app_server_transport::ConnectionId;
|
||||
pub(crate) use codex_app_server_transport::OutgoingError;
|
||||
pub(crate) use codex_app_server_transport::OutgoingMessage;
|
||||
pub(crate) use codex_app_server_transport::OutgoingResponse;
|
||||
pub(crate) use codex_app_server_transport::QueuedOutgoingMessage;
|
||||
|
||||
#[cfg(test)]
|
||||
use codex_protocol::account::PlanType;
|
||||
|
||||
pub(crate) type ClientRequestResult = std::result::Result<Result, JSONRPCErrorError>;
|
||||
|
||||
/// Stable identifier for a transport connection.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
pub(crate) struct ConnectionId(pub(crate) u64);
|
||||
|
||||
impl fmt::Display for ConnectionId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Stable identifier for a client request scoped to a transport connection.
|
||||
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
|
||||
pub(crate) struct ConnectionRequestId {
|
||||
@@ -96,21 +89,6 @@ pub(crate) enum OutgoingEnvelope {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct QueuedOutgoingMessage {
|
||||
pub(crate) message: OutgoingMessage,
|
||||
pub(crate) write_complete_tx: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
impl QueuedOutgoingMessage {
|
||||
pub(crate) fn new(message: OutgoingMessage) -> Self {
|
||||
Self {
|
||||
message,
|
||||
write_complete_tx: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends messages to the client and manages request callbacks.
|
||||
pub(crate) struct OutgoingMessageSender {
|
||||
next_server_request_id: AtomicI64,
|
||||
@@ -665,30 +643,6 @@ impl OutgoingMessageSender {
|
||||
}
|
||||
}
|
||||
|
||||
/// Outgoing message from the server to the client.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(untagged)]
|
||||
pub(crate) enum OutgoingMessage {
|
||||
Request(ServerRequest),
|
||||
/// AppServerNotification is specific to the case where this is run as an
|
||||
/// "app server" as opposed to an MCP server.
|
||||
AppServerNotification(ServerNotification),
|
||||
Response(OutgoingResponse),
|
||||
Error(OutgoingError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub(crate) struct OutgoingResponse {
|
||||
pub id: RequestId,
|
||||
pub result: Result,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub(crate) struct OutgoingError {
|
||||
pub error: JSONRPCErrorError,
|
||||
pub id: RequestId,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
492
codex-rs/app-server/src/request_processors.rs
Normal file
@@ -0,0 +1,492 @@
|
||||
use crate::bespoke_event_handling::apply_bespoke_event_handling;
|
||||
use crate::bespoke_event_handling::maybe_emit_hook_prompt_item_completed;
|
||||
use crate::command_exec::CommandExecManager;
|
||||
use crate::command_exec::StartCommandExecParams;
|
||||
use crate::config_manager::ConfigManager;
|
||||
use crate::error_code::INPUT_TOO_LARGE_ERROR_CODE;
|
||||
use crate::error_code::INTERNAL_ERROR_CODE;
|
||||
use crate::error_code::INVALID_PARAMS_ERROR_CODE;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::error_code::invalid_params;
|
||||
use crate::models::supported_models;
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
use crate::outgoing_message::RequestContext;
|
||||
use crate::outgoing_message::ThreadScopedOutgoingMessageSender;
|
||||
use crate::thread_status::ThreadWatchManager;
|
||||
use crate::thread_status::resolve_thread_status;
|
||||
use chrono::DateTime;
|
||||
use chrono::Duration as ChronoDuration;
|
||||
use chrono::SecondsFormat;
|
||||
use chrono::Utc;
|
||||
use codex_analytics::AnalyticsEventsClient;
|
||||
use codex_analytics::AnalyticsJsonRpcError;
|
||||
use codex_analytics::InputError;
|
||||
use codex_analytics::TurnSteerRequestError;
|
||||
use codex_app_server_protocol::Account;
|
||||
use codex_app_server_protocol::AccountLoginCompletedNotification;
|
||||
use codex_app_server_protocol::AccountUpdatedNotification;
|
||||
use codex_app_server_protocol::AddCreditsNudgeCreditType;
|
||||
use codex_app_server_protocol::AddCreditsNudgeEmailStatus;
|
||||
use codex_app_server_protocol::AppInfo;
|
||||
use codex_app_server_protocol::AppListUpdatedNotification;
|
||||
use codex_app_server_protocol::AppSummary;
|
||||
use codex_app_server_protocol::AppsListParams;
|
||||
use codex_app_server_protocol::AppsListResponse;
|
||||
use codex_app_server_protocol::AskForApproval;
|
||||
use codex_app_server_protocol::AuthMode;
|
||||
use codex_app_server_protocol::CancelLoginAccountParams;
|
||||
use codex_app_server_protocol::CancelLoginAccountResponse;
|
||||
use codex_app_server_protocol::CancelLoginAccountStatus;
|
||||
use codex_app_server_protocol::ClientInfo;
|
||||
use codex_app_server_protocol::ClientRequest;
|
||||
use codex_app_server_protocol::ClientResponsePayload;
|
||||
use codex_app_server_protocol::CodexErrorInfo;
|
||||
use codex_app_server_protocol::CollaborationModeListParams;
|
||||
use codex_app_server_protocol::CollaborationModeListResponse;
|
||||
use codex_app_server_protocol::CommandExecParams;
|
||||
use codex_app_server_protocol::CommandExecResizeParams;
|
||||
use codex_app_server_protocol::CommandExecTerminateParams;
|
||||
use codex_app_server_protocol::CommandExecWriteParams;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::ConversationGitInfo;
|
||||
use codex_app_server_protocol::ConversationSummary;
|
||||
use codex_app_server_protocol::DynamicToolSpec as ApiDynamicToolSpec;
|
||||
use codex_app_server_protocol::ExperimentalFeature as ApiExperimentalFeature;
|
||||
use codex_app_server_protocol::ExperimentalFeatureListParams;
|
||||
use codex_app_server_protocol::ExperimentalFeatureListResponse;
|
||||
use codex_app_server_protocol::ExperimentalFeatureStage as ApiExperimentalFeatureStage;
|
||||
use codex_app_server_protocol::FeedbackUploadParams;
|
||||
use codex_app_server_protocol::FeedbackUploadResponse;
|
||||
use codex_app_server_protocol::GetAccountParams;
|
||||
use codex_app_server_protocol::GetAccountRateLimitsResponse;
|
||||
use codex_app_server_protocol::GetAccountResponse;
|
||||
use codex_app_server_protocol::GetAuthStatusParams;
|
||||
use codex_app_server_protocol::GetAuthStatusResponse;
|
||||
use codex_app_server_protocol::GetConversationSummaryParams;
|
||||
use codex_app_server_protocol::GetConversationSummaryResponse;
|
||||
use codex_app_server_protocol::GitDiffToRemoteParams;
|
||||
use codex_app_server_protocol::GitDiffToRemoteResponse;
|
||||
use codex_app_server_protocol::GitInfo as ApiGitInfo;
|
||||
use codex_app_server_protocol::HookMetadata;
|
||||
use codex_app_server_protocol::HooksListParams;
|
||||
use codex_app_server_protocol::HooksListResponse;
|
||||
use codex_app_server_protocol::InitializeParams;
|
||||
use codex_app_server_protocol::InitializeResponse;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::ListMcpServerStatusParams;
|
||||
use codex_app_server_protocol::ListMcpServerStatusResponse;
|
||||
use codex_app_server_protocol::LoginAccountParams;
|
||||
use codex_app_server_protocol::LoginAccountResponse;
|
||||
use codex_app_server_protocol::LoginApiKeyParams;
|
||||
use codex_app_server_protocol::LogoutAccountResponse;
|
||||
use codex_app_server_protocol::MarketplaceAddParams;
|
||||
use codex_app_server_protocol::MarketplaceAddResponse;
|
||||
use codex_app_server_protocol::MarketplaceInterface;
|
||||
use codex_app_server_protocol::MarketplaceRemoveParams;
|
||||
use codex_app_server_protocol::MarketplaceRemoveResponse;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeErrorInfo;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeParams;
|
||||
use codex_app_server_protocol::MarketplaceUpgradeResponse;
|
||||
use codex_app_server_protocol::McpResourceReadParams;
|
||||
use codex_app_server_protocol::McpResourceReadResponse;
|
||||
use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
|
||||
use codex_app_server_protocol::McpServerOauthLoginParams;
|
||||
use codex_app_server_protocol::McpServerOauthLoginResponse;
|
||||
use codex_app_server_protocol::McpServerRefreshResponse;
|
||||
use codex_app_server_protocol::McpServerStatus;
|
||||
use codex_app_server_protocol::McpServerStatusDetail;
|
||||
use codex_app_server_protocol::McpServerToolCallParams;
|
||||
use codex_app_server_protocol::McpServerToolCallResponse;
|
||||
use codex_app_server_protocol::MemoryResetResponse;
|
||||
use codex_app_server_protocol::MockExperimentalMethodParams;
|
||||
use codex_app_server_protocol::MockExperimentalMethodResponse;
|
||||
use codex_app_server_protocol::ModelListParams;
|
||||
use codex_app_server_protocol::ModelListResponse;
|
||||
use codex_app_server_protocol::PermissionProfileModificationParams;
|
||||
use codex_app_server_protocol::PermissionProfileSelectionParams;
|
||||
use codex_app_server_protocol::PluginDetail;
|
||||
use codex_app_server_protocol::PluginInstallParams;
|
||||
use codex_app_server_protocol::PluginInstallResponse;
|
||||
use codex_app_server_protocol::PluginInterface;
|
||||
use codex_app_server_protocol::PluginListParams;
|
||||
use codex_app_server_protocol::PluginListResponse;
|
||||
use codex_app_server_protocol::PluginMarketplaceEntry;
|
||||
use codex_app_server_protocol::PluginReadParams;
|
||||
use codex_app_server_protocol::PluginReadResponse;
|
||||
use codex_app_server_protocol::PluginShareDeleteParams;
|
||||
use codex_app_server_protocol::PluginShareDeleteResponse;
|
||||
use codex_app_server_protocol::PluginShareListItem;
|
||||
use codex_app_server_protocol::PluginShareListParams;
|
||||
use codex_app_server_protocol::PluginShareListResponse;
|
||||
use codex_app_server_protocol::PluginShareSaveParams;
|
||||
use codex_app_server_protocol::PluginShareSaveResponse;
|
||||
use codex_app_server_protocol::PluginSkillReadParams;
|
||||
use codex_app_server_protocol::PluginSkillReadResponse;
|
||||
use codex_app_server_protocol::PluginSource;
|
||||
use codex_app_server_protocol::PluginSummary;
|
||||
use codex_app_server_protocol::PluginUninstallParams;
|
||||
use codex_app_server_protocol::PluginUninstallResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ReviewDelivery as ApiReviewDelivery;
|
||||
use codex_app_server_protocol::ReviewStartParams;
|
||||
use codex_app_server_protocol::ReviewStartResponse;
|
||||
use codex_app_server_protocol::ReviewTarget as ApiReviewTarget;
|
||||
use codex_app_server_protocol::SandboxMode;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailParams;
|
||||
use codex_app_server_protocol::SendAddCreditsNudgeEmailResponse;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestResolvedNotification;
|
||||
use codex_app_server_protocol::SkillSummary;
|
||||
use codex_app_server_protocol::SkillsConfigWriteParams;
|
||||
use codex_app_server_protocol::SkillsConfigWriteResponse;
|
||||
use codex_app_server_protocol::SkillsListParams;
|
||||
use codex_app_server_protocol::SkillsListResponse;
|
||||
use codex_app_server_protocol::SortDirection;
|
||||
use codex_app_server_protocol::Thread;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionParams;
|
||||
use codex_app_server_protocol::ThreadApproveGuardianDeniedActionResponse;
|
||||
use codex_app_server_protocol::ThreadArchiveParams;
|
||||
use codex_app_server_protocol::ThreadArchiveResponse;
|
||||
use codex_app_server_protocol::ThreadArchivedNotification;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanParams;
|
||||
use codex_app_server_protocol::ThreadBackgroundTerminalsCleanResponse;
|
||||
use codex_app_server_protocol::ThreadClosedNotification;
|
||||
use codex_app_server_protocol::ThreadCompactStartParams;
|
||||
use codex_app_server_protocol::ThreadCompactStartResponse;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadDecrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadForkParams;
|
||||
use codex_app_server_protocol::ThreadForkResponse;
|
||||
use codex_app_server_protocol::ThreadGoal;
|
||||
use codex_app_server_protocol::ThreadGoalClearParams;
|
||||
use codex_app_server_protocol::ThreadGoalClearResponse;
|
||||
use codex_app_server_protocol::ThreadGoalClearedNotification;
|
||||
use codex_app_server_protocol::ThreadGoalGetParams;
|
||||
use codex_app_server_protocol::ThreadGoalGetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalSetParams;
|
||||
use codex_app_server_protocol::ThreadGoalSetResponse;
|
||||
use codex_app_server_protocol::ThreadGoalStatus;
|
||||
use codex_app_server_protocol::ThreadGoalUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadHistoryBuilder;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationParams;
|
||||
use codex_app_server_protocol::ThreadIncrementElicitationResponse;
|
||||
use codex_app_server_protocol::ThreadInjectItemsParams;
|
||||
use codex_app_server_protocol::ThreadInjectItemsResponse;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadListCwdFilter;
|
||||
use codex_app_server_protocol::ThreadListParams;
|
||||
use codex_app_server_protocol::ThreadListResponse;
|
||||
use codex_app_server_protocol::ThreadLoadedListParams;
|
||||
use codex_app_server_protocol::ThreadLoadedListResponse;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetParams;
|
||||
use codex_app_server_protocol::ThreadMemoryModeSetResponse;
|
||||
use codex_app_server_protocol::ThreadMetadataGitInfoUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateParams;
|
||||
use codex_app_server_protocol::ThreadMetadataUpdateResponse;
|
||||
use codex_app_server_protocol::ThreadNameUpdatedNotification;
|
||||
use codex_app_server_protocol::ThreadReadParams;
|
||||
use codex_app_server_protocol::ThreadReadResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendAudioResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeAppendTextResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeListVoicesResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartResponse;
|
||||
use codex_app_server_protocol::ThreadRealtimeStartTransport;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopParams;
|
||||
use codex_app_server_protocol::ThreadRealtimeStopResponse;
|
||||
use codex_app_server_protocol::ThreadResumeParams;
|
||||
use codex_app_server_protocol::ThreadResumeResponse;
|
||||
use codex_app_server_protocol::ThreadRollbackParams;
|
||||
use codex_app_server_protocol::ThreadSetNameParams;
|
||||
use codex_app_server_protocol::ThreadSetNameResponse;
|
||||
use codex_app_server_protocol::ThreadShellCommandParams;
|
||||
use codex_app_server_protocol::ThreadShellCommandResponse;
|
||||
use codex_app_server_protocol::ThreadSortKey;
|
||||
use codex_app_server_protocol::ThreadSourceKind;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::ThreadStartedNotification;
|
||||
use codex_app_server_protocol::ThreadStatus;
|
||||
use codex_app_server_protocol::ThreadTurnsListParams;
|
||||
use codex_app_server_protocol::ThreadTurnsListResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchiveParams;
|
||||
use codex_app_server_protocol::ThreadUnarchiveResponse;
|
||||
use codex_app_server_protocol::ThreadUnarchivedNotification;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeParams;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeResponse;
|
||||
use codex_app_server_protocol::ThreadUnsubscribeStatus;
|
||||
use codex_app_server_protocol::Turn;
|
||||
use codex_app_server_protocol::TurnEnvironmentParams;
|
||||
use codex_app_server_protocol::TurnError;
|
||||
use codex_app_server_protocol::TurnInterruptParams;
|
||||
use codex_app_server_protocol::TurnInterruptResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::TurnStartResponse;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::TurnSteerParams;
|
||||
use codex_app_server_protocol::TurnSteerResponse;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupCompletedNotification;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupMode;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartParams;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartResponse;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_backend_client::AddCreditsNudgeCreditType as BackendAddCreditsNudgeCreditType;
|
||||
use codex_backend_client::Client as BackendClient;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_chatgpt::workspace_settings;
|
||||
use codex_config::CloudRequirementsLoadError;
|
||||
use codex_config::CloudRequirementsLoadErrorCode;
|
||||
use codex_config::ConfigLayerStack;
|
||||
use codex_config::loader::project_trust_key;
|
||||
use codex_config::types::McpServerTransportConfig;
|
||||
use codex_core::CodexThread;
|
||||
use codex_core::CodexThreadTurnContextOverrides;
|
||||
use codex_core::ForkSnapshot;
|
||||
use codex_core::NewThread;
|
||||
use codex_core::RolloutRecorder;
|
||||
use codex_core::SessionMeta;
|
||||
use codex_core::StartThreadOptions;
|
||||
use codex_core::SteerInputError;
|
||||
use codex_core::ThreadConfigSnapshot;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
use codex_core::config::NetworkProxyAuditMetadata;
|
||||
use codex_core::config::edit::ConfigEdit;
|
||||
use codex_core::config::edit::ConfigEditsBuilder;
|
||||
use codex_core::exec::ExecCapturePolicy;
|
||||
use codex_core::exec::ExecExpiration;
|
||||
use codex_core::exec::ExecParams;
|
||||
use codex_core::exec_env::create_env;
|
||||
use codex_core::find_archived_thread_path_by_id_str;
|
||||
use codex_core::find_thread_name_by_id;
|
||||
use codex_core::find_thread_path_by_id_str;
|
||||
use codex_core::path_utils;
|
||||
use codex_core::read_head_for_summary;
|
||||
use codex_core::sandboxing::SandboxPermissions;
|
||||
use codex_core::windows_sandbox::WindowsSandboxLevelExt;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupMode as CoreWindowsSandboxSetupMode;
|
||||
use codex_core::windows_sandbox::WindowsSandboxSetupRequest;
|
||||
use codex_core_plugins::OPENAI_CURATED_MARKETPLACE_NAME;
|
||||
use codex_core_plugins::PluginInstallError as CorePluginInstallError;
|
||||
use codex_core_plugins::PluginInstallRequest;
|
||||
use codex_core_plugins::PluginLoadOutcome;
|
||||
use codex_core_plugins::PluginReadRequest;
|
||||
use codex_core_plugins::PluginUninstallError as CorePluginUninstallError;
|
||||
use codex_core_plugins::loader::load_plugin_apps;
|
||||
use codex_core_plugins::loader::load_plugin_mcp_servers;
|
||||
use codex_core_plugins::loader::plugin_telemetry_metadata_from_root;
|
||||
use codex_core_plugins::manifest::PluginManifestInterface;
|
||||
use codex_core_plugins::marketplace::MarketplaceError;
|
||||
use codex_core_plugins::marketplace::MarketplacePluginSource;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddError;
|
||||
use codex_core_plugins::marketplace_add::MarketplaceAddRequest;
|
||||
use codex_core_plugins::marketplace_add::add_marketplace as add_marketplace_to_codex_home;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveError;
|
||||
use codex_core_plugins::marketplace_remove::MarketplaceRemoveRequest as CoreMarketplaceRemoveRequest;
|
||||
use codex_core_plugins::marketplace_remove::remove_marketplace;
|
||||
use codex_core_plugins::remote::RemoteMarketplace;
|
||||
use codex_core_plugins::remote::RemotePluginCatalogError;
|
||||
use codex_core_plugins::remote::RemotePluginDetail as RemoteCatalogPluginDetail;
|
||||
use codex_core_plugins::remote::RemotePluginServiceConfig;
|
||||
use codex_core_plugins::remote::RemotePluginShareSummary as RemoteCatalogPluginShareSummary;
|
||||
use codex_core_plugins::remote::RemotePluginSummary as RemoteCatalogPluginSummary;
|
||||
use codex_exec_server::EnvironmentManager;
|
||||
use codex_exec_server::LOCAL_FS;
|
||||
use codex_features::FEATURES;
|
||||
use codex_features::Feature;
|
||||
use codex_features::Stage;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_feedback::FeedbackAttachmentPath;
|
||||
use codex_feedback::FeedbackUploadOptions;
|
||||
use codex_git_utils::git_diff_to_remote;
|
||||
use codex_git_utils::resolve_root_git_project_for_trust;
|
||||
use codex_login::AuthManager;
|
||||
use codex_login::CLIENT_ID;
|
||||
use codex_login::CodexAuth;
|
||||
use codex_login::ServerOptions as LoginServerOptions;
|
||||
use codex_login::ShutdownHandle;
|
||||
use codex_login::auth::login_with_chatgpt_auth_tokens;
|
||||
use codex_login::complete_device_code_login;
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_login::request_device_code;
|
||||
use codex_login::run_login_server;
|
||||
use codex_mcp::McpRuntimeEnvironment;
|
||||
use codex_mcp::McpServerStatusSnapshot;
|
||||
use codex_mcp::McpSnapshotDetail;
|
||||
use codex_mcp::collect_mcp_server_status_snapshot_with_detail;
|
||||
use codex_mcp::discover_supported_scopes;
|
||||
use codex_mcp::effective_mcp_servers;
|
||||
use codex_mcp::read_mcp_resource as read_mcp_resource_without_thread;
|
||||
use codex_mcp::resolve_oauth_scopes;
|
||||
use codex_memories_write::clear_memory_roots_contents;
|
||||
use codex_model_provider::ProviderAccountError;
|
||||
use codex_model_provider::create_model_provider;
|
||||
use codex_models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::config_types::CollaborationMode;
|
||||
use codex_protocol::config_types::ForcedLoginMethod;
|
||||
use codex_protocol::config_types::Personality;
|
||||
use codex_protocol::config_types::TrustLevel;
|
||||
use codex_protocol::config_types::WindowsSandboxLevel;
|
||||
use codex_protocol::dynamic_tools::DynamicToolSpec as CoreDynamicToolSpec;
|
||||
use codex_protocol::error::CodexErr;
|
||||
use codex_protocol::error::Result as CodexResult;
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::permissions::FileSystemSandboxPolicy;
|
||||
use codex_protocol::protocol::AgentStatus;
|
||||
use codex_protocol::protocol::ConversationAudioParams;
|
||||
use codex_protocol::protocol::ConversationStartParams;
|
||||
use codex_protocol::protocol::ConversationStartTransport;
|
||||
use codex_protocol::protocol::ConversationTextParams;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use codex_protocol::protocol::GitInfo as CoreGitInfo;
|
||||
use codex_protocol::protocol::InitialHistory;
|
||||
use codex_protocol::protocol::McpAuthStatus as CoreMcpAuthStatus;
|
||||
use codex_protocol::protocol::McpServerRefreshConfig;
|
||||
use codex_protocol::protocol::Op;
|
||||
use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
|
||||
use codex_protocol::protocol::RealtimeVoicesList;
|
||||
use codex_protocol::protocol::ResumedHistory;
|
||||
use codex_protocol::protocol::ReviewDelivery as CoreReviewDelivery;
|
||||
use codex_protocol::protocol::ReviewRequest;
|
||||
use codex_protocol::protocol::ReviewTarget as CoreReviewTarget;
|
||||
use codex_protocol::protocol::RolloutItem;
|
||||
use codex_protocol::protocol::SessionConfiguredEvent;
|
||||
use codex_protocol::protocol::SessionMetaLine;
|
||||
use codex_protocol::protocol::TurnEnvironmentSelection;
|
||||
use codex_protocol::protocol::USER_MESSAGE_BEGIN;
|
||||
use codex_protocol::protocol::W3cTraceContext;
|
||||
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
|
||||
use codex_protocol::user_input::UserInput as CoreInputItem;
|
||||
use codex_rmcp_client::perform_oauth_login_return_url;
|
||||
use codex_rollout::EventPersistenceMode;
|
||||
use codex_rollout::is_persisted_rollout_item;
|
||||
use codex_rollout::state_db::StateDbHandle;
|
||||
use codex_rollout::state_db::reconcile_rollout;
|
||||
use codex_state::StateRuntime;
|
||||
use codex_state::ThreadMetadata;
|
||||
use codex_state::ThreadMetadataBuilder;
|
||||
use codex_state::log_db::LogDbLayer;
|
||||
use codex_thread_store::ArchiveThreadParams as StoreArchiveThreadParams;
|
||||
use codex_thread_store::ListThreadsParams as StoreListThreadsParams;
|
||||
use codex_thread_store::LocalThreadStore;
|
||||
use codex_thread_store::ReadThreadByRolloutPathParams as StoreReadThreadByRolloutPathParams;
|
||||
use codex_thread_store::ReadThreadParams as StoreReadThreadParams;
|
||||
use codex_thread_store::SortDirection as StoreSortDirection;
|
||||
use codex_thread_store::StoredThread;
|
||||
use codex_thread_store::ThreadMetadataPatch as StoreThreadMetadataPatch;
|
||||
use codex_thread_store::ThreadSortKey as StoreThreadSortKey;
|
||||
use codex_thread_store::ThreadStore;
|
||||
use codex_thread_store::ThreadStoreError;
|
||||
use codex_thread_store::UpdateThreadMetadataParams as StoreUpdateThreadMetadataParams;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use codex_utils_pty::DEFAULT_OUTPUT_BYTES_CAP;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Error as IoError;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::result::Result;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio::sync::SemaphorePermit;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::watch;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tokio_util::task::TaskTracker;
|
||||
use toml::Value as TomlValue;
|
||||
use tracing::Instrument;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
use tracing::warn;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[cfg(test)]
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
|
||||
mod account_processor;
|
||||
mod apps_processor;
|
||||
mod catalog_processor;
|
||||
mod command_exec_processor;
|
||||
mod config_processor;
|
||||
mod device_key_processor;
|
||||
mod external_agent_config_processor;
|
||||
mod feedback_processor;
|
||||
mod fs_processor;
|
||||
mod git_processor;
|
||||
mod initialize_processor;
|
||||
mod marketplace_processor;
|
||||
mod mcp_processor;
|
||||
mod plugins;
|
||||
mod search;
|
||||
mod thread_processor;
|
||||
mod token_usage_replay;
|
||||
mod turn_processor;
|
||||
mod windows_sandbox_processor;
|
||||
|
||||
pub(crate) use account_processor::AccountRequestProcessor;
|
||||
pub(crate) use apps_processor::AppsRequestProcessor;
|
||||
pub(crate) use catalog_processor::CatalogRequestProcessor;
|
||||
pub(crate) use command_exec_processor::CommandExecRequestProcessor;
|
||||
pub(crate) use config_processor::ConfigRequestProcessor;
|
||||
pub(crate) use device_key_processor::DeviceKeyRequestProcessor;
|
||||
pub(crate) use external_agent_config_processor::ExternalAgentConfigRequestProcessor;
|
||||
pub(crate) use feedback_processor::FeedbackRequestProcessor;
|
||||
pub(crate) use fs_processor::FsRequestProcessor;
|
||||
pub(crate) use git_processor::GitRequestProcessor;
|
||||
pub(crate) use initialize_processor::InitializeRequestProcessor;
|
||||
pub(crate) use marketplace_processor::MarketplaceRequestProcessor;
|
||||
pub(crate) use mcp_processor::McpRequestProcessor;
|
||||
pub(crate) use plugins::PluginRequestProcessor;
|
||||
pub(crate) use search::SearchRequestProcessor;
|
||||
pub(crate) use thread_goal_processor::ThreadGoalRequestProcessor;
|
||||
pub(crate) use thread_processor::ThreadRequestProcessor;
|
||||
pub(crate) use turn_processor::TurnRequestProcessor;
|
||||
pub(crate) use windows_sandbox_processor::WindowsSandboxRequestProcessor;
|
||||
|
||||
use crate::error_code::internal_error;
|
||||
use crate::error_code::invalid_request;
|
||||
use crate::filters::compute_source_filters;
|
||||
use crate::filters::source_kind_matches;
|
||||
use crate::thread_state::ThreadListenerCommand;
|
||||
use crate::thread_state::ThreadState;
|
||||
use crate::thread_state::ThreadStateManager;
|
||||
use token_usage_replay::latest_token_usage_turn_id_from_rollout_items;
|
||||
use token_usage_replay::send_thread_token_usage_update_to_connection;
|
||||
|
||||
mod config_errors;
|
||||
mod request_errors;
|
||||
mod thread_goal_processor;
|
||||
mod thread_lifecycle;
|
||||
mod thread_summary;
|
||||
|
||||
use self::config_errors::*;
|
||||
use self::request_errors::*;
|
||||
use self::thread_goal_processor::api_thread_goal_from_state;
|
||||
use self::thread_lifecycle::*;
|
||||
use self::thread_summary::*;
|
||||
|
||||
pub(crate) use self::thread_summary::read_rollout_items_from_rollout;
|
||||
pub(crate) use self::thread_summary::read_summary_from_rollout;
|
||||
pub(crate) use self::thread_summary::summary_to_thread;
|
||||
|
||||
pub(crate) fn build_api_turns_from_rollout_items(items: &[RolloutItem]) -> Vec<Turn> {
|
||||
let mut builder = ThreadHistoryBuilder::new();
|
||||
for item in items {
|
||||
if is_persisted_rollout_item(item, EventPersistenceMode::Limited) {
|
||||
builder.handle_rollout_item(item);
|
||||
}
|
||||
}
|
||||
builder.finish()
|
||||
}
|
||||
codex-rs/app-server/src/request_processors/account_processor.rs (new file, 1002 lines)
File diff suppressed because it is too large.

codex-rs/app-server/src/request_processors/apps_processor.rs (new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
use super::*;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct AppsRequestProcessor {
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
config_manager: ConfigManager,
|
||||
workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
}
|
||||
|
||||
impl AppsRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
config_manager: ConfigManager,
|
||||
workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
) -> Self {
|
||||
Self {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
outgoing,
|
||||
config_manager,
|
||||
workspace_settings_cache,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn apps_list(
|
||||
&self,
|
||||
request_id: &ConnectionRequestId,
|
||||
params: AppsListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.apps_list_inner(request_id, params)
|
||||
.await
|
||||
.map(|response| response.map(Into::into))
|
||||
}
|
||||
|
||||
async fn apps_list_inner(
|
||||
&self,
|
||||
request_id: &ConnectionRequestId,
|
||||
params: AppsListParams,
|
||||
) -> Result<Option<AppsListResponse>, JSONRPCErrorError> {
|
||||
let mut config = self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
|
||||
if let Some(thread_id) = params.thread_id.as_deref() {
|
||||
let (_, thread) = self.load_thread(thread_id).await?;
|
||||
|
||||
let _ = config
|
||||
.features
|
||||
.set_enabled(Feature::Apps, thread.enabled(Feature::Apps));
|
||||
}
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
if !config
|
||||
.features
|
||||
.apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::uses_codex_backend))
|
||||
{
|
||||
return Ok(Some(AppsListResponse {
|
||||
data: Vec::new(),
|
||||
next_cursor: None,
|
||||
}));
|
||||
}
|
||||
|
||||
if !self
|
||||
.workspace_codex_plugins_enabled(&config, auth.as_ref())
|
||||
.await
|
||||
{
|
||||
return Ok(Some(AppsListResponse {
|
||||
data: Vec::new(),
|
||||
next_cursor: None,
|
||||
}));
|
||||
}
|
||||
|
||||
let request = request_id.clone();
|
||||
let outgoing = Arc::clone(&self.outgoing);
|
||||
let environment_manager = self.thread_manager.environment_manager();
|
||||
tokio::spawn(async move {
|
||||
Self::apps_list_task(outgoing, request, params, config, environment_manager).await;
|
||||
});
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn apps_list_task(
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
request_id: ConnectionRequestId,
|
||||
params: AppsListParams,
|
||||
config: Config,
|
||||
environment_manager: Arc<EnvironmentManager>,
|
||||
) {
|
||||
let result = Self::apps_list_response(&outgoing, params, config, environment_manager).await;
|
||||
outgoing.send_result(request_id, result).await;
|
||||
}
|
||||
|
||||
async fn apps_list_response(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
params: AppsListParams,
|
||||
config: Config,
|
||||
environment_manager: Arc<EnvironmentManager>,
|
||||
) -> Result<AppsListResponse, JSONRPCErrorError> {
|
||||
let AppsListParams {
|
||||
cursor,
|
||||
limit,
|
||||
thread_id: _,
|
||||
force_refetch,
|
||||
} = params;
|
||||
let start = match cursor {
|
||||
Some(cursor) => match cursor.parse::<usize>() {
|
||||
Ok(idx) => idx,
|
||||
Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
|
||||
},
|
||||
None => 0,
|
||||
};
|
||||
|
||||
let (mut accessible_connectors, mut all_connectors) = tokio::join!(
|
||||
connectors::list_cached_accessible_connectors_from_mcp_tools(&config),
|
||||
connectors::list_cached_all_connectors(&config)
|
||||
);
|
||||
let cached_all_connectors = all_connectors.clone();
|
||||
|
||||
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
|
||||
|
||||
let accessible_config = config.clone();
|
||||
let accessible_tx = tx.clone();
|
||||
tokio::spawn(async move {
|
||||
let result =
|
||||
connectors::list_accessible_connectors_from_mcp_tools_with_environment_manager(
|
||||
&accessible_config,
|
||||
force_refetch,
|
||||
&environment_manager,
|
||||
)
|
||||
.await
|
||||
.map(|status| status.connectors)
|
||||
.map_err(|err| format!("failed to load accessible apps: {err}"));
|
||||
let _ = accessible_tx.send(AppListLoadResult::Accessible(result));
|
||||
});
|
||||
|
||||
let all_config = config.clone();
|
||||
tokio::spawn(async move {
|
||||
let result = connectors::list_all_connectors_with_options(&all_config, force_refetch)
|
||||
.await
|
||||
.map_err(|err| format!("failed to list apps: {err}"));
|
||||
let _ = tx.send(AppListLoadResult::Directory(result));
|
||||
});
|
||||
|
||||
let app_list_deadline = tokio::time::Instant::now() + APP_LIST_LOAD_TIMEOUT;
|
||||
let mut accessible_loaded = false;
|
||||
let mut all_loaded = false;
|
||||
let mut last_notified_apps = None;
|
||||
|
||||
if accessible_connectors.is_some() || all_connectors.is_some() {
|
||||
let merged = connectors::with_app_enabled_state(
|
||||
merge_loaded_apps(all_connectors.as_deref(), accessible_connectors.as_deref()),
|
||||
&config,
|
||||
);
|
||||
if should_send_app_list_updated_notification(
|
||||
merged.as_slice(),
|
||||
accessible_loaded,
|
||||
all_loaded,
|
||||
) {
|
||||
send_app_list_updated_notification(outgoing, merged.clone()).await;
|
||||
last_notified_apps = Some(merged);
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
let result = match tokio::time::timeout_at(app_list_deadline, rx.recv()).await {
|
||||
Ok(Some(result)) => result,
|
||||
Ok(None) => {
|
||||
return Err(internal_error("failed to load app lists"));
|
||||
}
|
||||
Err(_) => {
|
||||
let timeout_seconds = APP_LIST_LOAD_TIMEOUT.as_secs();
|
||||
return Err(internal_error(format!(
|
||||
"timed out waiting for app lists after {timeout_seconds} seconds"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
match result {
|
||||
AppListLoadResult::Accessible(Ok(connectors)) => {
|
||||
accessible_connectors = Some(connectors);
|
||||
accessible_loaded = true;
|
||||
}
|
||||
AppListLoadResult::Accessible(Err(err)) => {
|
||||
return Err(internal_error(err));
|
||||
}
|
||||
AppListLoadResult::Directory(Ok(connectors)) => {
|
||||
all_connectors = Some(connectors);
|
||||
all_loaded = true;
|
||||
}
|
||||
AppListLoadResult::Directory(Err(err)) => {
|
||||
return Err(internal_error(err));
|
||||
}
|
||||
}
|
||||
|
||||
let showing_interim_force_refetch = force_refetch && !(accessible_loaded && all_loaded);
|
||||
let all_connectors_for_update =
|
||||
if showing_interim_force_refetch && cached_all_connectors.is_some() {
|
||||
cached_all_connectors.as_deref()
|
||||
} else {
|
||||
all_connectors.as_deref()
|
||||
};
|
||||
let accessible_connectors_for_update =
|
||||
if showing_interim_force_refetch && !accessible_loaded {
|
||||
None
|
||||
} else {
|
||||
accessible_connectors.as_deref()
|
||||
};
|
||||
let merged = connectors::with_app_enabled_state(
|
||||
merge_loaded_apps(all_connectors_for_update, accessible_connectors_for_update),
|
||||
&config,
|
||||
);
|
||||
if should_send_app_list_updated_notification(
|
||||
merged.as_slice(),
|
||||
accessible_loaded,
|
||||
all_loaded,
|
||||
) && last_notified_apps.as_ref() != Some(&merged)
|
||||
{
|
||||
send_app_list_updated_notification(outgoing, merged.clone()).await;
|
||||
last_notified_apps = Some(merged.clone());
|
||||
}
|
||||
|
||||
if accessible_loaded && all_loaded {
|
||||
return paginate_apps(merged.as_slice(), start, limit);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn load_thread(
|
||||
&self,
|
||||
thread_id: &str,
|
||||
) -> Result<(ThreadId, Arc<CodexThread>), JSONRPCErrorError> {
|
||||
let thread_id = ThreadId::from_string(thread_id).map_err(|err| JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("invalid thread id: {err}"),
|
||||
data: None,
|
||||
})?;
|
||||
|
||||
let thread = self
|
||||
.thread_manager
|
||||
.get_thread(thread_id)
|
||||
.await
|
||||
.map_err(|_| JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("thread not found: {thread_id}"),
|
||||
data: None,
|
||||
})?;
|
||||
|
||||
Ok((thread_id, thread))
|
||||
}
|
||||
|
||||
async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to reload config: {err}"),
|
||||
data: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn workspace_codex_plugins_enabled(
|
||||
&self,
|
||||
config: &Config,
|
||||
auth: Option<&CodexAuth>,
|
||||
) -> bool {
|
||||
match workspace_settings::codex_plugins_enabled_for_workspace(
|
||||
config,
|
||||
auth,
|
||||
Some(&self.workspace_settings_cache),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(enabled) => enabled,
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const APP_LIST_LOAD_TIMEOUT: Duration = Duration::from_secs(90);
|
||||
|
||||
enum AppListLoadResult {
|
||||
Accessible(Result<Vec<AppInfo>, String>),
|
||||
Directory(Result<Vec<AppInfo>, String>),
|
||||
}
|
||||
|
||||
fn merge_loaded_apps(
|
||||
all_connectors: Option<&[AppInfo]>,
|
||||
accessible_connectors: Option<&[AppInfo]>,
|
||||
) -> Vec<AppInfo> {
|
||||
let all_connectors_loaded = all_connectors.is_some();
|
||||
let all = all_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
let accessible = accessible_connectors.map_or_else(Vec::new, <[AppInfo]>::to_vec);
|
||||
connectors::merge_connectors_with_accessible(all, accessible, all_connectors_loaded)
|
||||
}
|
||||
|
||||
fn should_send_app_list_updated_notification(
|
||||
connectors: &[AppInfo],
|
||||
accessible_loaded: bool,
|
||||
all_loaded: bool,
|
||||
) -> bool {
|
||||
connectors.iter().any(|connector| connector.is_accessible) || (accessible_loaded && all_loaded)
|
||||
}
|
||||
|
||||
fn paginate_apps(
|
||||
connectors: &[AppInfo],
|
||||
start: usize,
|
||||
limit: Option<u32>,
|
||||
) -> Result<AppsListResponse, JSONRPCErrorError> {
|
||||
let total = connectors.len();
|
||||
if start > total {
|
||||
return Err(JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("cursor {start} exceeds total apps {total}"),
|
||||
data: None,
|
||||
});
|
||||
}
|
||||
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let data = connectors[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(AppsListResponse { data, next_cursor })
|
||||
}
|
||||
|
||||
async fn send_app_list_updated_notification(
|
||||
outgoing: &Arc<OutgoingMessageSender>,
|
||||
data: Vec<AppInfo>,
|
||||
) {
|
||||
outgoing
|
||||
.send_server_notification(ServerNotification::AppListUpdated(
|
||||
AppListUpdatedNotification { data },
|
||||
))
|
||||
.await;
|
||||
}
|
||||
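apps/list above fans out two loaders (accessible connectors and the full directory), merges whatever arrives over an unbounded channel, and bounds both with a single deadline before paginating the merged result. Below is a minimal, self-contained sketch of that fan-out/merge-with-deadline pattern; the enum, the String payloads, and the timings are illustrative stand-ins for the real connector types, not the codex API.

// Sketch only: illustrates the two-loader merge with a shared deadline used by
// apps_list_response. Requires tokio (rt, macros, time, sync features).
use std::time::Duration;

enum LoadResult {
    // Stand-ins for the accessible-connector and directory loads in the source.
    Accessible(Result<Vec<String>, String>),
    Directory(Result<Vec<String>, String>),
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();

    // Loader 1: accessible apps (simulated as the slower fetch).
    let accessible_tx = tx.clone();
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        let _ = accessible_tx.send(LoadResult::Accessible(Ok(vec!["app-a".into()])));
    });

    // Loader 2: the full directory.
    tokio::spawn(async move {
        let _ = tx.send(LoadResult::Directory(Ok(vec!["app-a".into(), "app-b".into()])));
    });

    // Both loaders share one deadline; a partial result that arrives first can
    // already be merged while the other loader is still in flight.
    let deadline = tokio::time::Instant::now() + Duration::from_secs(5);
    let (mut accessible, mut directory) = (None, None);
    while accessible.is_none() || directory.is_none() {
        match tokio::time::timeout_at(deadline, rx.recv()).await {
            Ok(Some(LoadResult::Accessible(result))) => accessible = Some(result?),
            Ok(Some(LoadResult::Directory(result))) => directory = Some(result?),
            Ok(None) => return Err("all loaders dropped without reporting".into()),
            Err(_) => return Err("timed out waiting for app lists".into()),
        }
    }
    println!("accessible: {accessible:?}, directory: {directory:?}");
    Ok(())
}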
codex-rs/app-server/src/request_processors/catalog_processor.rs (new file, 600 lines)
@@ -0,0 +1,600 @@
|
||||
use super::*;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct CatalogRequestProcessor {
|
||||
pub(super) auth_manager: Arc<AuthManager>,
|
||||
pub(super) thread_manager: Arc<ThreadManager>,
|
||||
pub(super) config: Arc<Config>,
|
||||
pub(super) config_manager: ConfigManager,
|
||||
pub(super) workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
}
|
||||
|
||||
fn skills_to_info(
|
||||
skills: &[codex_core::skills::SkillMetadata],
|
||||
disabled_paths: &HashSet<AbsolutePathBuf>,
|
||||
) -> Vec<codex_app_server_protocol::SkillMetadata> {
|
||||
skills
|
||||
.iter()
|
||||
.map(|skill| {
|
||||
let enabled = !disabled_paths.contains(&skill.path_to_skills_md);
|
||||
codex_app_server_protocol::SkillMetadata {
|
||||
name: skill.name.clone(),
|
||||
description: skill.description.clone(),
|
||||
short_description: skill.short_description.clone(),
|
||||
interface: skill.interface.clone().map(|interface| {
|
||||
codex_app_server_protocol::SkillInterface {
|
||||
display_name: interface.display_name,
|
||||
short_description: interface.short_description,
|
||||
icon_small: interface.icon_small,
|
||||
icon_large: interface.icon_large,
|
||||
brand_color: interface.brand_color,
|
||||
default_prompt: interface.default_prompt,
|
||||
}
|
||||
}),
|
||||
dependencies: skill.dependencies.clone().map(|dependencies| {
|
||||
codex_app_server_protocol::SkillDependencies {
|
||||
tools: dependencies
|
||||
.tools
|
||||
.into_iter()
|
||||
.map(|tool| codex_app_server_protocol::SkillToolDependency {
|
||||
r#type: tool.r#type,
|
||||
value: tool.value,
|
||||
description: tool.description,
|
||||
transport: tool.transport,
|
||||
command: tool.command,
|
||||
url: tool.url,
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
}),
|
||||
path: skill.path_to_skills_md.clone(),
|
||||
scope: skill.scope.into(),
|
||||
enabled,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn hooks_to_info(hooks: &[codex_hooks::HookListEntry]) -> Vec<HookMetadata> {
|
||||
hooks
|
||||
.iter()
|
||||
.map(|hook| HookMetadata {
|
||||
key: hook.key.clone(),
|
||||
event_name: hook.event_name.into(),
|
||||
handler_type: hook.handler_type.into(),
|
||||
matcher: hook.matcher.clone(),
|
||||
command: hook.command.clone(),
|
||||
timeout_sec: hook.timeout_sec,
|
||||
status_message: hook.status_message.clone(),
|
||||
source_path: hook.source_path.clone(),
|
||||
source: hook.source.into(),
|
||||
plugin_id: hook.plugin_id.clone(),
|
||||
display_order: hook.display_order,
|
||||
enabled: hook.enabled,
|
||||
is_managed: hook.is_managed,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn errors_to_info(
|
||||
errors: &[codex_core::skills::SkillError],
|
||||
) -> Vec<codex_app_server_protocol::SkillErrorInfo> {
|
||||
errors
|
||||
.iter()
|
||||
.map(|err| codex_app_server_protocol::SkillErrorInfo {
|
||||
path: err.path.to_path_buf(),
|
||||
message: err.message.clone(),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
impl CatalogRequestProcessor {
|
||||
pub(crate) fn new(
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
config: Arc<Config>,
|
||||
config_manager: ConfigManager,
|
||||
workspace_settings_cache: Arc<workspace_settings::WorkspaceSettingsCache>,
|
||||
) -> Self {
|
||||
Self {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
config,
|
||||
config_manager,
|
||||
workspace_settings_cache,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn skills_list(
|
||||
&self,
|
||||
params: SkillsListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.skills_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn hooks_list(
|
||||
&self,
|
||||
params: HooksListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.hooks_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn skills_config_write(
|
||||
&self,
|
||||
params: SkillsConfigWriteParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.skills_config_write_response_inner(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn model_list(
|
||||
&self,
|
||||
params: ModelListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
Self::list_models(self.thread_manager.clone(), params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn experimental_feature_list(
|
||||
&self,
|
||||
params: ExperimentalFeatureListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.experimental_feature_list_response(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn collaboration_mode_list(
|
||||
&self,
|
||||
params: CollaborationModeListParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
Self::list_collaboration_modes(self.thread_manager.clone(), params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
pub(crate) async fn mock_experimental_method(
|
||||
&self,
|
||||
params: MockExperimentalMethodParams,
|
||||
) -> Result<Option<ClientResponsePayload>, JSONRPCErrorError> {
|
||||
self.mock_experimental_method_inner(params)
|
||||
.await
|
||||
.map(|response| Some(response.into()))
|
||||
}
|
||||
|
||||
async fn resolve_cwd_config(
|
||||
&self,
|
||||
cwd: &Path,
|
||||
) -> Result<(AbsolutePathBuf, ConfigLayerStack), String> {
|
||||
let cwd_abs =
|
||||
AbsolutePathBuf::relative_to_current_dir(cwd).map_err(|err| err.to_string())?;
|
||||
let config_layer_stack = self
|
||||
.config_manager
|
||||
.load_config_layers_for_cwd(cwd_abs.clone())
|
||||
.await
|
||||
.map_err(|err| err.to_string())?;
|
||||
|
||||
Ok((cwd_abs, config_layer_stack))
|
||||
}
|
||||
|
||||
async fn load_latest_config(
|
||||
&self,
|
||||
fallback_cwd: Option<PathBuf>,
|
||||
) -> Result<Config, JSONRPCErrorError> {
|
||||
self.config_manager
|
||||
.load_latest_config(fallback_cwd)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to reload config: {err}"),
|
||||
data: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn workspace_codex_plugins_enabled(
|
||||
&self,
|
||||
config: &Config,
|
||||
auth: Option<&CodexAuth>,
|
||||
) -> bool {
|
||||
match workspace_settings::codex_plugins_enabled_for_workspace(
|
||||
config,
|
||||
auth,
|
||||
Some(&self.workspace_settings_cache),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(enabled) => enabled,
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"failed to fetch workspace Codex plugins setting; allowing Codex plugins: {err:#}"
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_models(
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
params: ModelListParams,
|
||||
) -> Result<ModelListResponse, JSONRPCErrorError> {
|
||||
let ModelListParams {
|
||||
limit,
|
||||
cursor,
|
||||
include_hidden,
|
||||
} = params;
|
||||
let models = supported_models(thread_manager, include_hidden.unwrap_or(false)).await;
|
||||
let total = models.len();
|
||||
|
||||
if total == 0 {
|
||||
return Ok(ModelListResponse {
|
||||
data: Vec::new(),
|
||||
next_cursor: None,
|
||||
});
|
||||
}
|
||||
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let effective_limit = effective_limit.min(total);
|
||||
let start = match cursor {
|
||||
Some(cursor) => cursor
|
||||
.parse::<usize>()
|
||||
.map_err(|_| invalid_request(format!("invalid cursor: {cursor}")))?,
|
||||
None => 0,
|
||||
};
|
||||
|
||||
if start > total {
|
||||
return Err(invalid_request(format!(
|
||||
"cursor {start} exceeds total models {total}"
|
||||
)));
|
||||
}
|
||||
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let items = models[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Ok(ModelListResponse {
|
||||
data: items,
|
||||
next_cursor,
|
||||
})
|
||||
}
|
||||
|
||||
async fn list_collaboration_modes(
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
params: CollaborationModeListParams,
|
||||
) -> Result<CollaborationModeListResponse, JSONRPCErrorError> {
|
||||
let CollaborationModeListParams {} = params;
|
||||
let items = thread_manager
|
||||
.list_collaboration_modes()
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect();
|
||||
let response = CollaborationModeListResponse { data: items };
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn experimental_feature_list_response(
|
||||
&self,
|
||||
params: ExperimentalFeatureListParams,
|
||||
) -> Result<ExperimentalFeatureListResponse, JSONRPCErrorError> {
|
||||
let ExperimentalFeatureListParams { cursor, limit } = params;
|
||||
let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let workspace_codex_plugins_enabled = self
|
||||
.workspace_codex_plugins_enabled(&config, auth.as_ref())
|
||||
.await;
|
||||
|
||||
let data = FEATURES
|
||||
.iter()
|
||||
.map(|spec| {
|
||||
let (stage, display_name, description, announcement) = match spec.stage {
|
||||
Stage::Experimental {
|
||||
name,
|
||||
menu_description,
|
||||
announcement,
|
||||
} => (
|
||||
ApiExperimentalFeatureStage::Beta,
|
||||
Some(name.to_string()),
|
||||
Some(menu_description.to_string()),
|
||||
Some(announcement.to_string()),
|
||||
),
|
||||
Stage::UnderDevelopment => (
|
||||
ApiExperimentalFeatureStage::UnderDevelopment,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
),
|
||||
Stage::Stable => (ApiExperimentalFeatureStage::Stable, None, None, None),
|
||||
Stage::Deprecated => {
|
||||
(ApiExperimentalFeatureStage::Deprecated, None, None, None)
|
||||
}
|
||||
Stage::Removed => (ApiExperimentalFeatureStage::Removed, None, None, None),
|
||||
};
|
||||
|
||||
ApiExperimentalFeature {
|
||||
name: spec.key.to_string(),
|
||||
stage,
|
||||
display_name,
|
||||
description,
|
||||
announcement,
|
||||
enabled: config.features.enabled(spec.id)
|
||||
&& (workspace_codex_plugins_enabled
|
||||
|| !matches!(spec.id, Feature::Apps | Feature::Plugins)),
|
||||
default_enabled: spec.default_enabled,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let total = data.len();
|
||||
if total == 0 {
|
||||
return Ok(ExperimentalFeatureListResponse {
|
||||
data: Vec::new(),
|
||||
next_cursor: None,
|
||||
});
|
||||
}
|
||||
|
||||
// Clamp to 1 so limit=0 cannot return a non-advancing page.
|
||||
let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
|
||||
let effective_limit = effective_limit.min(total);
|
||||
let start = match cursor {
|
||||
Some(cursor) => match cursor.parse::<usize>() {
|
||||
Ok(idx) => idx,
|
||||
Err(_) => return Err(invalid_request(format!("invalid cursor: {cursor}"))),
|
||||
},
|
||||
None => 0,
|
||||
};
|
||||
|
||||
if start > total {
|
||||
return Err(invalid_request(format!(
|
||||
"cursor {start} exceeds total feature flags {total}"
|
||||
)));
|
||||
}
|
||||
|
||||
let end = start.saturating_add(effective_limit).min(total);
|
||||
let data = data[start..end].to_vec();
|
||||
let next_cursor = if end < total {
|
||||
Some(end.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(ExperimentalFeatureListResponse { data, next_cursor })
|
||||
}
|
||||
|
||||
async fn mock_experimental_method_inner(
|
||||
&self,
|
||||
params: MockExperimentalMethodParams,
|
||||
) -> Result<MockExperimentalMethodResponse, JSONRPCErrorError> {
|
||||
let MockExperimentalMethodParams { value } = params;
|
||||
let response = MockExperimentalMethodResponse { echoed: value };
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn skills_list_response(
|
||||
&self,
|
||||
params: SkillsListParams,
|
||||
) -> Result<SkillsListResponse, JSONRPCErrorError> {
|
||||
let SkillsListParams {
|
||||
cwds,
|
||||
force_reload,
|
||||
per_cwd_extra_user_roots,
|
||||
} = params;
|
||||
let cwds = if cwds.is_empty() {
|
||||
vec![self.config.cwd.to_path_buf()]
|
||||
} else {
|
||||
cwds
|
||||
};
|
||||
let cwd_set: HashSet<PathBuf> = cwds.iter().cloned().collect();
|
||||
|
||||
let mut extra_roots_by_cwd: HashMap<PathBuf, Vec<AbsolutePathBuf>> = HashMap::new();
|
||||
for entry in per_cwd_extra_user_roots.unwrap_or_default() {
|
||||
if !cwd_set.contains(&entry.cwd) {
|
||||
warn!(
|
||||
cwd = %entry.cwd.display(),
|
||||
"ignoring per-cwd extra roots for cwd not present in skills/list cwds"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut valid_extra_roots = Vec::new();
|
||||
for root in entry.extra_user_roots {
|
||||
let root =
|
||||
AbsolutePathBuf::from_absolute_path_checked(root.as_path()).map_err(|_| {
|
||||
invalid_request(format!(
|
||||
"skills/list perCwdExtraUserRoots extraUserRoots paths must be absolute: {}",
|
||||
root.display()
|
||||
))
|
||||
})?;
|
||||
valid_extra_roots.push(root);
|
||||
}
|
||||
extra_roots_by_cwd
|
||||
.entry(entry.cwd)
|
||||
.or_default()
|
||||
.extend(valid_extra_roots);
|
||||
}
|
||||
|
||||
let config = self.load_latest_config(/*fallback_cwd*/ None).await?;
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let workspace_codex_plugins_enabled = self
|
||||
.workspace_codex_plugins_enabled(&config, auth.as_ref())
|
||||
.await;
|
||||
let skills_manager = self.thread_manager.skills_manager();
|
||||
let plugins_manager = self.thread_manager.plugins_manager();
|
||||
let fs = self
|
||||
.thread_manager
|
||||
.environment_manager()
|
||||
.default_environment()
|
||||
.map(|environment| environment.get_filesystem());
|
||||
let mut data = Vec::new();
|
||||
for cwd in cwds {
|
||||
let (cwd_abs, config_layer_stack) = match self.resolve_cwd_config(&cwd).await {
|
||||
Ok(resolved) => resolved,
|
||||
Err(message) => {
|
||||
let error_path = cwd.clone();
|
||||
data.push(codex_app_server_protocol::SkillsListEntry {
|
||||
cwd,
|
||||
skills: Vec::new(),
|
||||
errors: vec![codex_app_server_protocol::SkillErrorInfo {
|
||||
path: error_path,
|
||||
message,
|
||||
}],
|
||||
});
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let extra_roots = extra_roots_by_cwd
|
||||
.get(&cwd)
|
||||
.map_or(&[][..], std::vec::Vec::as_slice);
|
||||
let effective_skill_roots = if workspace_codex_plugins_enabled {
|
||||
let plugins_input = config.plugins_config_input();
|
||||
plugins_manager
|
||||
.effective_skill_roots_for_layer_stack(&config_layer_stack, &plugins_input)
|
||||
.await
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let skills_input = codex_core::skills::SkillsLoadInput::new(
|
||||
cwd_abs.clone(),
|
||||
effective_skill_roots,
|
||||
config_layer_stack,
|
||||
config.bundled_skills_enabled(),
|
||||
);
|
||||
let outcome = skills_manager
|
||||
.skills_for_cwd_with_extra_user_roots(
|
||||
&skills_input,
|
||||
force_reload,
|
||||
extra_roots,
|
||||
fs.clone(),
|
||||
)
|
||||
.await;
|
||||
let errors = errors_to_info(&outcome.errors);
|
||||
let skills = skills_to_info(&outcome.skills, &outcome.disabled_paths);
|
||||
data.push(codex_app_server_protocol::SkillsListEntry {
|
||||
cwd,
|
||||
skills,
|
||||
errors,
|
||||
});
|
||||
}
|
||||
Ok(SkillsListResponse { data })
|
||||
}
|
||||
|
||||
/// Handle `hooks/list` by resolving hooks for each requested cwd.
|
||||
async fn hooks_list_response(
|
||||
&self,
|
||||
params: HooksListParams,
|
||||
) -> Result<HooksListResponse, JSONRPCErrorError> {
|
||||
let HooksListParams { cwds } = params;
|
||||
let cwds = if cwds.is_empty() {
|
||||
vec![self.config.cwd.to_path_buf()]
|
||||
} else {
|
||||
cwds
|
||||
};
|
||||
|
||||
let auth = self.auth_manager.auth().await;
|
||||
let plugins_manager = self.thread_manager.plugins_manager();
|
||||
let mut data = Vec::new();
|
||||
for cwd in cwds {
|
||||
let config = match self
|
||||
.config_manager
|
||||
.load_for_cwd(
|
||||
/*request_overrides*/ None,
|
||||
ConfigOverrides::default(),
|
||||
Some(cwd.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(config) => config,
|
||||
Err(err) => {
|
||||
let error_path = cwd.clone();
|
||||
data.push(codex_app_server_protocol::HooksListEntry {
|
||||
cwd,
|
||||
hooks: Vec::new(),
|
||||
warnings: Vec::new(),
|
||||
errors: vec![codex_app_server_protocol::HookErrorInfo {
|
||||
path: error_path,
|
||||
message: err.to_string(),
|
||||
}],
|
||||
});
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let workspace_codex_plugins_enabled = self
|
||||
.workspace_codex_plugins_enabled(&config, auth.as_ref())
|
||||
.await;
|
||||
let plugins_enabled =
|
||||
config.features.enabled(Feature::Plugins) && workspace_codex_plugins_enabled;
|
||||
let plugin_outcome = if plugins_enabled && config.features.enabled(Feature::PluginHooks)
|
||||
{
|
||||
let plugins_input = config.plugins_config_input();
|
||||
plugins_manager
|
||||
.plugins_for_layer_stack(
|
||||
&config.config_layer_stack,
|
||||
&plugins_input,
|
||||
/*plugin_hooks_feature_enabled*/ true,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
PluginLoadOutcome::default()
|
||||
};
|
||||
let hooks = codex_hooks::list_hooks(codex_hooks::HooksConfig {
|
||||
feature_enabled: config.features.enabled(Feature::CodexHooks),
|
||||
config_layer_stack: Some(config.config_layer_stack),
|
||||
plugin_hook_sources: plugin_outcome.effective_plugin_hook_sources(),
|
||||
plugin_hook_load_warnings: plugin_outcome.effective_plugin_hook_warnings(),
|
||||
..Default::default()
|
||||
});
|
||||
data.push(codex_app_server_protocol::HooksListEntry {
|
||||
cwd,
|
||||
hooks: hooks_to_info(&hooks.hooks),
|
||||
warnings: hooks.warnings,
|
||||
errors: Vec::new(),
|
||||
});
|
||||
}
|
||||
Ok(HooksListResponse { data })
|
||||
}
|
||||
|
||||
async fn skills_config_write_response_inner(
|
||||
&self,
|
||||
params: SkillsConfigWriteParams,
|
||||
) -> Result<SkillsConfigWriteResponse, JSONRPCErrorError> {
|
||||
let SkillsConfigWriteParams {
|
||||
path,
|
||||
name,
|
||||
enabled,
|
||||
} = params;
|
||||
let edit = match (path, name) {
|
||||
(Some(path), None) => ConfigEdit::SetSkillConfig {
|
||||
path: path.into_path_buf(),
|
||||
enabled,
|
||||
},
|
||||
(None, Some(name)) if !name.trim().is_empty() => {
|
||||
ConfigEdit::SetSkillConfigByName { name, enabled }
|
||||
}
|
||||
_ => {
|
||||
return Err(invalid_params(
|
||||
"skills/config/write requires exactly one of path or name",
|
||||
));
|
||||
}
|
||||
};
|
||||
let edits = vec![edit];
|
||||
ConfigEditsBuilder::new(&self.config.codex_home)
|
||||
.with_edits(edits)
|
||||
.apply()
|
||||
.await
|
||||
.map(|()| {
|
||||
self.thread_manager.plugins_manager().clear_cache();
|
||||
self.thread_manager.skills_manager().clear_cache();
|
||||
SkillsConfigWriteResponse {
|
||||
effective_enabled: enabled,
|
||||
}
|
||||
})
|
||||
.map_err(|err| internal_error(format!("failed to update skill settings: {err}")))
|
||||
}
|
||||
}
|
||||
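The list endpoints above (paginate_apps in apps_processor.rs, list_models and experimental_feature_list_response in catalog_processor.rs) share one cursor scheme: the cursor is simply the next start index rendered as a decimal string, and the limit is clamped to at least 1 so limit=0 cannot yield a non-advancing page. A standalone sketch of that behaviour, using plain Strings in place of the protocol types:

// Sketch only: restates the cursor pagination shown above with plain Strings.
fn paginate(
    items: &[String],
    cursor: Option<&str>,
    limit: Option<u32>,
) -> Result<(Vec<String>, Option<String>), String> {
    let total = items.len();
    let start = match cursor {
        Some(c) => c
            .parse::<usize>()
            .map_err(|_| format!("invalid cursor: {c}"))?,
        None => 0,
    };
    if start > total {
        return Err(format!("cursor {start} exceeds total items {total}"));
    }
    // Clamp to 1 so limit = 0 cannot return a non-advancing page.
    let effective_limit = limit.unwrap_or(total as u32).max(1) as usize;
    let end = start.saturating_add(effective_limit).min(total);
    let next_cursor = (end < total).then(|| end.to_string());
    Ok((items[start..end].to_vec(), next_cursor))
}

fn main() {
    let items: Vec<String> = (1..=5).map(|i| format!("item-{i}")).collect();
    // First page: indices 0..2, next cursor "2".
    let (page, cursor) = paginate(&items, None, Some(2)).unwrap();
    assert_eq!(page, vec!["item-1", "item-2"]);
    assert_eq!(cursor.as_deref(), Some("2"));
    // Follow the cursor: indices 2..4, next cursor "4".
    let (page, cursor) = paginate(&items, cursor.as_deref(), Some(2)).unwrap();
    assert_eq!(page, vec!["item-3", "item-4"]);
    assert_eq!(cursor.as_deref(), Some("4"));
}

The same invariants hold in the handlers above: a cursor past the end is rejected rather than clamped, and next_cursor is omitted once the final page has been returned.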
Some files were not shown because too many files have changed in this diff.