Mirror of https://github.com/openai/codex.git, synced 2026-05-08 21:32:33 +00:00
Compare commits: pr20386...ruslan/app (140 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 25a5dc8a58 |  |
|  | a1947079e3 |  |
|  | c46f532ed6 |  |
|  | b02d6996a0 |  |
|  | 8d28f78d92 |  |
|  | 82fa3864f0 |  |
|  | 8007522ede |  |
|  | fc4b4d4836 |  |
|  | 1710cd3614 |  |
|  | 2200b1b3a6 |  |
|  | f94b29ff42 |  |
|  | f1722640d9 |  |
|  | 8f81ecd7dc |  |
|  | 5c53058c4c |  |
|  | 44167d87a6 |  |
|  | 6ef5841772 |  |
|  | 182cd68400 |  |
|  | 8e745718b1 |  |
|  | 7125f7bcb2 |  |
|  | 59b3d20ca7 |  |
|  | 854c39fb48 |  |
|  | 4950e7d8a6 |  |
|  | a8db4af5c3 |  |
|  | aee1fe2659 |  |
|  | e7e6267ab3 |  |
|  | 36912ce3de |  |
|  | 30de54da36 |  |
|  | 87d2235b54 |  |
|  | a6599b8202 |  |
|  | 229b40aa21 |  |
|  | 8126af3879 |  |
|  | b9e8df47da |  |
|  | 48402be6fa |  |
|  | cc16995cc6 |  |
|  | c2fed01550 |  |
|  | 4d201e340e |  |
|  | 0035d7bd18 |  |
|  | 5d5500650b |  |
|  | 905987c08f |  |
|  | 5c1ec8f4fd |  |
|  | 94800ecbbf |  |
|  | 5b80f87c97 |  |
|  | 541e99cf09 |  |
|  | 1b900bee8a |  |
|  | 83a4e3b66b |  |
|  | e3451ce6be |  |
|  | 4fd7dfe223 |  |
|  | f20f8a719e |  |
|  | 161541310f |  |
|  | 33b19bcfde |  |
|  | 12a729f2b2 |  |
|  | f072119ccf |  |
|  | 3c2dcbef85 |  |
|  | 2f5c06a29c |  |
|  | 8ba294ea13 |  |
|  | 5512b23c95 |  |
|  | 0269a46ab1 |  |
|  | 554223ab80 |  |
|  | 29352569b3 |  |
|  | 5730615e75 |  |
|  | 6b6581ac59 |  |
|  | 019755d570 |  |
|  | d927f61208 |  |
|  | d013155f40 |  |
|  | f48b777717 |  |
|  | c8c30d9d75 |  |
|  | 9ddfda9db7 |  |
|  | 67849d950d |  |
|  | 39555036a3 |  |
|  | 35aaa5d9fc |  |
|  | f88701f5c8 |  |
|  | 127434cd8b |  |
|  | 9e905528bb |  |
|  | cd2760fc08 |  |
|  | 466798aa83 |  |
|  | a5fbcf1ab4 |  |
|  | 2952beb009 |  |
|  | d55479488e |  |
|  | 443f6b831e |  |
|  | aed74e5ee4 |  |
|  | 610eefb86b |  |
|  | 2817866a32 |  |
|  | ff66b3c7eb |  |
|  | be71b6fcd1 |  |
|  | e4d6675632 |  |
|  | 78baa20780 |  |
|  | 9b8d585075 |  |
|  | 6784db51c0 |  |
|  | 41e171fcf2 |  |
|  | 5744b85b9a |  |
|  | 3d1d164aee |  |
|  | 227bee0445 |  |
|  | f476338f93 |  |
|  | 0b04d1b3cc |  |
|  | ff27d01676 |  |
|  | 70fc55b8f3 |  |
|  | 97aae46800 |  |
|  | ad404c8400 |  |
|  | 48791920a8 |  |
|  | 96d2ea9058 |  |
|  | a62b52f826 |  |
|  | a93c89f497 |  |
|  | d898cc8f3f |  |
|  | fe05acad23 |  |
|  | f50c02d7bc |  |
|  | bb60b78c46 |  |
|  | c39824c2fd |  |
|  | 6b1b227804 |  |
|  | 972b819213 |  |
|  | af089fb21d |  |
|  | 4f96001fa7 |  |
|  | 0d9a5d20ec |  |
|  | 5affb7f9d5 |  |
|  | acdf908268 |  |
|  | b6f81257f8 |  |
|  | a5ebedef67 |  |
|  | 5de7992ee5 |  |
|  | 2686873e77 |  |
|  | 9ddb267e9c |  |
|  | 6014b6679f |  |
|  | 8426edf71e |  |
|  | 7b3de63041 |  |
|  | 127be0612c |  |
|  | 9121132c8f |  |
|  | 70090c9ff7 |  |
|  | 8121710ffe |  |
|  | 7dd08e304c |  |
|  | 06f3b4836a |  |
|  | 31f8813e3e |  |
|  | 93d53f655b |  |
|  | 719431da6e |  |
|  | b52083146c |  |
|  | f2bc2f26a9 |  |
|  | 5cc5f12efc |  |
|  | c70cdc108f |  |
|  | 487716ae74 |  |
|  | a85d265097 |  |
|  | c02814c106 |  |
|  | 3516cb9751 |  |
|  | 8a97f3cf03 |  |
23
.bazelrc
@@ -79,6 +79,10 @@ common:ci --disk_cache=
# Shared config for the main Bazel CI workflow.
common:ci-bazel --config=ci
common:ci-bazel --build_metadata=TAG_workflow=bazel
# Bazel CI cross-compiles in several legs, and the V8-backed code-mode tests
# are not stable in that setup yet. Keep running the rest of the Rust
# integration suites through the workspace-root launcher.
common:ci-bazel --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::

# Shared config for Bazel-backed Rust linting.
build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect
@@ -153,6 +157,25 @@ common:ci-macos --config=remote
common:ci-macos --strategy=remote
common:ci-macos --strategy=TestRunner=darwin-sandbox,local

# On Windows, use Linux remote execution for build actions but keep test actions
# on the Windows runner so Bazel's normal test sharding and flaky-test retries
# still run against Windows binaries.
common:ci-windows-cross --config=ci-windows
common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
common:ci-windows-cross --config=remote
common:ci-windows-cross --host_platform=//:rbe
common:ci-windows-cross --strategy=remote
common:ci-windows-cross --strategy=TestRunner=local
common:ci-windows-cross --local_test_jobs=4
common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
# Native Windows CI still covers the PowerShell tests. The cross-built gnullvm
# binaries currently hang in PowerShell AST parser tests when those binaries are
# run on the Windows runner.
common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain

# Linux-only V8 CI config.
common:ci-v8 --config=ci
common:ci-v8 --build_metadata=TAG_workflow=v8
11
.codex/environments/environment.toml
Normal file
@@ -0,0 +1,11 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "codex"

[setup]
script = ""

[[actions]]
name = "Run"
icon = "run"
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"
@@ -27,10 +27,10 @@ Accept any of the following:
2. Run the watcher script to snapshot PR/review/CI state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push. Do not patch random flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are unrelated to the branch.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, push, and then mark the associated review thread/comment as resolved once the fix is on GitHub.
8. If a review item from another author is non-actionable, already addressed, or not valid, post one reply on the comment/thread explaining that decision (for example answering the question or explaining why no change is needed). Prefix the GitHub reply body with `[codex]` so it is clear the response is automated. If the watcher later surfaces your own reply, treat that self-authored item as already handled and do not reply again.
8. Do not post replies to human-authored review comments/threads unless the user explicitly confirms the exact response. If a human review item is non-actionable, already addressed, or not valid, surface the item and recommended response to the user instead of replying on GitHub.
9. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
10. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
11. On every loop, look for newly surfaced review feedback before acting on CI failures or mergeability state, then verify mergeability / merge-conflict status (for example via `gh pr view`) alongside CI.
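A minimal sketch of steps 2 to 4, not the skill's actual driver: it assumes the watcher prints one JSON snapshot to stdout when run without `--watch`, and the PR number is a placeholder. The action strings and snapshot field names are the ones used by this skill.

```python
import json
import subprocess

PR = "1234"  # placeholder PR number or URL

# Take one watcher snapshot and dispatch on the recommended actions.
result = subprocess.run(
    ["python3", ".codex/skills/babysit-pr/scripts/gh_pr_watch.py", "--pr", PR],
    capture_output=True, text=True, check=True,
)
snapshot = json.loads(result.stdout)

actions = snapshot.get("actions", [])
if "diagnose_ci_failure" in actions:
    # Inspect failed run/job logs with the gh commands below, then classify.
    print("CI failure surfaced:", snapshot.get("failed_runs"))
if "process_review_comment" in actions:
    for item in snapshot.get("new_review_items", []):
        print("review item:", item.get("kind"), item.get("id"))
if "retry_failed_checks" in actions:
    print("watcher recommends rerunning failed checks on the current SHA")
if "ready_to_merge" in actions:
    print("PR currently ready to merge; keep watching while it stays open")
```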
@@ -69,12 +69,18 @@ python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --o
Use `gh` commands to inspect failed runs before deciding to rerun.

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`
- `gh api repos/<owner>/<repo>/actions/runs/<run-id>/jobs -X GET -f per_page=100`
- `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > /tmp/codex-gh-job-<job-id>-logs.zip`
- `gh run view <run-id> --log-failed` as a fallback after the overall workflow run is complete

Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
`gh run view --log-failed` is workflow-run scoped and may not expose failed-job logs until the overall run finishes. For faster diagnosis, poll the run's jobs first and, as soon as a specific job has failed, fetch that job's logs directly from the Actions job logs endpoint. The watcher includes a `failed_jobs` list with each failed job's `job_id` and `logs_endpoint` when GitHub exposes one.

Prefer treating failures as branch-related when failed-job logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).

Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).

Do not attempt to fix flaky/unrelated failures by changing tests, build scripts, CI configuration, dependency pins, or infrastructure-adjacent code unless the logs clearly connect the failure to the PR branch. For flaky/unrelated failures, rerun only when the watcher recommends `retry_failed_checks`; otherwise wait or stop for user help.

If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.

Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
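A minimal sketch of the per-job fast path described above: poll the run's jobs and, as soon as one job has failed, pull its logs straight from the Actions job logs endpoint. It assumes an authenticated `gh`; the owner/repo and run id are placeholders, and the log payload is scanned as plain text.

```python
import json
import subprocess

REPO = "<owner>/<repo>"  # placeholder
RUN_ID = "<run-id>"      # placeholder

# Poll the run's jobs instead of waiting for the whole workflow run to finish.
jobs_payload = subprocess.run(
    ["gh", "api", f"repos/{REPO}/actions/runs/{RUN_ID}/jobs", "-X", "GET", "-f", "per_page=100"],
    capture_output=True, text=True, check=True,
)
jobs = json.loads(jobs_payload.stdout).get("jobs", [])

for job in jobs:
    if job.get("conclusion") != "failure":
        continue
    # Fetch this job's logs directly from the Actions job logs endpoint.
    raw = subprocess.run(
        ["gh", "api", f"repos/{REPO}/actions/jobs/{job['id']}/logs"],
        capture_output=True, check=True,
    ).stdout
    text = raw.decode("utf-8", errors="replace")
    for line in text.splitlines():
        if "error" in line.lower():
            print(f"{job.get('name')}: {line}")
```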
@@ -99,7 +105,8 @@ When you agree with a comment and it is actionable:
5. Resume watching on the new SHA immediately (do not stop after reporting the push).
6. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.

If you disagree or the comment is non-actionable/already addressed, reply once directly on the GitHub comment/thread so the reviewer gets an explicit answer, then continue the watcher loop. Prefix any GitHub reply to a code review comment/thread with `[codex]` so it is clear the response is automated and not from the human user. If the watcher later surfaces your own reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
Do not post replies to human-authored GitHub review comments/threads automatically. If you disagree with a human comment, believe it is non-actionable/already addressed, or need to answer a question, report the item to the user with a suggested response and wait for explicit confirmation before posting anything on GitHub. If the user approves a response, prefix it with `[codex]` so it is clear the response is automated and not from the human user.
If the watcher later surfaces your own approved reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.

## Git Safety Rules
@@ -125,11 +132,11 @@ Use this loop in a live Codex session:
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. For each surfaced review item from another author, either reply once with an explanation if it is non-actionable or patch/commit/push and then resolve it if it is actionable. If a later snapshot surfaces your own reply, treat it as informational and continue without responding again.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated. If the overall run is still pending but `failed_jobs` already includes a failed job, fetch that job's logs and diagnose immediately instead of waiting for the whole workflow run to finish. Patch only when the failure is branch-related.
6. For each surfaced review item from another author, patch/commit/push and then resolve it if it is actionable. If it is non-actionable, already addressed, or requires a written answer, surface it to the user with a suggested response instead of posting automatically. If a later snapshot surfaces your own approved reply, treat it as informational and continue without responding again.
7. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
9. If you pushed a commit, resolved a review thread, replied to a review comment, or triggered a rerun, report the action briefly and continue polling (do not stop).
8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit. Do not make code changes for unrelated flakes or infrastructure failures just to get CI green.
9. If you pushed a commit, resolved a review thread, or triggered a rerun, report the action briefly and continue polling (do not stop). If a human review comment needs a written GitHub response, stop and ask for confirmation before posting.
10. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
11. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report that the PR is currently ready to merge but keep the watcher running so new review comments are surfaced quickly while the PR remains open.
12. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
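Once the user has explicitly confirmed the exact response for a human-authored review comment, the approved reply can be posted with the `[codex]` prefix. A hedged sketch, assuming the standard GitHub REST route for replying to a pull request review comment; the ids and the reply text are placeholders.

```python
import subprocess

REPO = "<owner>/<repo>"               # placeholders
PR_NUMBER = "<pr-number>"
COMMENT_ID = "<review-comment-id>"
approved_text = "<user-approved response text>"

# Post only after the user has confirmed this exact response.
subprocess.run(
    [
        "gh", "api", "-X", "POST",
        f"repos/{REPO}/pulls/{PR_NUMBER}/comments/{COMMENT_ID}/replies",
        "-f", f"body=[codex] {approved_text}",
    ],
    check=True,
)
```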
@@ -1,4 +1,4 @@
|
||||
interface:
|
||||
display_name: "PR Babysitter"
|
||||
short_description: "Watch PR review comments, CI, and merge conflicts"
|
||||
default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
|
||||
default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Do not post replies to human-authored review comments unless the user explicitly confirms the exact response. Do not patch unrelated flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are not caused by the branch. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
|
||||
|
||||
@@ -23,9 +23,11 @@ Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/{owner}/{repo}/actions/runs/{run_id}/jobs -X GET -f per_page=100`
- `gh api repos/{owner}/{repo}/actions/jobs/{job_id}/logs > /tmp/codex-gh-job-{job_id}-logs.zip`
- `gh run view <run-id> --log-failed`

Used by Codex to classify branch-related vs flaky/unrelated failures.
Used by Codex to classify branch-related vs flaky/unrelated failures. Prefer the direct job log endpoint as soon as a job has failed because `gh run view --log-failed` may not produce failed-job logs until the overall workflow run completes.

### Retry failed jobs only
@@ -70,3 +72,11 @@ Reruns only failed jobs (and dependencies) for a workflow run.
- `conclusion`
- `html_url`
- `head_sha`

### Actions run jobs API (`jobs[]`)

- `id`
- `name`
- `status`
- `conclusion`
- `html_url`
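For the "Retry failed jobs only" action, a small sketch that pairs `gh run rerun --failed` with the flaky-retry budget used elsewhere in this skill (default 3 per head SHA). The run id list and the retry counter are placeholders; in practice they come from `failed_runs` and `retry_state` in the watcher snapshot.

```python
import subprocess

MAX_FLAKY_RETRIES = 3   # default flaky-retry budget per head SHA in this skill
retries_used = 0        # placeholder; read from retry_state in the snapshot

failed_run_ids = ["<run-id>"]  # placeholder; taken from failed_runs entries

for run_id in failed_run_ids:
    if retries_used >= MAX_FLAKY_RETRIES:
        print("Flaky retry budget exhausted; stop and report instead of rerunning.")
        break
    # Reruns only the failed jobs (and their dependencies) for this workflow run.
    subprocess.run(["gh", "run", "rerun", run_id, "--failed"], check=True)
    retries_used += 1
```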
@@ -18,6 +18,8 @@ Treat as **likely flaky or unrelated** when evidence points to transient or exte
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns

Do not patch likely flaky/unrelated failures. Use the retry budget for rerunnable failures, wait for pending jobs, or stop and report the blocker when the failure is persistent or infrastructure-owned.

If uncertain, inspect failed logs once before choosing rerun.

## Decision tree (fix vs rerun vs stop)
@@ -25,9 +27,11 @@ If uncertain, inspect failed logs once before choosing rerun.
1. If PR is merged/closed: stop.
2. If there are failed checks:
   - Diagnose first.
   - If checks are still pending but an individual job has already failed: fetch that job's logs and diagnose now.
   - If branch-related: fix locally, commit, push.
   - If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
   - If checks are still pending: wait.
   - If likely flaky/unrelated and not safely rerunnable: stop and report the blocker; do not edit unrelated tests, build scripts, CI configuration, dependency pins, or infrastructure code.
   - If checks are still pending and no failed job is available yet: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
@@ -40,12 +44,15 @@ Address the comment when:
- The requested change does not conflict with the user’s intent or recent guidance.
- The change can be made safely without unrelated refactors.

Fix valid human review feedback in code when possible, but do not post a GitHub reply to a human-authored comment/thread unless the user explicitly confirms the exact response.

Do not auto-fix when:

- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
- The comment only needs a written answer or disagreement response; propose the reply to the user instead of posting it automatically.

## Stop-and-ask conditions
@@ -56,3 +63,4 @@ Stop and ask the user instead of continuing automatically when:
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
- A human review comment requires a written GitHub reply instead of a code change.
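The decision tree above, restated as a sketch over the watcher snapshot fields (`pr`, `checks`, `failed_jobs`, `retry_state`). The key names inside `retry_state` are assumed here, and the branch-related vs flaky classification still requires reading the failed logs, so it is passed in as an input rather than computed.

```python
def next_step(snapshot: dict, branch_related: bool | None) -> str:
    """Sketch of the fix vs rerun vs wait vs stop choice; not the real implementation."""
    pr = snapshot["pr"]
    checks = snapshot["checks"]
    if pr.get("merged") or pr.get("closed"):
        return "stop: PR is merged/closed"

    has_failures = checks.get("failed_count", 0) > 0 or bool(snapshot.get("failed_jobs"))
    if has_failures:
        if branch_related is None:
            return "diagnose: fetch the failed job's logs and classify first"
        if branch_related:
            return "fix: patch locally, commit, push"
        retry = snapshot.get("retry_state", {})
        # Key names inside retry_state are assumed for this sketch.
        if retry.get("retries_used", 0) >= retry.get("max_retries", 3):
            return "stop: flaky retries exhausted for this SHA"
        if checks.get("all_terminal"):
            return "rerun: retry failed checks on the current SHA"
        return "wait: checks still pending and no safe rerun yet"
    return "wait: no failed checks; keep watching for new review feedback"
```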
@@ -338,6 +338,66 @@ def failed_runs_from_workflow_runs(runs, head_sha):
|
||||
return failed_runs
|
||||
|
||||
|
||||
def get_jobs_for_run(repo, run_id):
|
||||
endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
|
||||
data = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"], repo=repo)
|
||||
if not isinstance(data, dict):
|
||||
raise GhCommandError("Unexpected payload from actions run jobs API")
|
||||
jobs = data.get("jobs") or []
|
||||
if not isinstance(jobs, list):
|
||||
raise GhCommandError("Expected `jobs` to be a list")
|
||||
return jobs
|
||||
|
||||
|
||||
def failed_jobs_from_workflow_runs(repo, runs, head_sha):
|
||||
failed_jobs = []
|
||||
for run in runs:
|
||||
if not isinstance(run, dict):
|
||||
continue
|
||||
if str(run.get("head_sha") or "") != head_sha:
|
||||
continue
|
||||
run_id = run.get("id")
|
||||
if run_id in (None, ""):
|
||||
continue
|
||||
run_status = str(run.get("status") or "")
|
||||
run_conclusion = str(run.get("conclusion") or "")
|
||||
if run_status.lower() == "completed" and run_conclusion not in FAILED_RUN_CONCLUSIONS:
|
||||
continue
|
||||
jobs = get_jobs_for_run(repo, run_id)
|
||||
for job in jobs:
|
||||
if not isinstance(job, dict):
|
||||
continue
|
||||
conclusion = str(job.get("conclusion") or "")
|
||||
if conclusion not in FAILED_RUN_CONCLUSIONS:
|
||||
continue
|
||||
job_id = job.get("id")
|
||||
logs_endpoint = None
|
||||
if job_id not in (None, ""):
|
||||
logs_endpoint = f"repos/{repo}/actions/jobs/{job_id}/logs"
|
||||
failed_jobs.append(
|
||||
{
|
||||
"run_id": run_id,
|
||||
"workflow_name": run.get("name") or run.get("display_title") or "",
|
||||
"run_status": run_status,
|
||||
"run_conclusion": run_conclusion,
|
||||
"job_id": job_id,
|
||||
"job_name": str(job.get("name") or ""),
|
||||
"status": str(job.get("status") or ""),
|
||||
"conclusion": conclusion,
|
||||
"html_url": str(job.get("html_url") or ""),
|
||||
"logs_endpoint": logs_endpoint,
|
||||
}
|
||||
)
|
||||
failed_jobs.sort(
|
||||
key=lambda item: (
|
||||
str(item.get("workflow_name") or ""),
|
||||
str(item.get("job_name") or ""),
|
||||
str(item.get("job_id") or ""),
|
||||
)
|
||||
)
|
||||
return failed_jobs
|
||||
|
||||
|
||||
def get_authenticated_login():
|
||||
data = gh_json(["api", "user"])
|
||||
if not isinstance(data, dict) or not data.get("login"):
|
||||
@@ -568,7 +628,7 @@ def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
|
||||
return True
|
||||
|
||||
|
||||
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
|
||||
def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_items, retries_used, max_retries):
|
||||
actions = []
|
||||
if pr["closed"] or pr["merged"]:
|
||||
if new_review_items:
|
||||
@@ -583,7 +643,7 @@ def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries
|
||||
if new_review_items:
|
||||
actions.append("process_review_comment")
|
||||
|
||||
has_failed_pr_checks = checks_summary["failed_count"] > 0
|
||||
has_failed_pr_checks = checks_summary["failed_count"] > 0 or bool(failed_jobs)
|
||||
if has_failed_pr_checks:
|
||||
if checks_summary["all_terminal"] and retries_used >= max_retries:
|
||||
actions.append("stop_exhausted_retries")
|
||||
@@ -621,12 +681,14 @@ def collect_snapshot(args):
|
||||
checks_summary = summarize_checks(checks)
|
||||
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
|
||||
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
|
||||
failed_jobs = failed_jobs_from_workflow_runs(pr["repo"], workflow_runs, pr["head_sha"])
|
||||
|
||||
retries_used = current_retry_count(state, pr["head_sha"])
|
||||
actions = recommend_actions(
|
||||
pr,
|
||||
checks_summary,
|
||||
failed_runs,
|
||||
failed_jobs,
|
||||
new_review_items,
|
||||
retries_used,
|
||||
args.max_flaky_retries,
|
||||
@@ -641,6 +703,7 @@ def collect_snapshot(args):
|
||||
"pr": pr,
|
||||
"checks": checks_summary,
|
||||
"failed_runs": failed_runs,
|
||||
"failed_jobs": failed_jobs,
|
||||
"new_review_items": new_review_items,
|
||||
"actions": actions,
|
||||
"retry_state": {
|
||||
|
||||
@@ -75,6 +75,11 @@ def test_collect_snapshot_fetches_review_items_before_ci(monkeypatch, tmp_path):
|
||||
"failed_runs_from_workflow_runs",
|
||||
lambda *args, **kwargs: call_order.append("failed_runs") or [],
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"failed_jobs_from_workflow_runs",
|
||||
lambda *args, **kwargs: call_order.append("failed_jobs") or [],
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"recommend_actions",
|
||||
@@ -100,6 +105,7 @@ def test_recommend_actions_prioritizes_review_comments():
|
||||
sample_pr(),
|
||||
sample_checks(failed_count=1),
|
||||
[{"run_id": 99}],
|
||||
[],
|
||||
[{"kind": "review_comment", "id": "1"}],
|
||||
0,
|
||||
3,
|
||||
@@ -119,6 +125,7 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
|
||||
"pr": sample_pr(),
|
||||
"checks": sample_checks(),
|
||||
"failed_runs": [],
|
||||
"failed_jobs": [],
|
||||
"new_review_items": [],
|
||||
"actions": ["ready_to_merge"],
|
||||
"retry_state": {
|
||||
@@ -153,3 +160,58 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
|
||||
|
||||
assert sleeps == [30, 30]
|
||||
assert [event for event, _ in events] == ["snapshot", "snapshot"]
|
||||
|
||||
|
||||
def test_failed_jobs_include_direct_logs_endpoint(monkeypatch):
|
||||
jobs_by_run = {
|
||||
99: [
|
||||
{
|
||||
"id": 555,
|
||||
"name": "unit tests",
|
||||
"status": "completed",
|
||||
"conclusion": "failure",
|
||||
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
|
||||
},
|
||||
{
|
||||
"id": 556,
|
||||
"name": "lint",
|
||||
"status": "completed",
|
||||
"conclusion": "success",
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
monkeypatch.setattr(
|
||||
gh_pr_watch,
|
||||
"get_jobs_for_run",
|
||||
lambda repo, run_id: jobs_by_run[run_id],
|
||||
)
|
||||
|
||||
failed_jobs = gh_pr_watch.failed_jobs_from_workflow_runs(
|
||||
"openai/codex",
|
||||
[
|
||||
{
|
||||
"id": 99,
|
||||
"name": "CI",
|
||||
"status": "in_progress",
|
||||
"conclusion": "",
|
||||
"head_sha": "abc123",
|
||||
}
|
||||
],
|
||||
"abc123",
|
||||
)
|
||||
|
||||
assert failed_jobs == [
|
||||
{
|
||||
"run_id": 99,
|
||||
"workflow_name": "CI",
|
||||
"run_status": "in_progress",
|
||||
"run_conclusion": "",
|
||||
"job_id": 555,
|
||||
"job_name": "unit tests",
|
||||
"status": "completed",
|
||||
"conclusion": "failure",
|
||||
"html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
|
||||
"logs_endpoint": "repos/openai/codex/actions/jobs/555/logs",
|
||||
}
|
||||
]
|
||||
|
||||
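As a usage sketch for the `failed_jobs` entries added above: each entry carries a `logs_endpoint` when GitHub exposes one, so a snapshot consumer can jump straight to the failed job's logs. The snapshot file path is a placeholder and `gh` is assumed to be authenticated.

```python
import json
import subprocess

# Placeholder path to a snapshot emitted by gh_pr_watch.py.
with open("/tmp/codex-pr-snapshot.json", encoding="utf-8") as fh:
    snapshot = json.load(fh)

for job in snapshot.get("failed_jobs", []):
    endpoint = job.get("logs_endpoint")
    if not endpoint:
        # No direct logs endpoint surfaced; fall back to `gh run view --log-failed`
        # once the overall workflow run has completed.
        continue
    logs = subprocess.run(
        ["gh", "api", endpoint],
        capture_output=True, text=True, check=True,
    ).stdout
    header = f"{job.get('workflow_name')} / {job.get('job_name')} (run {job.get('run_id')})"
    print(f"== {header} ==")
    print("\n".join(logs.splitlines()[-40:]))  # tail of the failed job's log
```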
14
.github/scripts/compute-bazel-windows-path.ps1
vendored
@@ -5,9 +5,9 @@ tool entries, such as Maven, that can change independently of this repo and
|
||||
cause avoidable cache misses.
|
||||
|
||||
This script derives a smaller, cache-stable PATH that keeps the Windows
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths, Git,
|
||||
PowerShell, Node, Python, DotSlash, and the standard Windows system
|
||||
directories.
|
||||
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
|
||||
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
|
||||
DotSlash, and the standard Windows system directories.
|
||||
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
|
||||
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
|
||||
steps can pass that explicit PATH to Bazel.
|
||||
@@ -49,6 +49,8 @@ foreach ($pathEntry in ($env:PATH -split ';')) {
|
||||
$pathEntry -like '*Microsoft Visual Studio*' -or
|
||||
$pathEntry -like '*Windows Kits*' -or
|
||||
$pathEntry -like '*Microsoft SDKs*' -or
|
||||
$pathEntry -eq 'C:\mingw64\bin' -or
|
||||
$pathEntry -like 'C:\msys64\*\bin' -or
|
||||
$pathEntry -like 'C:\Program Files\Git\*' -or
|
||||
$pathEntry -like 'C:\Program Files\PowerShell\*' -or
|
||||
$pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
|
||||
@@ -85,6 +87,12 @@ if ($pwshCommand) {
|
||||
Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
|
||||
}
|
||||
|
||||
foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
|
||||
if (Test-Path $mingwPath) {
|
||||
Add-StablePathEntry $mingwPath
|
||||
}
|
||||
}
|
||||
|
||||
if ($windowsAppsPath) {
|
||||
Add-StablePathEntry $windowsAppsPath
|
||||
}
|
||||
|
||||
110
.github/scripts/run-bazel-ci.sh
vendored
@@ -6,6 +6,7 @@ print_failed_bazel_test_logs=0
|
||||
print_failed_bazel_action_summary=0
|
||||
remote_download_toplevel=0
|
||||
windows_msvc_host_platform=0
|
||||
windows_cross_compile=0
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
@@ -25,6 +26,10 @@ while [[ $# -gt 0 ]]; do
|
||||
windows_msvc_host_platform=1
|
||||
shift
|
||||
;;
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -37,7 +42,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] -- <bazel args> -- <targets>" >&2
|
||||
echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -61,7 +66,11 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
ci_config=ci-windows
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -105,8 +114,8 @@ print_bazel_test_log_tails() {
|
||||
while IFS= read -r target; do
|
||||
failed_targets+=("$target")
|
||||
done < <(
|
||||
grep -E '^FAIL: //' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
|
||||
grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
|
||||
| sort -u
|
||||
)
|
||||
|
||||
@@ -244,6 +253,12 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Fork PRs do not receive the BuildBuddy secret needed for the remote
|
||||
# cross-compile config. Preserve the previous local Windows build shape.
|
||||
windows_msvc_host_platform=1
|
||||
fi
|
||||
|
||||
post_config_bazel_args=()
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
|
||||
has_host_platform_override=0
|
||||
@@ -269,6 +284,25 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
|
||||
post_config_bazel_args+=(--remote_download_toplevel)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# `--enable_platform_specific_config` expands `common:windows` on Windows
|
||||
# hosts after ordinary rc configs, which can override `ci-windows-cross`'s
|
||||
# RBE host platform. Repeat the host platform on the command line so V8 and
|
||||
# other genrules execute on Linux RBE workers instead of Git Bash locally.
|
||||
#
|
||||
# Bazel also derives the default genrule shell from the client host. Without
|
||||
# an explicit shell executable, remote Linux actions can be asked to run
|
||||
# `C:\Program Files\Git\usr\bin\bash.exe`.
|
||||
post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The Windows cross-compile config depends on remote execution. Fork PRs do
|
||||
# not receive the BuildBuddy secret, so fall back to the existing local build
|
||||
# shape and keep its lower concurrency cap.
|
||||
post_config_bazel_args+=(--jobs=8)
|
||||
fi
|
||||
|
||||
if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
|
||||
# Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
|
||||
# each job its own repo contents cache so they do not fight over the shared
|
||||
@@ -287,37 +321,57 @@ if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
|
||||
fi
|
||||
|
||||
if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
pass_windows_build_env=1
|
||||
if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# Remote build actions execute on Linux RBE workers. Passing the Windows
|
||||
# runner's build environment there makes Bazel genrules try to execute
|
||||
# C:\Program Files\Git\usr\bin\bash.exe on Linux.
|
||||
pass_windows_build_env=0
|
||||
fi
|
||||
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
windows_action_env_vars=(
|
||||
INCLUDE
|
||||
LIB
|
||||
LIBPATH
|
||||
UCRTVersion
|
||||
UniversalCRTSdkDir
|
||||
VCINSTALLDIR
|
||||
VCToolsInstallDir
|
||||
WindowsLibPath
|
||||
WindowsSdkBinPath
|
||||
WindowsSdkDir
|
||||
WindowsSDKLibVersion
|
||||
WindowsSDKVersion
|
||||
)
|
||||
|
||||
for env_var in "${windows_action_env_vars[@]}"; do
|
||||
if [[ -n "${!env_var:-}" ]]; then
|
||||
post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
|
||||
echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
if [[ $pass_windows_build_env -eq 1 ]]; then
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
"--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
|
||||
)
|
||||
elif [[ $windows_cross_compile -eq 1 ]]; then
|
||||
# Remote build actions run on Linux RBE workers. Give their shell snippets
|
||||
# a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
|
||||
# Windows test execution.
|
||||
post_config_bazel_args+=(
|
||||
"--action_env=PATH=/usr/bin:/bin"
|
||||
"--host_action_env=PATH=/usr/bin:/bin"
|
||||
)
|
||||
fi
|
||||
post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
|
||||
fi
|
||||
|
||||
bazel_console_log="$(mktemp)"
|
||||
|
||||
13
.github/scripts/run-bazel-query-ci.sh
vendored
@@ -6,8 +6,13 @@ set -euo pipefail
|
||||
# invocation so target-discovery queries can reuse the same Bazel server.
|
||||
|
||||
query_args=()
|
||||
windows_cross_compile=0
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--windows-cross-compile)
|
||||
windows_cross_compile=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
@@ -20,7 +25,7 @@ while [[ $# -gt 0 ]]; do
|
||||
done
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
echo "Usage: $0 [<bazel query args>...] -- <query expression>" >&2
|
||||
echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -32,7 +37,11 @@ case "${RUNNER_OS:-}" in
|
||||
ci_config=ci-macos
|
||||
;;
|
||||
Windows)
|
||||
ci_config=ci-windows
|
||||
if [[ $windows_cross_compile -eq 1 ]]; then
|
||||
ci_config=ci-windows-cross
|
||||
else
|
||||
ci_config=ci-windows
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
133
.github/workflows/bazel.yml
vendored
@@ -17,13 +17,10 @@ concurrency:
|
||||
cancel-in-progress: ${{ github.ref_name != 'main' }}
|
||||
jobs:
|
||||
test:
|
||||
# Even though a no-cache-hit Windows build seems to exceed the 30-minute
|
||||
# limit on occasion, the more common reason for exceeding the limit is a
|
||||
# true test failure in a rust_test() marked "flaky" that gets run 3x.
|
||||
# In that case, extra time generally does not give us more signal.
|
||||
#
|
||||
# Ultimately we need true distributed builds (e.g.,
|
||||
# https://www.buildbuddy.io/docs/rbe-setup/) to speed things up.
|
||||
# PRs use a fast Windows cross-compiled test leg for pre-merge signal.
|
||||
# Post-merge pushes to main also run the native Windows test job below for
|
||||
# broader Windows signal without putting PR latency back on the critical
|
||||
# path. Cargo CI owns V8/code-mode test coverage for now.
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -47,13 +44,16 @@ jobs:
|
||||
# - os: ubuntu-24.04-arm
|
||||
# target: aarch64-unknown-linux-gnu
|
||||
|
||||
# Windows
|
||||
# Windows fast path: build the windows-gnullvm binaries with Linux
|
||||
# RBE, then run the resulting Windows tests on the Windows runner.
|
||||
# Cargo CI preserves V8/code-mode coverage while Bazel CI keeps broad
|
||||
# non-code-mode signal.
|
||||
- os: windows-latest
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
# Configure a human readable name for each job
|
||||
name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
@@ -88,9 +88,15 @@ jobs:
|
||||
# path. V8 consumers under `//codex-rs/...` still participate
|
||||
# transitively through `//...`.
|
||||
-//third_party/v8:all
|
||||
# V8-backed code-mode tests are covered by Cargo CI. Bazel CI
|
||||
# cross-compiles in several legs, and those tests are not stable in
|
||||
# that setup yet.
|
||||
-//codex-rs/code-mode:code-mode-unit-tests
|
||||
-//codex-rs/v8-poc:v8-poc-unit-tests
|
||||
)
|
||||
|
||||
bazel_wrapper_args=(
|
||||
--print-failed-action-summary
|
||||
--print-failed-test-logs
|
||||
)
|
||||
bazel_test_args=(
|
||||
@@ -100,8 +106,10 @@ jobs:
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
)
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_test_args+=(--jobs=8)
|
||||
bazel_wrapper_args+=(
|
||||
--windows-cross-compile
|
||||
--remote-download-toplevel
|
||||
)
|
||||
fi
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
@@ -130,6 +138,79 @@ jobs:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
test-windows-native-main:
|
||||
# Native Windows Bazel tests are slower and frequently approach the
|
||||
# 30-minute PR budget. Run this only for post-merge commits to main and give
|
||||
# it a larger timeout.
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 40
|
||||
runs-on: windows-latest
|
||||
name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- name: Prepare Bazel CI
|
||||
id: prepare_bazel
|
||||
uses: ./.github/actions/prepare-bazel-ci
|
||||
with:
|
||||
target: x86_64-pc-windows-gnullvm
|
||||
cache-scope: bazel-${{ github.job }}
|
||||
install-test-prereqs: "true"
|
||||
|
||||
- name: bazel test //...
|
||||
env:
|
||||
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
|
||||
shell: bash
|
||||
run: |
|
||||
bazel_targets=(
|
||||
//...
|
||||
# Keep standalone V8 library targets out of the ordinary Bazel CI
|
||||
# path. V8 consumers under `//codex-rs/...` still participate
|
||||
# transitively through `//...`.
|
||||
-//third_party/v8:all
|
||||
# Keep this aligned with the main Bazel job. The native Windows
|
||||
# job preserves broad post-merge coverage, but code-mode/V8 tests
|
||||
# are covered by Cargo CI rather than Bazel for now.
|
||||
-//codex-rs/code-mode:code-mode-unit-tests
|
||||
-//codex-rs/v8-poc:v8-poc-unit-tests
|
||||
)
|
||||
|
||||
bazel_test_args=(
|
||||
test
|
||||
--test_tag_filters=-argument-comment-lint
|
||||
--test_verbose_timeout_warnings
|
||||
--build_metadata=COMMIT_SHA=${GITHUB_SHA}
|
||||
--build_metadata=TAG_windows_native_main=true
|
||||
)
|
||||
|
||||
./.github/scripts/run-bazel-ci.sh \
|
||||
--print-failed-action-summary \
|
||||
--print-failed-test-logs \
|
||||
-- \
|
||||
"${bazel_test_args[@]}" \
|
||||
-- \
|
||||
"${bazel_targets[@]}"
|
||||
|
||||
- name: Upload Bazel execution logs
|
||||
if: always() && !cancelled()
|
||||
continue-on-error: true
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
with:
|
||||
name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
|
||||
path: ${{ runner.temp }}/bazel-execution-logs
|
||||
if-no-files-found: ignore
|
||||
|
||||
# Save the job-scoped Bazel repository cache after cache misses. Keep the
|
||||
# upload non-fatal so cache service issues never fail the job itself.
|
||||
- name: Save bazel repository cache
|
||||
if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
|
||||
continue-on-error: true
|
||||
uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
|
||||
with:
|
||||
path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
|
||||
key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
|
||||
|
||||
clippy:
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
@@ -170,17 +251,24 @@ jobs:
|
||||
--build_metadata=TAG_job=clippy
|
||||
)
|
||||
bazel_wrapper_args=()
|
||||
bazel_target_list_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
# Keep this aligned with the Windows Bazel test job. With the
|
||||
# default `//:local_windows` host platform, Windows `rust_test`
|
||||
# targets such as `//codex-rs/core:core-all-test` can be skipped
|
||||
# by `--skip_incompatible_explicit_targets`, which hides clippy
|
||||
# diagnostics from integration-test modules.
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
# Keep this aligned with the fast Windows Bazel test job: use
|
||||
# Linux RBE for clippy build actions while targeting Windows
|
||||
# gnullvm. Fork/community PRs without the BuildBuddy secret fall
|
||||
# back inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
bazel_target_list_args+=(--windows-cross-compile)
|
||||
if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
# The fork fallback can see incompatible explicit Windows-cross
|
||||
# internal test binaries in the generated target list. Preserve
|
||||
# the old local-fallback behavior there.
|
||||
bazel_clippy_args+=(--skip_incompatible_explicit_targets)
|
||||
fi
|
||||
fi
|
||||
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh)"
|
||||
bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
|
||||
bazel_targets=()
|
||||
while IFS= read -r target; do
|
||||
bazel_targets+=("${target}")
|
||||
@@ -252,7 +340,12 @@ jobs:
|
||||
# Rust debug assertions explicitly.
|
||||
bazel_wrapper_args=()
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
bazel_wrapper_args+=(--windows-msvc-host-platform)
|
||||
# This is build-only signal, so use the same Linux-RBE
|
||||
# cross-compile path as the fast Windows test and clippy jobs.
|
||||
# Fork/community PRs without the BuildBuddy secret fall back
|
||||
# inside `run-bazel-ci.sh` to the previous local Windows MSVC
|
||||
# host-platform shape.
|
||||
bazel_wrapper_args+=(--windows-cross-compile)
|
||||
fi
|
||||
|
||||
bazel_build_args=(
|
||||
|
||||
4
.github/workflows/cargo-deny.yml
vendored
@@ -17,10 +17,10 @@ jobs:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0

- name: Run cargo-deny
uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
with:
rust-version: stable
rust-version: 1.93.0
manifest-path: ./codex-rs/Cargo.toml
12
.github/workflows/issue-labeler.yml
vendored
@@ -44,7 +44,7 @@ jobs:
6. iOS — Issues with the Codex iOS app.

- Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior.
- For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
2. mcp — Topics involving Model Context Protocol servers/clients.
3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
@@ -68,7 +68,15 @@ jobs:
21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
24. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, or plan.
24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
26. memory - Issues involving agentic memory storage and retrieval.
27. imagen - Issues involving image generation.
28. remote - Issues involving remote access, remote control, or SSH.
29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
30. automations - Issues involving scheduled automation tasks or heartbeats.
31. pets - Issues involving pets avatars and animations.
32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.

Issue number: ${{ github.event.issue.number }}
2
.github/workflows/rust-release.yml
vendored
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
34
BUILD.bazel
@@ -30,6 +30,40 @@ platform(
|
||||
parents = ["@platforms//host"],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_gnullvm",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
)
|
||||
|
||||
platform(
|
||||
name = "windows_x86_64_msvc",
|
||||
constraint_values = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
)
|
||||
|
||||
toolchain(
|
||||
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
|
||||
exec_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
|
||||
],
|
||||
target_compatible_with = [
|
||||
"@platforms//cpu:x86_64",
|
||||
"@platforms//os:windows",
|
||||
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
|
||||
],
|
||||
toolchain = "@bazel_tools//tools/test:empty_toolchain",
|
||||
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "rbe",
|
||||
actual = "@rbe_platform",
|
||||
|
||||
@@ -6,4 +6,6 @@ ignore = [
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
"RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
"RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
"RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
"RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
]
2
codex-rs/.github/workflows/cargo-audit.yml
vendored
@@ -17,7 +17,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- name: Install cargo-audit
uses: taiki-e/install-action@v2
with:
94
codex-rs/Cargo.lock
generated
@@ -1857,8 +1857,8 @@ dependencies = [
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-analytics",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-app-server-transport",
|
||||
"codex-arg0",
|
||||
"codex-backend-client",
|
||||
"codex-chatgpt",
|
||||
@@ -1882,6 +1882,7 @@ dependencies = [
|
||||
"codex-model-provider-info",
|
||||
"codex-models-manager",
|
||||
"codex-otel",
|
||||
"codex-plugin",
|
||||
"codex-protocol",
|
||||
"codex-rmcp-client",
|
||||
"codex-rollout",
|
||||
@@ -1890,23 +1891,17 @@ dependencies = [
|
||||
"codex-state",
|
||||
"codex-thread-store",
|
||||
"codex-tools",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
"codex-utils-json-to-toml",
|
||||
"codex-utils-pty",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"core_test_support",
|
||||
"flate2",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"reqwest",
|
||||
"rmcp",
|
||||
@@ -1956,6 +1951,26 @@ dependencies = [
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-app-server-daemon"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-app-server-protocol",
|
||||
"codex-app-server-transport",
|
||||
"codex-core",
|
||||
"codex-uds",
|
||||
"futures",
|
||||
"libc",
|
||||
"pretty_assertions",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-tungstenite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-app-server-protocol"
|
||||
version = "0.0.0"
|
||||
@@ -2004,6 +2019,45 @@ dependencies = [
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-app-server-transport"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-api",
|
||||
"codex-app-server-protocol",
|
||||
"codex-config",
|
||||
"codex-core",
|
||||
"codex-login",
|
||||
"codex-model-provider",
|
||||
"codex-state",
|
||||
"codex-uds",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-rustls-provider",
|
||||
"constant_time_eq 0.3.1",
|
||||
"futures",
|
||||
"gethostname",
|
||||
"hmac",
|
||||
"jsonwebtoken",
|
||||
"owo-colors",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"tempfile",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-tungstenite",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-apply-patch"
|
||||
version = "0.0.0"
|
||||
@@ -2100,9 +2154,11 @@ dependencies = [
|
||||
"codex-app-server-protocol",
|
||||
"codex-connectors",
|
||||
"codex-core",
|
||||
"codex-core-plugins",
|
||||
"codex-git-utils",
|
||||
"codex-login",
|
||||
"codex-model-provider",
|
||||
"codex-plugin",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
"pretty_assertions",
|
||||
@@ -2122,6 +2178,7 @@ dependencies = [
|
||||
"clap",
|
||||
"clap_complete",
|
||||
"codex-app-server",
|
||||
"codex-app-server-daemon",
|
||||
"codex-app-server-protocol",
|
||||
"codex-app-server-test-client",
|
||||
"codex-arg0",
|
||||
@@ -2184,6 +2241,7 @@ dependencies = [
|
||||
"opentelemetry_sdk",
|
||||
"pretty_assertions",
|
||||
"rand 0.9.3",
|
||||
"rcgen",
|
||||
"reqwest",
|
||||
"rustls",
|
||||
"rustls-native-certs",
|
||||
@@ -2485,6 +2543,7 @@ name = "codex-core-api"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"codex-analytics",
|
||||
"codex-app-server-protocol",
|
||||
"codex-arg0",
|
||||
"codex-config",
|
||||
"codex-core",
|
||||
@@ -2503,6 +2562,7 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"chrono",
|
||||
"codex-analytics",
|
||||
"codex-app-server-protocol",
|
||||
"codex-config",
|
||||
"codex-core-skills",
|
||||
@@ -2999,6 +3059,23 @@ dependencies = [
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-mcp"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-output-truncation",
|
||||
"pretty_assertions",
|
||||
"rmcp",
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-memories-read"
|
||||
version = "0.0.0"
|
||||
@@ -3515,6 +3592,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
"codex-core-api",
|
||||
"serde_json",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
@@ -3589,6 +3667,7 @@ dependencies = [
|
||||
"codex-install-context",
|
||||
"codex-login",
|
||||
"codex-mcp",
|
||||
"codex-model-provider",
|
||||
"codex-model-provider-info",
|
||||
"codex-models-manager",
|
||||
"codex-otel",
|
||||
@@ -3902,6 +3981,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"codex-otel",
|
||||
"codex-protocol",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-pty",
|
||||
|
||||
@@ -8,6 +8,8 @@ members = [
|
||||
"ansi-escape",
|
||||
"async-utils",
|
||||
"app-server",
|
||||
"app-server-transport",
|
||||
"app-server-daemon",
|
||||
"app-server-client",
|
||||
"app-server-protocol",
|
||||
"app-server-test-client",
|
||||
@@ -51,6 +53,7 @@ members = [
|
||||
"login",
|
||||
"codex-mcp",
|
||||
"mcp-server",
|
||||
"memories/mcp",
|
||||
"memories/read",
|
||||
"memories/write",
|
||||
"model-provider-info",
|
||||
@@ -127,6 +130,8 @@ codex-ansi-escape = { path = "ansi-escape" }
|
||||
codex-api = { path = "codex-api" }
|
||||
codex-aws-auth = { path = "aws-auth" }
|
||||
codex-app-server = { path = "app-server" }
|
||||
codex-app-server-transport = { path = "app-server-transport" }
|
||||
codex-app-server-daemon = { path = "app-server-daemon" }
|
||||
codex-app-server-client = { path = "app-server-client" }
|
||||
codex-app-server-protocol = { path = "app-server-protocol" }
|
||||
codex-app-server-test-client = { path = "app-server-test-client" }
|
||||
@@ -166,6 +171,7 @@ codex-keyring-store = { path = "keyring-store" }
|
||||
codex-linux-sandbox = { path = "linux-sandbox" }
|
||||
codex-lmstudio = { path = "lmstudio" }
|
||||
codex-login = { path = "login" }
|
||||
codex-memories-mcp = { path = "memories/mcp" }
|
||||
codex-memories-read = { path = "memories/read" }
|
||||
codex-memories-write = { path = "memories/write" }
|
||||
codex-mcp = { path = "codex-mcp" }
|
||||
@@ -320,6 +326,10 @@ quick-xml = "0.38.4"
|
||||
rand = "0.9"
|
||||
ratatui = "0.29.0"
|
||||
ratatui-macros = "0.6.0"
|
||||
rcgen = { version = "0.14.7", default-features = false, features = [
|
||||
"aws_lc_rs",
|
||||
"pem",
|
||||
] }
|
||||
regex = "1.12.3"
|
||||
regex-lite = "0.1.8"
|
||||
reqwest = { version = "0.12", features = ["cookies"] }
|
||||
@@ -455,6 +465,7 @@ unwrap_used = "deny"
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = [
|
||||
"codex-agent-graph-store",
|
||||
"codex-memories-mcp",
|
||||
"icu_provider",
|
||||
"openssl-sys",
|
||||
"codex-utils-readiness",
|
||||
|
||||
@@ -46,7 +46,7 @@ Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.t
|
||||
|
||||
### Notifications
|
||||
|
||||
You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
The legacy `notify` setting is deprecated and will be removed in a future release. Existing configurations still work, but new automation should use lifecycle hooks instead. The [notify documentation](../docs/config.md#notify) explains the remaining compatibility behavior. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9.
|
||||
|
||||
### `codex exec` to run Codex programmatically/non-interactively
|
||||
|
||||
|
||||
@@ -98,6 +98,7 @@ use codex_protocol::protocol::AskForApproval;
|
||||
use codex_protocol::protocol::HookEventName;
|
||||
use codex_protocol::protocol::HookRunStatus;
|
||||
use codex_protocol::protocol::HookSource;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use codex_protocol::protocol::SubAgentSource;
|
||||
use codex_protocol::protocol::TokenUsage;
|
||||
@@ -302,17 +303,19 @@ fn sample_turn_completed_notification(
|
||||
})
|
||||
}
|
||||
|
||||
fn sample_turn_resolved_config(turn_id: &str) -> TurnResolvedConfigFact {
|
||||
fn sample_turn_resolved_config(thread_id: &str, turn_id: &str) -> TurnResolvedConfigFact {
|
||||
TurnResolvedConfigFact {
|
||||
turn_id: turn_id.to_string(),
|
||||
thread_id: "thread-2".to_string(),
|
||||
thread_id: thread_id.to_string(),
|
||||
num_input_images: 1,
|
||||
submission_type: None,
|
||||
ephemeral: false,
|
||||
session_source: SessionSource::Exec,
|
||||
model: "gpt-5".to_string(),
|
||||
model_provider: "openai".to_string(),
|
||||
permission_profile: CorePermissionProfile::read_only(),
|
||||
permission_profile: CorePermissionProfile::from_legacy_sandbox_policy(
|
||||
&SandboxPolicy::new_read_only_policy(),
|
||||
),
|
||||
permission_profile_cwd: PathBuf::from("/tmp"),
|
||||
reasoning_effort: None,
|
||||
reasoning_summary: None,
|
||||
@@ -416,6 +419,38 @@ async fn ingest_rejected_turn_steer(
|
||||
/*include_started*/ false, /*include_token_usage*/ false,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Initialize {
|
||||
connection_id: 8,
|
||||
params: InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: "codex-web".to_string(),
|
||||
title: None,
|
||||
version: "1.0.0".to_string(),
|
||||
},
|
||||
capabilities: None,
|
||||
},
|
||||
product_client_id: "codex-web".to_string(),
|
||||
runtime: sample_runtime_metadata(),
|
||||
rpc_transport: AppServerRpcTransport::Stdio,
|
||||
},
|
||||
out,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientResponse {
|
||||
connection_id: 8,
|
||||
request_id: RequestId::Integer(6),
|
||||
response: Box::new(sample_thread_resume_response(
|
||||
"thread-2", /*ephemeral*/ false, "gpt-5",
|
||||
)),
|
||||
},
|
||||
out,
|
||||
)
|
||||
.await;
|
||||
out.clear();
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientRequest {
|
||||
@@ -516,7 +551,7 @@ async fn ingest_turn_prerequisites(
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new(
|
||||
sample_turn_resolved_config("turn-2"),
|
||||
sample_turn_resolved_config("thread-2", "turn-2"),
|
||||
))),
|
||||
out,
|
||||
)
|
||||
@@ -1433,6 +1468,110 @@ async fn subagent_thread_started_publishes_without_initialize() {
|
||||
assert_eq!(payload[0]["event_params"]["subagent_source"], "review");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subagent_thread_started_inherits_parent_connection_for_new_thread() {
|
||||
let mut reducer = AnalyticsReducer::default();
|
||||
let mut events = Vec::new();
|
||||
let parent_thread_id =
|
||||
codex_protocol::ThreadId::from_string("44444444-4444-4444-4444-444444444444")
|
||||
.expect("valid parent thread id");
|
||||
let parent_thread_id_string = parent_thread_id.to_string();
|
||||
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Initialize {
|
||||
connection_id: 7,
|
||||
params: InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: "parent-client".to_string(),
|
||||
title: None,
|
||||
version: "1.0.0".to_string(),
|
||||
},
|
||||
capabilities: None,
|
||||
},
|
||||
product_client_id: "parent-client".to_string(),
|
||||
runtime: sample_runtime_metadata(),
|
||||
rpc_transport: AppServerRpcTransport::Stdio,
|
||||
},
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::ClientResponse {
|
||||
connection_id: 7,
|
||||
request_id: RequestId::Integer(1),
|
||||
response: Box::new(sample_thread_start_response(
|
||||
&parent_thread_id_string,
|
||||
/*ephemeral*/ false,
|
||||
"gpt-5",
|
||||
)),
|
||||
},
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::SubAgentThreadStarted(
|
||||
SubAgentThreadStartedInput {
|
||||
thread_id: "thread-review".to_string(),
|
||||
parent_thread_id: None,
|
||||
product_client_id: "parent-client".to_string(),
|
||||
client_name: "parent-client".to_string(),
|
||||
client_version: "1.0.0".to_string(),
|
||||
model: "gpt-5".to_string(),
|
||||
ephemeral: false,
|
||||
subagent_source: SubAgentSource::ThreadSpawn {
|
||||
parent_thread_id,
|
||||
depth: 1,
|
||||
agent_path: None,
|
||||
agent_nickname: None,
|
||||
agent_role: None,
|
||||
},
|
||||
created_at: 130,
|
||||
},
|
||||
)),
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
events.clear();
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::Compaction(Box::new(
|
||||
CodexCompactionEvent {
|
||||
thread_id: "thread-review".to_string(),
|
||||
turn_id: "turn-compact".to_string(),
|
||||
trigger: CompactionTrigger::Manual,
|
||||
reason: CompactionReason::UserRequested,
|
||||
implementation: CompactionImplementation::Responses,
|
||||
phase: CompactionPhase::StandaloneTurn,
|
||||
strategy: CompactionStrategy::Memento,
|
||||
status: CompactionStatus::Completed,
|
||||
error: None,
|
||||
active_context_tokens_before: 131_000,
|
||||
active_context_tokens_after: 64_000,
|
||||
started_at: 100,
|
||||
completed_at: 101,
|
||||
duration_ms: Some(1200),
|
||||
},
|
||||
))),
|
||||
&mut events,
|
||||
)
|
||||
.await;
|
||||
|
||||
let payload = serde_json::to_value(&events).expect("serialize events");
|
||||
assert_eq!(
|
||||
payload[0]["event_params"]["app_server_client"]["product_client_id"],
|
||||
"parent-client"
|
||||
);
|
||||
assert_eq!(
|
||||
payload[0]["event_params"]["parent_thread_id"],
|
||||
"44444444-4444-4444-4444-444444444444"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_used_event_serializes_expected_shape() {
|
||||
let tracking = TrackEventsContext {
|
||||
@@ -1493,6 +1632,25 @@ fn plugin_management_event_serializes_expected_shape() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_management_event_can_use_remote_plugin_id_override() {
|
||||
let mut plugin = sample_plugin_metadata();
|
||||
plugin.remote_plugin_id = Some("plugins~Plugin_remote".to_string());
|
||||
let event = TrackEventRequest::PluginInstalled(CodexPluginEventRequest {
|
||||
event_type: "codex_plugin_installed",
|
||||
event_params: codex_plugin_metadata(plugin),
|
||||
});
|
||||
|
||||
let payload = serde_json::to_value(&event).expect("serialize plugin installed event");
|
||||
|
||||
assert_eq!(
|
||||
payload["event_params"]["plugin_id"],
|
||||
"plugins~Plugin_remote"
|
||||
);
|
||||
assert_eq!(payload["event_params"]["plugin_name"], "sample");
|
||||
assert_eq!(payload["event_params"]["marketplace_name"], "test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hook_run_event_serializes_expected_shape() {
|
||||
let tracking = TrackEventsContext {
|
||||
@@ -2127,7 +2285,7 @@ async fn turn_start_error_response_discards_pending_start_request() {
|
||||
reducer
|
||||
.ingest(
|
||||
AnalyticsFact::Custom(CustomAnalyticsFact::TurnResolvedConfig(Box::new(
|
||||
sample_turn_resolved_config("turn-2"),
|
||||
sample_turn_resolved_config("thread-2", "turn-2"),
|
||||
))),
|
||||
&mut out,
|
||||
)
|
||||
@@ -2479,6 +2637,7 @@ async fn turn_completed_without_started_notification_emits_null_started_at() {
|
||||
fn sample_plugin_metadata() -> PluginTelemetryMetadata {
|
||||
PluginTelemetryMetadata {
|
||||
plugin_id: PluginId::parse("sample@test").expect("valid plugin id"),
|
||||
remote_plugin_id: None,
|
||||
capability_summary: Some(PluginCapabilitySummary {
|
||||
config_name: "sample@test".to_string(),
|
||||
display_name: "sample".to_string(),
|
||||
|
||||
@@ -587,11 +587,16 @@ pub(crate) fn codex_app_metadata(
|
||||
}
|
||||
|
||||
pub(crate) fn codex_plugin_metadata(plugin: PluginTelemetryMetadata) -> CodexPluginMetadata {
|
||||
let capability_summary = plugin.capability_summary;
|
||||
let PluginTelemetryMetadata {
|
||||
plugin_id,
|
||||
remote_plugin_id,
|
||||
capability_summary,
|
||||
} = plugin;
|
||||
let event_plugin_id = remote_plugin_id.unwrap_or_else(|| plugin_id.as_key());
|
||||
CodexPluginMetadata {
|
||||
plugin_id: Some(plugin.plugin_id.as_key()),
|
||||
plugin_name: Some(plugin.plugin_id.plugin_name),
|
||||
marketplace_name: Some(plugin.plugin_id.marketplace_name),
|
||||
plugin_id: Some(event_plugin_id),
|
||||
plugin_name: Some(plugin_id.plugin_name),
|
||||
marketplace_name: Some(plugin_id.marketplace_name),
|
||||
has_skills: capability_summary
|
||||
.as_ref()
|
||||
.map(|summary| summary.has_skills),
|
||||
|
||||
@@ -74,8 +74,7 @@ pub(crate) struct AnalyticsReducer {
|
||||
requests: HashMap<(u64, RequestId), RequestState>,
|
||||
turns: HashMap<String, TurnState>,
|
||||
connections: HashMap<u64, ConnectionState>,
|
||||
thread_connections: HashMap<String, u64>,
|
||||
thread_metadata: HashMap<String, ThreadMetadataState>,
|
||||
threads: HashMap<String, ThreadAnalyticsState>,
|
||||
}
|
||||
|
||||
struct ConnectionState {
|
||||
@@ -83,6 +82,69 @@ struct ConnectionState {
|
||||
runtime: CodexRuntimeMetadata,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct ThreadAnalyticsState {
|
||||
connection_id: Option<u64>,
|
||||
metadata: Option<ThreadMetadataState>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct AnalyticsDropSite<'a> {
|
||||
event_name: &'static str,
|
||||
thread_id: &'a str,
|
||||
turn_id: Option<&'a str>,
|
||||
review_id: Option<&'a str>,
|
||||
item_id: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl<'a> AnalyticsDropSite<'a> {
|
||||
fn guardian(input: &'a GuardianReviewEventParams) -> Self {
|
||||
Self {
|
||||
event_name: "guardian",
|
||||
thread_id: &input.thread_id,
|
||||
turn_id: Some(&input.turn_id),
|
||||
review_id: Some(&input.review_id),
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn compaction(input: &'a CodexCompactionEvent) -> Self {
|
||||
Self {
|
||||
event_name: "compaction",
|
||||
thread_id: &input.thread_id,
|
||||
turn_id: Some(&input.turn_id),
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn turn_steer(thread_id: &'a str) -> Self {
|
||||
Self {
|
||||
event_name: "turn steer",
|
||||
thread_id,
|
||||
turn_id: None,
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn turn(thread_id: &'a str, turn_id: &'a str) -> Self {
|
||||
Self {
|
||||
event_name: "turn",
|
||||
thread_id,
|
||||
turn_id: Some(turn_id),
|
||||
review_id: None,
|
||||
item_id: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum MissingAnalyticsContext {
|
||||
ThreadConnection,
|
||||
Connection { connection_id: u64 },
|
||||
ThreadMetadata,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct ThreadMetadataState {
|
||||
thread_source: Option<&'static str>,
|
||||
@@ -274,6 +336,26 @@ impl AnalyticsReducer {
|
||||
input: SubAgentThreadStartedInput,
|
||||
out: &mut Vec<TrackEventRequest>,
|
||||
) {
|
||||
let parent_thread_id = input
|
||||
.parent_thread_id
|
||||
.clone()
|
||||
.or_else(|| subagent_parent_thread_id(&input.subagent_source));
|
||||
let parent_connection_id = parent_thread_id
|
||||
.as_ref()
|
||||
.and_then(|parent_thread_id| self.threads.get(parent_thread_id))
|
||||
.and_then(|thread| thread.connection_id);
|
||||
let thread_state = self.threads.entry(input.thread_id.clone()).or_default();
|
||||
thread_state
|
||||
.metadata
|
||||
.get_or_insert_with(|| ThreadMetadataState {
|
||||
thread_source: Some("subagent"),
|
||||
initialization_mode: ThreadInitializationMode::New,
|
||||
subagent_source: Some(subagent_source_name(&input.subagent_source)),
|
||||
parent_thread_id,
|
||||
});
|
||||
if thread_state.connection_id.is_none() {
|
||||
thread_state.connection_id = parent_connection_id;
|
||||
}
|
||||
out.push(TrackEventRequest::ThreadInitialized(
|
||||
subagent_thread_started_event_request(input),
|
||||
));
|
||||
@@ -284,23 +366,9 @@ impl AnalyticsReducer {
|
||||
input: GuardianReviewEventParams,
|
||||
out: &mut Vec<TrackEventRequest>,
|
||||
) {
|
||||
let Some(connection_id) = self.thread_connections.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
review_id = %input.review_id,
|
||||
"dropping guardian analytics event: missing thread connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(connection_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
review_id = %input.review_id,
|
||||
connection_id,
|
||||
"dropping guardian analytics event: missing connection metadata"
|
||||
);
|
||||
let Some(connection_state) =
|
||||
self.thread_connection_or_warn(AnalyticsDropSite::guardian(&input))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::GuardianReview(Box::new(
|
||||
@@ -686,10 +754,13 @@ impl AnalyticsReducer {
|
||||
};
|
||||
let thread_metadata =
|
||||
ThreadMetadataState::from_thread_metadata(&thread_source, initialization_mode);
|
||||
self.thread_connections
|
||||
.insert(thread_id.clone(), connection_id);
|
||||
self.thread_metadata
|
||||
.insert(thread_id.clone(), thread_metadata.clone());
|
||||
self.threads.insert(
|
||||
thread_id.clone(),
|
||||
ThreadAnalyticsState {
|
||||
connection_id: Some(connection_id),
|
||||
metadata: Some(thread_metadata.clone()),
|
||||
},
|
||||
);
|
||||
out.push(TrackEventRequest::ThreadInitialized(
|
||||
ThreadInitializedEvent {
|
||||
event_type: "codex_thread_initialized",
|
||||
@@ -710,29 +781,9 @@ impl AnalyticsReducer {
|
||||
}
|
||||
|
||||
fn ingest_compaction(&mut self, input: CodexCompactionEvent, out: &mut Vec<TrackEventRequest>) {
|
||||
let Some(connection_id) = self.thread_connections.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
"dropping compaction analytics event: missing thread connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(connection_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
connection_id,
|
||||
"dropping compaction analytics event: missing connection metadata"
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(&input.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %input.thread_id,
|
||||
turn_id = %input.turn_id,
|
||||
"dropping compaction analytics event: missing thread lifecycle metadata"
|
||||
);
|
||||
let Some((connection_state, thread_metadata)) =
|
||||
self.thread_context_or_warn(AnalyticsDropSite::compaction(&input))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::Compaction(Box::new(
|
||||
@@ -787,11 +838,13 @@ impl AnalyticsReducer {
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(&pending_request.thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id = %pending_request.thread_id,
|
||||
"dropping turn steer analytics event: missing thread lifecycle metadata"
|
||||
);
|
||||
let drop_site = AnalyticsDropSite::turn_steer(&pending_request.thread_id);
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::TurnSteer(CodexTurnSteerEventRequest {
|
||||
@@ -824,42 +877,34 @@ impl AnalyticsReducer {
|
||||
{
|
||||
return;
|
||||
}
|
||||
let connection_metadata = turn_state
|
||||
.connection_id
|
||||
.and_then(|connection_id| self.connections.get(&connection_id))
|
||||
.map(|connection_state| {
|
||||
(
|
||||
connection_state.app_server_client.clone(),
|
||||
connection_state.runtime.clone(),
|
||||
)
|
||||
});
|
||||
let Some((app_server_client, runtime)) = connection_metadata else {
|
||||
if let Some(connection_id) = turn_state.connection_id {
|
||||
tracing::warn!(
|
||||
turn_id,
|
||||
connection_id,
|
||||
"dropping turn analytics event: missing connection metadata"
|
||||
);
|
||||
}
|
||||
return;
|
||||
};
|
||||
let Some(thread_id) = turn_state.thread_id.as_ref() else {
|
||||
return;
|
||||
};
|
||||
let Some(thread_metadata) = self.thread_metadata.get(thread_id) else {
|
||||
tracing::warn!(
|
||||
thread_id,
|
||||
turn_id,
|
||||
"dropping turn analytics event: missing thread lifecycle metadata"
|
||||
let Some(connection_id) = turn_state.connection_id else {
|
||||
return;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
warn_missing_analytics_context(
|
||||
&AnalyticsDropSite::turn(thread_id, turn_id),
|
||||
MissingAnalyticsContext::Connection { connection_id },
|
||||
);
|
||||
return;
|
||||
};
|
||||
let drop_site = AnalyticsDropSite::turn(thread_id, turn_id);
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return;
|
||||
};
|
||||
out.push(TrackEventRequest::TurnEvent(Box::new(
|
||||
CodexTurnEventRequest {
|
||||
event_type: "codex_turn_event",
|
||||
event_params: codex_turn_event_params(
|
||||
app_server_client,
|
||||
runtime,
|
||||
connection_state.app_server_client.clone(),
|
||||
connection_state.runtime.clone(),
|
||||
turn_id.to_string(),
|
||||
turn_state,
|
||||
thread_metadata,
|
||||
@@ -868,6 +913,67 @@ impl AnalyticsReducer {
|
||||
)));
|
||||
self.turns.remove(turn_id);
|
||||
}
|
||||
|
||||
fn thread_connection_or_warn(
|
||||
&self,
|
||||
drop_site: AnalyticsDropSite<'_>,
|
||||
) -> Option<&ConnectionState> {
|
||||
let Some(thread_state) = self.threads.get(drop_site.thread_id) else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadConnection);
|
||||
return None;
|
||||
};
|
||||
let Some(connection_id) = thread_state.connection_id else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadConnection);
|
||||
return None;
|
||||
};
|
||||
let Some(connection_state) = self.connections.get(&connection_id) else {
|
||||
warn_missing_analytics_context(
|
||||
&drop_site,
|
||||
MissingAnalyticsContext::Connection { connection_id },
|
||||
);
|
||||
return None;
|
||||
};
|
||||
Some(connection_state)
|
||||
}
|
||||
|
||||
fn thread_context_or_warn(
|
||||
&self,
|
||||
drop_site: AnalyticsDropSite<'_>,
|
||||
) -> Option<(&ConnectionState, &ThreadMetadataState)> {
|
||||
let connection_state = self.thread_connection_or_warn(drop_site)?;
|
||||
let Some(thread_metadata) = self
|
||||
.threads
|
||||
.get(drop_site.thread_id)
|
||||
.and_then(|thread| thread.metadata.as_ref())
|
||||
else {
|
||||
warn_missing_analytics_context(&drop_site, MissingAnalyticsContext::ThreadMetadata);
|
||||
return None;
|
||||
};
|
||||
Some((connection_state, thread_metadata))
|
||||
}
|
||||
}
|
||||
|
||||
fn warn_missing_analytics_context(
|
||||
drop_site: &AnalyticsDropSite<'_>,
|
||||
missing: MissingAnalyticsContext,
|
||||
) {
|
||||
let (missing_context, connection_id) = match missing {
|
||||
MissingAnalyticsContext::ThreadConnection => ("thread_connection", None),
|
||||
MissingAnalyticsContext::Connection { connection_id } => {
|
||||
("connection", Some(connection_id))
|
||||
}
|
||||
MissingAnalyticsContext::ThreadMetadata => ("thread_metadata", None),
|
||||
};
|
||||
tracing::warn!(
|
||||
thread_id = %drop_site.thread_id,
|
||||
turn_id = ?drop_site.turn_id,
|
||||
review_id = ?drop_site.review_id,
|
||||
item_id = ?drop_site.item_id,
|
||||
missing_context,
|
||||
connection_id,
|
||||
"dropping {} analytics event: missing analytics context",
|
||||
drop_site.event_name
|
||||
);
|
||||
}
|
||||
|
||||
fn codex_turn_event_params(
|
||||
@@ -979,7 +1085,7 @@ fn sandbox_policy_mode(permission_profile: &PermissionProfile, cwd: &Path) -> &'
|
||||
if permission_profile.network_sandbox_policy().is_enabled() {
|
||||
"full_access"
|
||||
} else {
|
||||
"custom_permissions"
|
||||
"external_sandbox"
|
||||
}
|
||||
} else if file_system_policy
|
||||
.get_writable_roots_with_cwd(cwd)
|
||||
@@ -1089,7 +1195,7 @@ mod tests {
|
||||
use codex_protocol::permissions::NetworkSandboxPolicy;
|
||||
|
||||
#[test]
|
||||
fn managed_full_disk_with_restricted_network_reports_custom_permissions() {
|
||||
fn managed_full_disk_with_restricted_network_reports_external_sandbox() {
|
||||
let permission_profile = PermissionProfile::from_runtime_permissions_with_enforcement(
|
||||
SandboxEnforcement::Managed,
|
||||
&FileSystemSandboxPolicy::unrestricted(),
|
||||
@@ -1098,7 +1204,7 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
sandbox_policy_mode(&permission_profile, Path::new("/")),
|
||||
"custom_permissions"
|
||||
"external_sandbox"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ pub use codex_app_server::in_process::DEFAULT_IN_PROCESS_CHANNEL_CAPACITY;
|
||||
pub use codex_app_server::in_process::InProcessServerEvent;
|
||||
use codex_app_server::in_process::InProcessStartArgs;
|
||||
use codex_app_server::in_process::LogDbLayer;
|
||||
pub use codex_app_server::in_process::StateDbHandle;
|
||||
use codex_app_server_protocol::ClientInfo;
|
||||
use codex_app_server_protocol::ClientNotification;
|
||||
use codex_app_server_protocol::ClientRequest;
|
||||
@@ -99,10 +100,6 @@ pub mod legacy_core {
|
||||
pub use codex_core::personality_migration::*;
|
||||
}
|
||||
|
||||
pub mod plugins {
|
||||
pub use codex_core::plugins::PluginsManager;
|
||||
}
|
||||
|
||||
pub mod review_format {
|
||||
pub use codex_core::review_format::*;
|
||||
}
|
||||
@@ -304,7 +301,15 @@ impl fmt::Display for TypedRequestError {
|
||||
write!(f, "{method} transport error: {source}")
|
||||
}
|
||||
Self::Server { method, source } => {
|
||||
write!(f, "{method} failed: {}", source.message)
|
||||
write!(
|
||||
f,
|
||||
"{method} failed: {} (code {})",
|
||||
source.message, source.code
|
||||
)?;
|
||||
if let Some(data) = source.data.as_ref() {
|
||||
write!(f, ", data: {data}")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Self::Deserialize { method, source } => {
|
||||
write!(f, "{method} response decode error: {source}")
|
||||
@@ -339,6 +344,8 @@ pub struct InProcessClientStartArgs {
|
||||
pub feedback: CodexFeedback,
|
||||
/// SQLite tracing layer used to flush recently emitted logs before feedback upload.
|
||||
pub log_db: Option<LogDbLayer>,
|
||||
/// Process-wide SQLite state handle shared with the embedded app-server.
|
||||
pub state_db: Option<StateDbHandle>,
|
||||
/// Environment manager used by core execution and filesystem operations.
|
||||
pub environment_manager: Arc<EnvironmentManager>,
|
||||
/// Startup warnings emitted after initialize succeeds.
|
||||
@@ -400,6 +407,7 @@ impl InProcessClientStartArgs {
|
||||
thread_config_loader,
|
||||
feedback: self.feedback,
|
||||
log_db: self.log_db,
|
||||
state_db: self.state_db,
|
||||
environment_manager: self.environment_manager,
|
||||
config_warnings: self.config_warnings,
|
||||
session_source: self.session_source,
|
||||
@@ -979,6 +987,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
config_warnings: Vec::new(),
|
||||
session_source,
|
||||
@@ -1130,6 +1139,7 @@ mod tests {
|
||||
ServerNotification::ItemCompleted(codex_app_server_protocol::ItemCompletedNotification {
|
||||
thread_id: "thread".to_string(),
|
||||
turn_id: "turn".to_string(),
|
||||
completed_at_ms: 0,
|
||||
item: codex_app_server_protocol::ThreadItem::AgentMessage {
|
||||
id: "item".to_string(),
|
||||
text: text.to_string(),
|
||||
@@ -1919,11 +1929,15 @@ mod tests {
|
||||
method: "thread/read".to_string(),
|
||||
source: JSONRPCErrorError {
|
||||
code: -32603,
|
||||
data: None,
|
||||
data: Some(serde_json::json!({"detail": "config lock mismatch"})),
|
||||
message: "internal".to_string(),
|
||||
},
|
||||
};
|
||||
assert_eq!(std::error::Error::source(&server).is_some(), false);
|
||||
assert_eq!(
|
||||
server.to_string(),
|
||||
"thread/read failed: internal (code -32603), data: {\"detail\":\"config lock mismatch\"}"
|
||||
);
|
||||
|
||||
let deserialize = TypedRequestError::Deserialize {
|
||||
method: "thread/start".to_string(),
|
||||
@@ -1999,6 +2013,7 @@ mod tests {
|
||||
codex_app_server_protocol::ItemCompletedNotification {
|
||||
thread_id: "thread".to_string(),
|
||||
turn_id: "turn".to_string(),
|
||||
completed_at_ms: 0,
|
||||
item: codex_app_server_protocol::ThreadItem::AgentMessage {
|
||||
id: "item".to_string(),
|
||||
text: "hello".to_string(),
|
||||
@@ -2049,6 +2064,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: environment_manager.clone(),
|
||||
config_warnings: Vec::new(),
|
||||
session_source: SessionSource::Exec,
|
||||
@@ -2088,6 +2104,7 @@ mod tests {
|
||||
cloud_requirements: CloudRequirementsLoader::default(),
|
||||
feedback: CodexFeedback::new(),
|
||||
log_db: None,
|
||||
state_db: None,
|
||||
environment_manager: Arc::new(EnvironmentManager::default_for_tests()),
|
||||
config_warnings: Vec::new(),
|
||||
session_source: SessionSource::Exec,
|
||||
|
||||
6
codex-rs/app-server-daemon/BUILD.bazel
Normal file
@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "app-server-daemon",
    crate_name = "codex_app_server_daemon",
)
38
codex-rs/app-server-daemon/Cargo.toml
Normal file
@@ -0,0 +1,38 @@
[package]
name = "codex-app-server-daemon"
version.workspace = true
edition.workspace = true
license.workspace = true

[lib]
name = "codex_app_server_daemon"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-app-server-transport = { workspace = true }
codex-core = { workspace = true }
codex-uds = { workspace = true }
futures = { workspace = true }
libc = { workspace = true }
reqwest = { workspace = true, features = ["rustls-tls"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
    "fs",
    "io-util",
    "macros",
    "process",
    "rt-multi-thread",
    "signal",
    "time",
] }
tokio-tungstenite = { workspace = true }

[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }
95
codex-rs/app-server-daemon/README.md
Normal file
@@ -0,0 +1,95 @@
# codex-app-server-daemon

`codex-app-server-daemon` backs the machine-readable `codex app-server`
lifecycle commands used by remote clients such as the desktop and mobile apps.
It is intended for Codex instances launched over SSH, including fresh developer
machines that should expose app-server with `remote_control` enabled.

## Platform support

The current daemon implementation is Unix-only. It uses pidfile-backed
daemonization plus Unix process and file-locking primitives, and does not yet
support Windows lifecycle management.

## Commands

```sh
codex app-server daemon start
codex app-server daemon restart
codex app-server daemon stop
codex app-server daemon version
codex app-server daemon bootstrap --remote-control
```

On success, every command writes exactly one JSON object to stdout. Consumers
should parse that JSON rather than relying on human-readable text. Lifecycle
responses report the resolved backend, socket path, local CLI version, and
running app-server version when applicable.
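
The exact response schema is not shown in this change, so the field names below are illustrative assumptions based on the description above; this is a minimal sketch of how a consumer might capture and parse one lifecycle response from stdout:

```rust
// Minimal sketch of consuming the single JSON object a lifecycle command
// prints to stdout. The field names (`backend`, `socket_path`, `cli_version`,
// `app_server_version`) are assumptions for illustration; consult the real
// response types in codex-app-server-daemon for the actual schema.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct LifecycleResponse {
    backend: String,
    socket_path: Option<String>,
    cli_version: String,
    app_server_version: Option<String>,
}

fn main() -> anyhow::Result<()> {
    // Run the start command and capture its stdout.
    let output = std::process::Command::new("codex")
        .args(["app-server", "daemon", "start"])
        .output()?;
    anyhow::ensure!(output.status.success(), "daemon start failed");

    // Exactly one JSON object is written on success; parse it rather than
    // scraping human-readable text.
    let response: LifecycleResponse = serde_json::from_slice(&output.stdout)?;
    println!("{response:?}");
    Ok(())
}
```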

## Bootstrap flow

For a new remote machine:

```sh
curl -fsSL https://chatgpt.com/codex/install.sh | sh
$HOME/.codex/packages/standalone/current/codex app-server daemon bootstrap --remote-control
```

`bootstrap` requires the standalone managed install. It records the daemon
settings under `CODEX_HOME/app-server-daemon/`, starts app-server as a
pidfile-backed detached process, and launches a detached updater loop.

## Installation and update cases

The daemon assumes Codex is installed through `install.sh` and always launches
the standalone managed binary under `CODEX_HOME`.

| Situation | What starts | Does this daemon fetch new binaries? | Does a running app-server eventually move to a newer binary on its own? |
| --- | --- | --- | --- |
| `install.sh` has run, but only `start` is used | `start` uses `CODEX_HOME/packages/standalone/current/codex` | No | No. The managed path is used when starting or restarting, but no updater is installed. |
| `install.sh` has run, then `bootstrap` is used | The pidfile backend uses `CODEX_HOME/packages/standalone/current/codex` | Yes. Bootstrap launches a detached updater loop that runs `install.sh` hourly. | Yes, while that updater process is alive. After a successful fetch, it restarts a currently running app-server only when the managed binary reports a different version. |
| Some other tool updates the managed binary path | The next fresh start or restart uses the updated file at that path | No | Not automatically. The existing process keeps the old executable image until an explicit `restart`. |

### Standalone installs

For installs created by `install.sh`:

- lifecycle commands always use the standalone managed binary path
- `bootstrap` is supported
- `bootstrap` starts a detached pid-backed updater loop that fetches via
  `install.sh`, then restarts app-server if it is running on a different version
- the updater loop is not reboot-persistent; it must be started again by
  rerunning `bootstrap` after a reboot

### Out-of-band updates

This daemon does not watch arbitrary executable files for replacement. If some
other tool updates a binary that the daemon would use on its next launch:

- a currently running app-server remains on the old executable image
- `restart` will launch the updated binary
- for bootstrapped daemons, the detached updater loop only reacts to updates it
  fetched itself; it does not watch arbitrary file replacement

## Lifecycle semantics

`start` is idempotent and returns after app-server is ready to answer the normal
JSON-RPC initialize handshake on the Unix control socket.

`restart` stops any managed daemon and starts it again.

`stop` sends a graceful termination request first, then sends a second
termination signal after the grace window if the process is still alive.
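
As a rough illustration, the sketch below condenses the stop sequence implemented by the pid backend (`src/backend/pid.rs`, added later in this change): send SIGTERM, poll, and escalate to SIGKILL once the grace window expires. The constants mirror its `STOP_GRACE_PERIOD` and `STOP_POLL_INTERVAL`; pid-file bookkeeping and start-time checks are omitted here.

```rust
// Condensed sketch of the graceful-then-forced stop sequence; not the full
// implementation, which also verifies the pid record before signalling.
use std::time::Duration;

const STOP_GRACE_PERIOD: Duration = Duration::from_secs(60);
const STOP_POLL_INTERVAL: Duration = Duration::from_millis(50);

fn process_exists(pid: libc::pid_t) -> bool {
    // kill(pid, 0) probes for existence without delivering a signal.
    unsafe { libc::kill(pid, 0) == 0 }
}

async fn stop(pid: libc::pid_t) {
    // Ask for graceful termination first.
    let _ = unsafe { libc::kill(pid, libc::SIGTERM) };
    let started = tokio::time::Instant::now();
    while process_exists(pid) {
        if started.elapsed() >= STOP_GRACE_PERIOD {
            // Grace window expired; force termination.
            let _ = unsafe { libc::kill(pid, libc::SIGKILL) };
            break;
        }
        tokio::time::sleep(STOP_POLL_INTERVAL).await;
    }
}
```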

All mutating lifecycle commands are serialized per `CODEX_HOME`, so a concurrent
`start`, `restart`, `stop`, or `bootstrap` does not race another in-flight
lifecycle operation.

## State

The daemon stores its local state under `CODEX_HOME/app-server-daemon/`:

- `settings.json` for persisted launch settings
- `app-server.pid` for the app-server process record
- `app-server-updater.pid` for the pid-backed standalone updater loop
- `daemon.lock` for daemon-wide lifecycle serialization
33
codex-rs/app-server-daemon/src/backend/mod.rs
Normal file
@@ -0,0 +1,33 @@
mod pid;

use std::path::PathBuf;

use serde::Serialize;

pub(crate) use pid::PidBackend;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum BackendKind {
    Pid,
}

#[derive(Debug, Clone)]
pub(crate) struct BackendPaths {
    pub(crate) codex_bin: PathBuf,
    pub(crate) pid_file: PathBuf,
    pub(crate) update_pid_file: PathBuf,
    pub(crate) remote_control_enabled: bool,
}

pub(crate) fn pid_backend(paths: BackendPaths) -> PidBackend {
    PidBackend::new(
        paths.codex_bin,
        paths.pid_file,
        paths.remote_control_enabled,
    )
}

pub(crate) fn pid_update_loop_backend(paths: BackendPaths) -> PidBackend {
    PidBackend::new_update_loop(paths.codex_bin, paths.update_pid_file)
}
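
A minimal sketch of how a caller inside this crate might assemble `BackendPaths` and hand it to the two factory functions above. The file names follow the state layout listed in the README; resolving `CODEX_HOME` to a `PathBuf` and the managed-binary path are assumptions for illustration, not code from this change.

```rust
// Sketch: wire BackendPaths to the pid-backed app-server and updater backends.
use std::path::PathBuf;

use crate::backend::BackendPaths;
use crate::backend::PidBackend;
use crate::backend::pid_backend;
use crate::backend::pid_update_loop_backend;

fn daemon_backends(codex_home: PathBuf) -> (PidBackend, PidBackend) {
    let state_dir = codex_home.join("app-server-daemon");
    let paths = BackendPaths {
        // The standalone managed binary installed by install.sh (assumed path).
        codex_bin: codex_home.join("packages/standalone/current/codex"),
        pid_file: state_dir.join("app-server.pid"),
        update_pid_file: state_dir.join("app-server-updater.pid"),
        // Mirrors the `--remote-control` bootstrap flag.
        remote_control_enabled: true,
    };
    // One backend manages app-server itself, the other the updater loop.
    (pid_backend(paths.clone()), pid_update_loop_backend(paths))
}
```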
600
codex-rs/app-server-daemon/src/backend/pid.rs
Normal file
@@ -0,0 +1,600 @@
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
#[cfg(unix)]
|
||||
use std::process::Stdio;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::bail;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use tokio::fs;
|
||||
#[cfg(unix)]
|
||||
use tokio::process::Command;
|
||||
use tokio::time::sleep;
|
||||
|
||||
const STOP_POLL_INTERVAL: Duration = Duration::from_millis(50);
|
||||
const STOP_GRACE_PERIOD: Duration = Duration::from_secs(60);
|
||||
const STOP_TIMEOUT: Duration = Duration::from_secs(70);
|
||||
const START_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
||||
#[derive(Debug)]
|
||||
#[cfg_attr(not(unix), allow(dead_code))]
|
||||
pub(crate) struct PidBackend {
|
||||
codex_bin: PathBuf,
|
||||
pid_file: PathBuf,
|
||||
lock_file: PathBuf,
|
||||
command_kind: PidCommandKind,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct PidRecord {
|
||||
pid: u32,
|
||||
process_start_time: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum PidFileState {
|
||||
Missing,
|
||||
Starting,
|
||||
Running(PidRecord),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
#[cfg_attr(not(unix), allow(dead_code))]
|
||||
enum PidCommandKind {
|
||||
AppServer { remote_control_enabled: bool },
|
||||
UpdateLoop,
|
||||
}
|
||||
|
||||
impl PidBackend {
|
||||
pub(crate) fn new(codex_bin: PathBuf, pid_file: PathBuf, remote_control_enabled: bool) -> Self {
|
||||
let lock_file = pid_file.with_extension("pid.lock");
|
||||
Self {
|
||||
codex_bin,
|
||||
pid_file,
|
||||
lock_file,
|
||||
command_kind: PidCommandKind::AppServer {
|
||||
remote_control_enabled,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn new_update_loop(codex_bin: PathBuf, pid_file: PathBuf) -> Self {
|
||||
let lock_file = pid_file.with_extension("pid.lock");
|
||||
Self {
|
||||
codex_bin,
|
||||
pid_file,
|
||||
lock_file,
|
||||
command_kind: PidCommandKind::UpdateLoop,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn is_starting_or_running(&self) -> Result<bool> {
|
||||
loop {
|
||||
match self.read_pid_file_state().await? {
|
||||
PidFileState::Missing => return Ok(false),
|
||||
PidFileState::Starting => return Ok(true),
|
||||
PidFileState::Running(record) => {
|
||||
if self.record_is_active(&record).await? {
|
||||
return Ok(true);
|
||||
}
|
||||
match self.refresh_after_stale_record(&record).await? {
|
||||
PidFileState::Missing => return Ok(false),
|
||||
PidFileState::Starting | PidFileState::Running(_) => continue,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub(crate) async fn start(&self) -> Result<Option<u32>> {
|
||||
if let Some(parent) = self.pid_file.parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.await
|
||||
.with_context(|| format!("failed to create pid directory {}", parent.display()))?;
|
||||
}
|
||||
let reservation_lock = self.acquire_reservation_lock().await?;
|
||||
let _pid_file = loop {
|
||||
match fs::OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&self.pid_file)
|
||||
.await
|
||||
{
|
||||
Ok(pid_file) => break pid_file,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
|
||||
match self.read_pid_file_state_with_lock_held().await? {
|
||||
PidFileState::Missing => continue,
|
||||
PidFileState::Running(record) => {
|
||||
if self.record_is_active(&record).await? {
|
||||
return Ok(None);
|
||||
}
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
continue;
|
||||
}
|
||||
PidFileState::Starting => {
|
||||
unreachable!("lock holder cannot observe starting")
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to reserve pid file {}", self.pid_file.display())
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
let mut command = Command::new(&self.codex_bin);
|
||||
command
|
||||
.args(self.command_args())
|
||||
.stdin(Stdio::null())
|
||||
.stdout(Stdio::null())
|
||||
.stderr(Stdio::null());
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
unsafe {
|
||||
command.pre_exec(|| {
|
||||
if libc::setsid() == -1 {
|
||||
return Err(std::io::Error::last_os_error());
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let child = match command.spawn() {
|
||||
Ok(child) => child,
|
||||
Err(err) => {
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
return Err(err).with_context(|| {
|
||||
format!(
|
||||
"failed to spawn detached app-server process using {}",
|
||||
self.codex_bin.display()
|
||||
)
|
||||
});
|
||||
}
|
||||
};
|
||||
let pid = child
|
||||
.id()
|
||||
.context("spawned app-server process has no pid")?;
|
||||
let record = match read_process_start_time(pid).await {
|
||||
Ok(process_start_time) => PidRecord {
|
||||
pid,
|
||||
process_start_time,
|
||||
},
|
||||
Err(err) => {
|
||||
let _ = self.terminate_process(pid);
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
let contents = serde_json::to_vec(&record).context("failed to serialize pid record")?;
|
||||
let temp_pid_file = self.pid_file.with_extension("pid.tmp");
|
||||
if let Err(err) = fs::write(&temp_pid_file, &contents).await {
|
||||
let _ = self.terminate_process(pid);
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to write pid temp file {}", temp_pid_file.display())
|
||||
});
|
||||
}
|
||||
if let Err(err) = fs::rename(&temp_pid_file, &self.pid_file).await {
|
||||
let _ = self.terminate_process(pid);
|
||||
let _ = fs::remove_file(&temp_pid_file).await;
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to publish pid file {}", self.pid_file.display())
|
||||
});
|
||||
}
|
||||
drop(reservation_lock);
|
||||
Ok(Some(pid))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
pub(crate) async fn start(&self) -> Result<Option<u32>> {
|
||||
bail!("pid-managed app-server startup is unsupported on this platform")
|
||||
}
|
||||
|
||||
pub(crate) async fn stop(&self) -> Result<()> {
|
||||
loop {
|
||||
let Some(record) = self.wait_for_pid_start().await? else {
|
||||
return Ok(());
|
||||
};
|
||||
if !self.record_is_active(&record).await? {
|
||||
match self.refresh_after_stale_record(&record).await? {
|
||||
PidFileState::Missing => return Ok(()),
|
||||
PidFileState::Starting | PidFileState::Running(_) => continue,
|
||||
}
|
||||
}
|
||||
|
||||
let pid = record.pid;
|
||||
self.terminate_process(pid)?;
|
||||
let started_at = tokio::time::Instant::now();
|
||||
let deadline = tokio::time::Instant::now() + STOP_TIMEOUT;
|
||||
let mut forced = false;
|
||||
while tokio::time::Instant::now() < deadline {
|
||||
if !self.record_is_active(&record).await? {
|
||||
match self.refresh_after_stale_record(&record).await? {
|
||||
PidFileState::Missing => return Ok(()),
|
||||
PidFileState::Starting | PidFileState::Running(_) => break,
|
||||
}
|
||||
}
|
||||
if !forced && started_at.elapsed() >= STOP_GRACE_PERIOD {
|
||||
self.force_terminate_process(pid)?;
|
||||
forced = true;
|
||||
}
|
||||
sleep(STOP_POLL_INTERVAL).await;
|
||||
}
|
||||
|
||||
if self.record_is_active(&record).await? {
|
||||
bail!("timed out waiting for pid-managed app server {pid} to stop");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn wait_for_pid_start(&self) -> Result<Option<PidRecord>> {
|
||||
let deadline = tokio::time::Instant::now() + START_TIMEOUT;
|
||||
loop {
|
||||
match self.read_pid_file_state().await? {
|
||||
PidFileState::Missing => return Ok(None),
|
||||
PidFileState::Running(record) => return Ok(Some(record)),
|
||||
PidFileState::Starting if tokio::time::Instant::now() < deadline => {
|
||||
sleep(STOP_POLL_INTERVAL).await;
|
||||
}
|
||||
PidFileState::Starting => {
|
||||
bail!(
|
||||
"timed out waiting for pid reservation in {} to finish initializing",
|
||||
self.pid_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn read_pid_file_state(&self) -> Result<PidFileState> {
|
||||
let contents = match fs::read_to_string(&self.pid_file).await {
|
||||
Ok(contents) => contents,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
return if reservation_lock_is_active(&self.lock_file).await? {
|
||||
Ok(PidFileState::Starting)
|
||||
} else {
|
||||
Ok(PidFileState::Missing)
|
||||
};
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to read pid file {}", self.pid_file.display())
|
||||
});
|
||||
}
|
||||
};
|
||||
if contents.trim().is_empty() {
|
||||
match inspect_empty_pid_reservation(&self.pid_file, &self.lock_file).await? {
|
||||
EmptyPidReservation::Active => {
|
||||
return Ok(PidFileState::Starting);
|
||||
}
|
||||
EmptyPidReservation::Stale => {
|
||||
return Ok(PidFileState::Missing);
|
||||
}
|
||||
EmptyPidReservation::Record(record) => return Ok(PidFileState::Running(record)),
|
||||
}
|
||||
}
|
||||
let record = serde_json::from_str(&contents)
|
||||
.with_context(|| format!("invalid pid file contents in {}", self.pid_file.display()))?;
|
||||
Ok(PidFileState::Running(record))
|
||||
}
|
||||
|
||||
async fn read_pid_file_state_with_lock_held(&self) -> Result<PidFileState> {
|
||||
let contents = match fs::read_to_string(&self.pid_file).await {
|
||||
Ok(contents) => contents,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
return Ok(PidFileState::Missing);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to read pid file {}", self.pid_file.display())
|
||||
});
|
||||
}
|
||||
};
|
||||
if contents.trim().is_empty() {
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
return Ok(PidFileState::Missing);
|
||||
}
|
||||
let record = serde_json::from_str(&contents)
|
||||
.with_context(|| format!("invalid pid file contents in {}", self.pid_file.display()))?;
|
||||
Ok(PidFileState::Running(record))
|
||||
}
|
||||
|
||||
async fn refresh_after_stale_record(&self, expected: &PidRecord) -> Result<PidFileState> {
|
||||
let reservation_lock = self.acquire_reservation_lock().await?;
|
||||
let state = match self.read_pid_file_state_with_lock_held().await? {
|
||||
PidFileState::Running(record) if record == *expected => {
|
||||
let _ = fs::remove_file(&self.pid_file).await;
|
||||
PidFileState::Missing
|
||||
}
|
||||
state => state,
|
||||
};
|
||||
drop(reservation_lock);
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
async fn acquire_reservation_lock(&self) -> Result<fs::File> {
|
||||
let reservation_lock = fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.write(true)
|
||||
.open(&self.lock_file)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to open pid lock file {}", self.lock_file.display())
|
||||
})?;
|
||||
let lock_deadline = tokio::time::Instant::now() + START_TIMEOUT;
|
||||
while !try_lock_file(&reservation_lock)? {
|
||||
if tokio::time::Instant::now() >= lock_deadline {
|
||||
bail!(
|
||||
"timed out waiting for pid lock {}",
|
||||
self.lock_file.display()
|
||||
);
|
||||
}
|
||||
sleep(STOP_POLL_INTERVAL).await;
|
||||
}
|
||||
Ok(reservation_lock)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn command_args(&self) -> Vec<&'static str> {
|
||||
match self.command_kind {
|
||||
PidCommandKind::AppServer {
|
||||
remote_control_enabled: true,
|
||||
} => vec![
|
||||
"--enable",
|
||||
"remote_control",
|
||||
"app-server",
|
||||
"--listen",
|
||||
"unix://",
|
||||
],
|
||||
PidCommandKind::AppServer {
|
||||
remote_control_enabled: false,
|
||||
} => vec!["app-server", "--listen", "unix://"],
|
||||
PidCommandKind::UpdateLoop => vec!["app-server", "daemon", "pid-update-loop"],
|
||||
}
|
||||
}
|
||||
|
||||
fn terminate_process(&self, pid: u32) -> Result<()> {
|
||||
match self.command_kind {
|
||||
PidCommandKind::AppServer { .. } => terminate_process(pid),
|
||||
PidCommandKind::UpdateLoop => terminate_process(pid),
|
||||
}
|
||||
}
|
||||
|
||||
fn force_terminate_process(&self, pid: u32) -> Result<()> {
|
||||
match self.command_kind {
|
||||
PidCommandKind::AppServer { .. } => force_terminate_process(pid),
|
||||
PidCommandKind::UpdateLoop => force_terminate_process_group(pid),
|
||||
}
|
||||
}
|
||||
|
||||
async fn record_is_active(&self, record: &PidRecord) -> Result<bool> {
|
||||
process_matches_record(record).await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn process_exists(pid: u32) -> bool {
|
||||
let Ok(pid) = libc::pid_t::try_from(pid) else {
|
||||
return false;
|
||||
};
|
||||
let result = unsafe { libc::kill(pid, 0) };
|
||||
result == 0 || std::io::Error::last_os_error().raw_os_error() == Some(libc::EPERM)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn terminate_process(pid: u32) -> Result<()> {
|
||||
let raw_pid = libc::pid_t::try_from(pid)
|
||||
.with_context(|| format!("pid-managed app server pid {pid} is out of range"))?;
|
||||
let result = unsafe { libc::kill(raw_pid, libc::SIGTERM) };
|
||||
if result == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let err = std::io::Error::last_os_error();
|
||||
if err.raw_os_error() == Some(libc::ESRCH) {
|
||||
return Ok(());
|
||||
}
|
||||
Err(err).with_context(|| format!("failed to terminate pid-managed app server {pid}"))
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn force_terminate_process(pid: u32) -> Result<()> {
|
||||
let raw_pid = libc::pid_t::try_from(pid)
|
||||
.with_context(|| format!("pid-managed app server pid {pid} is out of range"))?;
|
||||
let result = unsafe { libc::kill(raw_pid, libc::SIGKILL) };
|
||||
if result == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let err = std::io::Error::last_os_error();
|
||||
if err.raw_os_error() == Some(libc::ESRCH) {
|
||||
return Ok(());
|
||||
}
|
||||
Err(err).with_context(|| format!("failed to force terminate pid-managed app server {pid}"))
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn force_terminate_process_group(pid: u32) -> Result<()> {
|
||||
let raw_pid = libc::pid_t::try_from(pid)
|
||||
.with_context(|| format!("pid-managed updater pid {pid} is out of range"))?;
|
||||
let result = unsafe { libc::kill(-raw_pid, libc::SIGKILL) };
|
||||
if result == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let err = std::io::Error::last_os_error();
|
||||
if err.raw_os_error() == Some(libc::ESRCH) {
|
||||
return Ok(());
|
||||
}
|
||||
Err(err).with_context(|| format!("failed to force terminate pid-managed updater group {pid}"))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn terminate_process(_pid: u32) -> Result<()> {
|
||||
bail!("pid-managed app-server shutdown is unsupported on this platform")
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn force_terminate_process(_pid: u32) -> Result<()> {
|
||||
bail!("pid-managed app-server shutdown is unsupported on this platform")
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn force_terminate_process_group(_pid: u32) -> Result<()> {
|
||||
bail!("pid-managed updater shutdown is unsupported on this platform")
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn process_matches_record(record: &PidRecord) -> Result<bool> {
|
||||
if !process_exists(record.pid) {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
match read_process_start_time(record.pid).await {
|
||||
Ok(start_time) => Ok(start_time == record.process_start_time),
|
||||
Err(_err) if !process_exists(record.pid) => Ok(false),
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn process_matches_record(_record: &PidRecord) -> Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(not(unix), allow(dead_code))]
|
||||
enum EmptyPidReservation {
|
||||
Active,
|
||||
Stale,
|
||||
Record(PidRecord),
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn try_lock_file(file: &fs::File) -> Result<bool> {
|
||||
use std::os::fd::AsRawFd;
|
||||
|
||||
let result = unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };
|
||||
if result == 0 {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let err = std::io::Error::last_os_error();
|
||||
if err.raw_os_error() == Some(libc::EWOULDBLOCK) {
|
||||
return Ok(false);
|
||||
}
|
||||
Err(err).context("failed to lock pid reservation")
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn try_lock_file(_file: &fs::File) -> Result<bool> {
|
||||
bail!("pid-managed app-server startup is unsupported on this platform")
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn reservation_lock_is_active(path: &Path) -> Result<bool> {
|
||||
let file = match fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.open(path)
|
||||
.await
|
||||
{
|
||||
Ok(file) => file,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
return Ok(false);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err)
|
||||
.with_context(|| format!("failed to inspect pid lock file {}", path.display()));
|
||||
}
|
||||
};
|
||||
Ok(!try_lock_file(&file)?)
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn reservation_lock_is_active(_path: &Path) -> Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn inspect_empty_pid_reservation(
|
||||
pid_path: &Path,
|
||||
lock_path: &Path,
|
||||
) -> Result<EmptyPidReservation> {
|
||||
let file = match fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.open(lock_path)
|
||||
.await
|
||||
{
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
return Err(err).with_context(|| {
|
||||
format!("failed to inspect pid lock file {}", lock_path.display())
|
||||
});
|
||||
}
|
||||
};
|
||||
if !try_lock_file(&file)? {
|
||||
return Ok(EmptyPidReservation::Active);
|
||||
}
|
||||
|
||||
let contents = match fs::read_to_string(pid_path).await {
|
||||
Ok(contents) => contents,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
return Ok(EmptyPidReservation::Stale);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err)
|
||||
.with_context(|| format!("failed to reread pid file {}", pid_path.display()));
|
||||
}
|
||||
};
|
||||
if contents.trim().is_empty() {
|
||||
let _ = fs::remove_file(pid_path).await;
|
||||
return Ok(EmptyPidReservation::Stale);
|
||||
}
|
||||
|
||||
let record = serde_json::from_str(&contents)
|
||||
.with_context(|| format!("invalid pid file contents in {}", pid_path.display()))?;
|
||||
Ok(EmptyPidReservation::Record(record))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn inspect_empty_pid_reservation(
|
||||
_pid_path: &Path,
|
||||
_lock_path: &Path,
|
||||
) -> Result<EmptyPidReservation> {
|
||||
Ok(EmptyPidReservation::Stale)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn read_process_start_time(pid: u32) -> Result<String> {
|
||||
let output = Command::new("ps")
|
||||
.args(["-p", &pid.to_string(), "-o", "lstart="])
|
||||
.output()
|
||||
.await
|
||||
.context("failed to invoke ps for pid-managed app server")?;
|
||||
if !output.status.success() {
|
||||
bail!("failed to read start time for pid-managed app server {pid}");
|
||||
}
|
||||
|
||||
let start_time = String::from_utf8(output.stdout)
|
||||
.context("pid-managed app server start time was not utf-8")?;
|
||||
let start_time = start_time.trim();
|
||||
if start_time.is_empty() {
|
||||
bail!("pid-managed app server {pid} has no recorded start time");
|
||||
}
|
||||
Ok(start_time.to_string())
|
||||
}
|
||||
|
||||
#[cfg(all(test, unix))]
|
||||
#[path = "pid_tests.rs"]
|
||||
mod tests;
|
||||
158
codex-rs/app-server-daemon/src/backend/pid_tests.rs
Normal file
158
codex-rs/app-server-daemon/src/backend/pid_tests.rs
Normal file
@@ -0,0 +1,158 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
use super::PidBackend;
|
||||
use super::PidCommandKind;
|
||||
use super::PidFileState;
|
||||
use super::PidRecord;
|
||||
use super::try_lock_file;
|
||||
|
||||
#[tokio::test]
|
||||
async fn locked_empty_pid_file_is_treated_as_active_reservation() {
|
||||
let temp_dir = TempDir::new().expect("temp dir");
|
||||
let pid_file = temp_dir.path().join("app-server.pid");
|
||||
tokio::fs::write(&pid_file, "")
|
||||
.await
|
||||
.expect("write pid file");
|
||||
let backend = PidBackend::new(
|
||||
temp_dir.path().join("codex"),
|
||||
pid_file.clone(),
|
||||
/*remote_control_enabled*/ false,
|
||||
);
|
||||
let reservation = tokio::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.write(true)
|
||||
.open(&backend.lock_file)
|
||||
.await
|
||||
.expect("open pid lock file");
|
||||
assert!(try_lock_file(&reservation).expect("lock reservation"));
|
||||
|
||||
assert_eq!(
|
||||
backend.read_pid_file_state().await.expect("read pid"),
|
||||
PidFileState::Starting
|
||||
);
|
||||
assert!(pid_file.exists());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn unlocked_empty_pid_file_is_treated_as_stale_reservation() {
|
||||
let temp_dir = TempDir::new().expect("temp dir");
|
||||
let pid_file = temp_dir.path().join("app-server.pid");
|
||||
tokio::fs::write(&pid_file, "")
|
||||
.await
|
||||
.expect("write pid file");
|
||||
let backend = PidBackend::new(
|
||||
temp_dir.path().join("codex"),
|
||||
pid_file.clone(),
|
||||
/*remote_control_enabled*/ false,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
backend.read_pid_file_state().await.expect("read pid"),
|
||||
PidFileState::Missing
|
||||
);
|
||||
assert!(!pid_file.exists());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn stop_waits_for_live_reservation_to_resolve() {
|
||||
let temp_dir = TempDir::new().expect("temp dir");
|
||||
let pid_file = temp_dir.path().join("app-server.pid");
|
||||
tokio::fs::write(&pid_file, "")
|
||||
.await
|
||||
.expect("write pid file");
|
||||
let backend = PidBackend::new(
|
||||
temp_dir.path().join("codex"),
|
||||
pid_file.clone(),
|
||||
/*remote_control_enabled*/ false,
|
||||
);
|
||||
let reservation = tokio::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.write(true)
|
||||
.open(&backend.lock_file)
|
||||
.await
|
||||
.expect("open pid lock file");
|
||||
assert!(try_lock_file(&reservation).expect("lock reservation"));
|
||||
let cleanup = tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
drop(reservation);
|
||||
tokio::fs::remove_file(pid_file)
|
||||
.await
|
||||
.expect("remove pid file");
|
||||
});
|
||||
|
||||
backend.stop().await.expect("stop");
|
||||
cleanup.await.expect("cleanup task");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn start_retries_stale_empty_pid_file_under_its_own_lock() {
|
||||
let temp_dir = TempDir::new().expect("temp dir");
|
||||
let pid_file = temp_dir.path().join("app-server.pid");
|
||||
tokio::fs::write(&pid_file, "")
|
||||
.await
|
||||
.expect("write pid file");
|
||||
let backend = PidBackend::new(
|
||||
temp_dir.path().join("missing-codex"),
|
||||
pid_file,
|
||||
/*remote_control_enabled*/ false,
|
||||
);
|
||||
|
||||
let err = backend.start().await.expect_err("start");
|
||||
assert!(
|
||||
err.to_string()
|
||||
.starts_with("failed to spawn detached app-server process using ")
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn stale_record_cleanup_preserves_replacement_record() {
|
||||
let temp_dir = TempDir::new().expect("temp dir");
|
||||
let pid_file = temp_dir.path().join("app-server.pid");
|
||||
let backend = PidBackend::new(
|
||||
temp_dir.path().join("codex"),
|
||||
pid_file.clone(),
|
||||
/*remote_control_enabled*/ false,
|
||||
);
|
||||
let stale = PidRecord {
|
||||
pid: 1,
|
||||
process_start_time: "old".to_string(),
|
||||
};
|
||||
let replacement = PidRecord {
|
||||
pid: 2,
|
||||
process_start_time: "new".to_string(),
|
||||
};
|
||||
tokio::fs::write(
|
||||
&pid_file,
|
||||
serde_json::to_vec(&replacement).expect("serialize replacement"),
|
||||
)
|
||||
.await
|
||||
.expect("write replacement pid file");
|
||||
|
||||
assert_eq!(
|
||||
backend
|
||||
.refresh_after_stale_record(&stale)
|
||||
.await
|
||||
.expect("cleanup"),
|
||||
PidFileState::Running(replacement)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_loop_uses_hidden_app_server_subcommand() {
|
||||
let backend = PidBackend {
|
||||
codex_bin: "codex".into(),
|
||||
pid_file: "updater.pid".into(),
|
||||
lock_file: "updater.pid.lock".into(),
|
||||
command_kind: PidCommandKind::UpdateLoop,
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
backend.command_args(),
|
||||
vec!["app-server", "daemon", "pid-update-loop"]
|
||||
);
|
||||
}
|
||||
131
codex-rs/app-server-daemon/src/client.rs
Normal file
131
codex-rs/app-server-daemon/src/client.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::anyhow;
|
||||
use codex_app_server_protocol::ClientInfo;
|
||||
use codex_app_server_protocol::InitializeParams;
|
||||
use codex_app_server_protocol::InitializeResponse;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCRequest;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_uds::UnixStream;
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use tokio::time::timeout;
|
||||
use tokio_tungstenite::client_async;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
|
||||
const PROBE_TIMEOUT: Duration = Duration::from_secs(2);
|
||||
const CLIENT_NAME: &str = "codex_app_server_daemon";
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub(crate) struct ProbeInfo {
|
||||
pub(crate) app_server_version: String,
|
||||
}
|
||||
|
||||
pub(crate) async fn probe(socket_path: &Path) -> Result<ProbeInfo> {
|
||||
timeout(PROBE_TIMEOUT, probe_inner(socket_path))
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"timed out probing app-server control socket {}",
|
||||
socket_path.display()
|
||||
)
|
||||
})?
|
||||
}
|
||||
|
||||
async fn probe_inner(socket_path: &Path) -> Result<ProbeInfo> {
|
||||
let stream = UnixStream::connect(socket_path)
|
||||
.await
|
||||
.with_context(|| format!("failed to connect to {}", socket_path.display()))?;
|
||||
let (mut websocket, _response) = client_async("ws://localhost/", stream)
|
||||
.await
|
||||
.with_context(|| format!("failed to upgrade {}", socket_path.display()))?;
|
||||
|
||||
let initialize = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(1),
|
||||
method: "initialize".to_string(),
|
||||
params: Some(serde_json::to_value(InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: CLIENT_NAME.to_string(),
|
||||
title: Some("Codex App Server Daemon".to_string()),
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
},
|
||||
capabilities: None,
|
||||
})?),
|
||||
trace: None,
|
||||
});
|
||||
websocket
|
||||
.send(Message::Text(serde_json::to_string(&initialize)?.into()))
|
||||
.await
|
||||
.context("failed to send initialize request")?;
|
||||
|
||||
let response = loop {
|
||||
let frame = websocket
|
||||
.next()
|
||||
.await
|
||||
.ok_or_else(|| anyhow!("app-server closed before initialize response"))??;
|
||||
let Message::Text(payload) = frame else {
|
||||
continue;
|
||||
};
|
||||
let message = serde_json::from_str::<JSONRPCMessage>(&payload)?;
|
||||
if let JSONRPCMessage::Response(response) = message
|
||||
&& response.id == RequestId::Integer(1)
|
||||
{
|
||||
break response;
|
||||
}
|
||||
};
|
||||
let initialize_response = serde_json::from_value::<InitializeResponse>(response.result)?;
|
||||
|
||||
let initialized = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
websocket
|
||||
.send(Message::Text(serde_json::to_string(&initialized)?.into()))
|
||||
.await
|
||||
.context("failed to send initialized notification")?;
|
||||
websocket.close(None).await.ok();
|
||||
|
||||
Ok(ProbeInfo {
|
||||
app_server_version: parse_version_from_user_agent(&initialize_response.user_agent)?,
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_version_from_user_agent(user_agent: &str) -> Result<String> {
|
||||
let (_originator, rest) = user_agent
|
||||
.split_once('/')
|
||||
.ok_or_else(|| anyhow!("app-server user-agent omitted version separator"))?;
|
||||
let version = rest
|
||||
.split_whitespace()
|
||||
.next()
|
||||
.filter(|version| !version.is_empty())
|
||||
.ok_or_else(|| anyhow!("app-server user-agent omitted version"))?;
|
||||
Ok(version.to_string())
|
||||
}
|
||||
|
||||
#[cfg(all(test, unix))]
|
||||
mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::parse_version_from_user_agent;
|
||||
|
||||
#[test]
|
||||
fn parses_version_from_codex_user_agent() {
|
||||
assert_eq!(
|
||||
parse_version_from_user_agent(
|
||||
"codex_app_server_daemon/1.2.3 (Linux 6.8.0; x86_64) codex_cli_rs/1.2.3",
|
||||
)
|
||||
.expect("version"),
|
||||
"1.2.3"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_user_agent_without_version() {
|
||||
assert!(parse_version_from_user_agent("codex_app_server_daemon").is_err());
|
||||
}
|
||||
}
|
||||
503
codex-rs/app-server-daemon/src/lib.rs
Normal file
503
codex-rs/app-server-daemon/src/lib.rs
Normal file
@@ -0,0 +1,503 @@
|
||||
mod backend;
|
||||
mod client;
|
||||
mod managed_install;
|
||||
mod settings;
|
||||
mod update_loop;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::anyhow;
|
||||
pub use backend::BackendKind;
|
||||
use backend::BackendPaths;
|
||||
use codex_app_server_transport::app_server_control_socket_path;
|
||||
use codex_core::config::find_codex_home;
|
||||
use managed_install::managed_codex_bin;
|
||||
use managed_install::managed_codex_version;
|
||||
use serde::Serialize;
|
||||
use settings::DaemonSettings;
|
||||
use tokio::time::sleep;
|
||||
|
||||
const START_POLL_INTERVAL: Duration = Duration::from_millis(50);
|
||||
const START_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
const OPERATION_LOCK_TIMEOUT: Duration = Duration::from_secs(75);
|
||||
const PID_FILE_NAME: &str = "app-server.pid";
|
||||
const UPDATE_PID_FILE_NAME: &str = "app-server-updater.pid";
|
||||
const OPERATION_LOCK_FILE_NAME: &str = "daemon.lock";
|
||||
const SETTINGS_FILE_NAME: &str = "settings.json";
|
||||
const STATE_DIR_NAME: &str = "app-server-daemon";
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum LifecycleCommand {
|
||||
Start,
|
||||
Restart,
|
||||
Stop,
|
||||
Version,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum LifecycleStatus {
|
||||
AlreadyRunning,
|
||||
Started,
|
||||
Restarted,
|
||||
Stopped,
|
||||
NotRunning,
|
||||
Running,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct LifecycleOutput {
|
||||
pub status: LifecycleStatus,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub backend: Option<BackendKind>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub pid: Option<u32>,
|
||||
pub socket_path: PathBuf,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cli_version: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub app_server_version: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct BootstrapOptions {
|
||||
pub remote_control_enabled: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum BootstrapStatus {
|
||||
Bootstrapped,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct BootstrapOutput {
|
||||
pub status: BootstrapStatus,
|
||||
pub backend: BackendKind,
|
||||
pub auto_update_enabled: bool,
|
||||
pub remote_control_enabled: bool,
|
||||
pub managed_codex_path: PathBuf,
|
||||
pub socket_path: PathBuf,
|
||||
pub cli_version: String,
|
||||
pub app_server_version: String,
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub(crate) enum RestartIfRunningOutcome {
|
||||
Completed,
|
||||
Busy,
|
||||
}
|
||||
|
||||
pub async fn run(command: LifecycleCommand) -> Result<LifecycleOutput> {
|
||||
ensure_supported_platform()?;
|
||||
Daemon::from_environment()?.run(command).await
|
||||
}
|
||||
|
||||
pub async fn bootstrap(options: BootstrapOptions) -> Result<BootstrapOutput> {
|
||||
ensure_supported_platform()?;
|
||||
Daemon::from_environment()?.bootstrap(options).await
|
||||
}
|
||||
|
||||
pub async fn run_pid_update_loop() -> Result<()> {
|
||||
ensure_supported_platform()?;
|
||||
update_loop::run().await
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn ensure_supported_platform() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn ensure_supported_platform() -> Result<()> {
|
||||
Err(anyhow!(
|
||||
"codex app-server daemon lifecycle is only supported on Unix platforms"
|
||||
))
|
||||
}
|
||||
|
||||
struct Daemon {
|
||||
socket_path: PathBuf,
|
||||
pid_file: PathBuf,
|
||||
update_pid_file: PathBuf,
|
||||
operation_lock_file: PathBuf,
|
||||
settings_file: PathBuf,
|
||||
managed_codex_bin: PathBuf,
|
||||
}
|
||||
|
||||
impl Daemon {
|
||||
fn from_environment() -> Result<Self> {
|
||||
let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
|
||||
let socket_path = app_server_control_socket_path(codex_home.as_path())?
|
||||
.as_path()
|
||||
.to_path_buf();
|
||||
let state_dir = codex_home.as_path().join(STATE_DIR_NAME);
|
||||
Ok(Self {
|
||||
socket_path,
|
||||
pid_file: state_dir.join(PID_FILE_NAME),
|
||||
update_pid_file: state_dir.join(UPDATE_PID_FILE_NAME),
|
||||
operation_lock_file: state_dir.join(OPERATION_LOCK_FILE_NAME),
|
||||
settings_file: state_dir.join(SETTINGS_FILE_NAME),
|
||||
managed_codex_bin: managed_codex_bin(codex_home.as_path()),
|
||||
})
|
||||
}
|
||||
|
||||
async fn run(&self, command: LifecycleCommand) -> Result<LifecycleOutput> {
|
||||
match command {
|
||||
LifecycleCommand::Start => {
|
||||
let _operation_lock = self.acquire_operation_lock().await?;
|
||||
self.start().await
|
||||
}
|
||||
LifecycleCommand::Restart => {
|
||||
let _operation_lock = self.acquire_operation_lock().await?;
|
||||
self.restart().await
|
||||
}
|
||||
LifecycleCommand::Stop => {
|
||||
let _operation_lock = self.acquire_operation_lock().await?;
|
||||
self.stop().await
|
||||
}
|
||||
LifecycleCommand::Version => self.version().await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn start(&self) -> Result<LifecycleOutput> {
|
||||
let settings = self.load_settings().await?;
|
||||
if let Ok(info) = client::probe(&self.socket_path).await {
|
||||
return Ok(self.output(
|
||||
LifecycleStatus::AlreadyRunning,
|
||||
self.running_backend(&settings).await?,
|
||||
/*pid*/ None,
|
||||
Some(info.app_server_version),
|
||||
));
|
||||
}
|
||||
|
||||
if self.running_backend_instance(&settings).await?.is_some() {
|
||||
let info = self.wait_until_ready().await?;
|
||||
return Ok(self.output(
|
||||
LifecycleStatus::AlreadyRunning,
|
||||
Some(BackendKind::Pid),
|
||||
/*pid*/ None,
|
||||
Some(info.app_server_version),
|
||||
));
|
||||
}
|
||||
|
||||
self.ensure_managed_codex_bin()?;
|
||||
let pid = self.start_managed_backend(&settings).await?;
|
||||
let info = self.wait_until_ready().await?;
|
||||
Ok(self.output(
|
||||
LifecycleStatus::Started,
|
||||
Some(BackendKind::Pid),
|
||||
pid,
|
||||
Some(info.app_server_version),
|
||||
))
|
||||
}
|
||||
|
||||
async fn restart(&self) -> Result<LifecycleOutput> {
|
||||
let settings = self.load_settings().await?;
|
||||
if client::probe(&self.socket_path).await.is_ok()
|
||||
&& self.running_backend(&settings).await?.is_none()
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"app server is running but is not managed by codex app-server daemon"
|
||||
));
|
||||
}
|
||||
|
||||
self.ensure_managed_codex_bin()?;
|
||||
if let Some(backend) = self.running_backend_instance(&settings).await? {
|
||||
backend.stop().await?;
|
||||
}
|
||||
|
||||
let pid = self.start_managed_backend(&settings).await?;
|
||||
let info = self.wait_until_ready().await?;
|
||||
Ok(self.output(
|
||||
LifecycleStatus::Restarted,
|
||||
Some(BackendKind::Pid),
|
||||
pid,
|
||||
Some(info.app_server_version),
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub(crate) async fn try_restart_if_running(&self) -> Result<RestartIfRunningOutcome> {
|
||||
let operation_lock = self.open_operation_lock_file().await?;
|
||||
if !try_lock_file(&operation_lock)? {
|
||||
return Ok(RestartIfRunningOutcome::Busy);
|
||||
}
|
||||
let settings = self.load_settings().await?;
|
||||
if let Some(backend) = self.running_backend_instance(&settings).await? {
|
||||
let Ok(info) = client::probe(&self.socket_path).await else {
|
||||
return Ok(RestartIfRunningOutcome::Completed);
|
||||
};
|
||||
let managed_version = managed_codex_version(&self.managed_codex_bin).await?;
|
||||
if info.app_server_version == managed_version {
|
||||
return Ok(RestartIfRunningOutcome::Completed);
|
||||
}
|
||||
backend.stop().await?;
|
||||
let _ = self.start_managed_backend(&settings).await?;
|
||||
self.wait_until_ready().await?;
|
||||
return Ok(RestartIfRunningOutcome::Completed);
|
||||
}
|
||||
|
||||
if client::probe(&self.socket_path).await.is_ok() {
|
||||
return Err(anyhow!(
|
||||
"app server is running but is not managed by codex app-server daemon"
|
||||
));
|
||||
}
|
||||
|
||||
Ok(RestartIfRunningOutcome::Completed)
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<LifecycleOutput> {
|
||||
let settings = self.load_settings().await?;
|
||||
if let Some(backend) = self.running_backend_instance(&settings).await? {
|
||||
backend.stop().await?;
|
||||
return Ok(self.output(
|
||||
LifecycleStatus::Stopped,
|
||||
Some(BackendKind::Pid),
|
||||
/*pid*/ None,
|
||||
/*app_server_version*/ None,
|
||||
));
|
||||
}
|
||||
|
||||
if client::probe(&self.socket_path).await.is_ok() {
|
||||
return Err(anyhow!(
|
||||
"app server is running but is not managed by codex app-server daemon"
|
||||
));
|
||||
}
|
||||
|
||||
Ok(self.output(
|
||||
LifecycleStatus::NotRunning,
|
||||
/*backend*/ None,
|
||||
/*pid*/ None,
|
||||
/*app_server_version*/ None,
|
||||
))
|
||||
}
|
||||
|
||||
async fn version(&self) -> Result<LifecycleOutput> {
|
||||
let settings = self.load_settings().await?;
|
||||
let info = client::probe(&self.socket_path).await?;
|
||||
Ok(self.output(
|
||||
LifecycleStatus::Running,
|
||||
self.running_backend(&settings).await?,
|
||||
/*pid*/ None,
|
||||
Some(info.app_server_version),
|
||||
))
|
||||
}
|
||||
|
||||
async fn wait_until_ready(&self) -> Result<client::ProbeInfo> {
|
||||
let deadline = tokio::time::Instant::now() + START_TIMEOUT;
|
||||
loop {
|
||||
match client::probe(&self.socket_path).await {
|
||||
Ok(info) => return Ok(info),
|
||||
Err(err) if tokio::time::Instant::now() < deadline => {
|
||||
let _ = err;
|
||||
sleep(START_POLL_INTERVAL).await;
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err).with_context(|| {
|
||||
format!(
|
||||
"app server did not become ready on {}",
|
||||
self.socket_path.display()
|
||||
)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn bootstrap(&self, options: BootstrapOptions) -> Result<BootstrapOutput> {
|
||||
let _operation_lock = self.acquire_operation_lock().await?;
|
||||
self.bootstrap_locked(options).await
|
||||
}
|
||||
|
||||
async fn bootstrap_locked(&self, options: BootstrapOptions) -> Result<BootstrapOutput> {
|
||||
self.ensure_managed_codex_bin()?;
|
||||
|
||||
let settings = DaemonSettings {
|
||||
remote_control_enabled: options.remote_control_enabled,
|
||||
};
|
||||
if client::probe(&self.socket_path).await.is_ok()
|
||||
&& self.running_backend(&settings).await?.is_none()
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"app server is running but is not managed by codex app-server daemon"
|
||||
));
|
||||
}
|
||||
settings.save(&self.settings_file).await?;
|
||||
|
||||
if let Some(backend) = self.running_backend_instance(&settings).await? {
|
||||
backend.stop().await?;
|
||||
}
|
||||
|
||||
let backend = backend::pid_backend(self.backend_paths(&settings));
|
||||
backend.start().await?;
|
||||
let updater = backend::pid_update_loop_backend(self.backend_paths(&settings));
|
||||
if updater.is_starting_or_running().await? {
|
||||
updater.stop().await?;
|
||||
}
|
||||
updater.start().await?;
|
||||
|
||||
let info = self.wait_until_ready().await?;
|
||||
Ok(BootstrapOutput {
|
||||
status: BootstrapStatus::Bootstrapped,
|
||||
backend: BackendKind::Pid,
|
||||
auto_update_enabled: true,
|
||||
remote_control_enabled: settings.remote_control_enabled,
|
||||
managed_codex_path: self.managed_codex_bin.clone(),
|
||||
socket_path: self.socket_path.clone(),
|
||||
cli_version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
app_server_version: info.app_server_version,
|
||||
})
|
||||
}
|
||||
|
||||
async fn running_backend(&self, settings: &DaemonSettings) -> Result<Option<BackendKind>> {
|
||||
Ok(self
|
||||
.running_backend_instance(settings)
|
||||
.await?
|
||||
.map(|_| BackendKind::Pid))
|
||||
}
|
||||
|
||||
async fn running_backend_instance(
|
||||
&self,
|
||||
settings: &DaemonSettings,
|
||||
) -> Result<Option<backend::PidBackend>> {
|
||||
let backend = backend::pid_backend(self.backend_paths(settings));
|
||||
if backend.is_starting_or_running().await? {
|
||||
return Ok(Some(backend));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn start_managed_backend(&self, settings: &DaemonSettings) -> Result<Option<u32>> {
|
||||
let backend = backend::pid_backend(self.backend_paths(settings));
|
||||
backend.start().await
|
||||
}
|
||||
|
||||
fn ensure_managed_codex_bin(&self) -> Result<()> {
|
||||
if self.managed_codex_bin.is_file() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"managed standalone Codex install not found at {}; install Codex first",
|
||||
self.managed_codex_bin.display()
|
||||
))
|
||||
}
|
||||
|
||||
fn backend_paths(&self, settings: &DaemonSettings) -> BackendPaths {
|
||||
BackendPaths {
|
||||
codex_bin: self.managed_codex_bin.clone(),
|
||||
pid_file: self.pid_file.clone(),
|
||||
update_pid_file: self.update_pid_file.clone(),
|
||||
remote_control_enabled: settings.remote_control_enabled,
|
||||
}
|
||||
}
|
||||
|
||||
async fn load_settings(&self) -> Result<DaemonSettings> {
|
||||
DaemonSettings::load(&self.settings_file).await
|
||||
}
|
||||
|
||||
async fn acquire_operation_lock(&self) -> Result<tokio::fs::File> {
|
||||
let operation_lock = self.open_operation_lock_file().await?;
|
||||
let deadline = tokio::time::Instant::now() + OPERATION_LOCK_TIMEOUT;
|
||||
while !try_lock_file(&operation_lock)? {
|
||||
if tokio::time::Instant::now() >= deadline {
|
||||
return Err(anyhow!(
|
||||
"timed out waiting for daemon operation lock {}",
|
||||
self.operation_lock_file.display()
|
||||
));
|
||||
}
|
||||
sleep(START_POLL_INTERVAL).await;
|
||||
}
|
||||
Ok(operation_lock)
|
||||
}
|
||||
|
||||
async fn open_operation_lock_file(&self) -> Result<tokio::fs::File> {
|
||||
if let Some(parent) = self.operation_lock_file.parent() {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"failed to create daemon state directory {}",
|
||||
parent.display()
|
||||
)
|
||||
})?;
|
||||
}
|
||||
tokio::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(false)
|
||||
.write(true)
|
||||
.open(&self.operation_lock_file)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to open daemon operation lock {}",
|
||||
self.operation_lock_file.display()
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn output(
|
||||
&self,
|
||||
status: LifecycleStatus,
|
||||
backend: Option<BackendKind>,
|
||||
pid: Option<u32>,
|
||||
app_server_version: Option<String>,
|
||||
) -> LifecycleOutput {
|
||||
LifecycleOutput {
|
||||
status,
|
||||
backend,
|
||||
pid,
|
||||
socket_path: self.socket_path.clone(),
|
||||
cli_version: Some(env!("CARGO_PKG_VERSION").to_string()),
|
||||
app_server_version,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn try_lock_file(file: &tokio::fs::File) -> Result<bool> {
|
||||
use std::os::fd::AsRawFd;
|
||||
|
||||
let result = unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };
|
||||
if result == 0 {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let err = std::io::Error::last_os_error();
|
||||
if err.raw_os_error() == Some(libc::EWOULDBLOCK) {
|
||||
return Ok(false);
|
||||
}
|
||||
Err(err).context("failed to lock daemon operation")
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn try_lock_file(_file: &tokio::fs::File) -> Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
#[cfg(all(test, unix))]
|
||||
mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::BootstrapStatus;
|
||||
use super::LifecycleStatus;
|
||||
|
||||
#[test]
|
||||
fn lifecycle_status_uses_camel_case_json() {
|
||||
assert_eq!(
|
||||
serde_json::to_string(&LifecycleStatus::AlreadyRunning).expect("serialize"),
|
||||
"\"alreadyRunning\""
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bootstrap_status_uses_camel_case_json() {
|
||||
assert_eq!(
|
||||
serde_json::to_string(&BootstrapStatus::Bootstrapped).expect("serialize"),
|
||||
"\"bootstrapped\""
|
||||
);
|
||||
}
|
||||
}
|
||||
60
codex-rs/app-server-daemon/src/managed_install.rs
Normal file
60
codex-rs/app-server-daemon/src/managed_install.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::anyhow;
|
||||
use tokio::process::Command;
|
||||
|
||||
pub(crate) fn managed_codex_bin(codex_home: &Path) -> PathBuf {
|
||||
codex_home
|
||||
.join("packages")
|
||||
.join("standalone")
|
||||
.join("current")
|
||||
.join(managed_codex_file_name())
|
||||
}
|
||||
|
||||
pub(crate) async fn managed_codex_version(codex_bin: &Path) -> Result<String> {
|
||||
let output = Command::new(codex_bin)
|
||||
.arg("--version")
|
||||
.output()
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to invoke managed Codex binary {}",
|
||||
codex_bin.display()
|
||||
)
|
||||
})?;
|
||||
if !output.status.success() {
|
||||
return Err(anyhow!(
|
||||
"managed Codex binary {} exited with status {}",
|
||||
codex_bin.display(),
|
||||
output.status
|
||||
));
|
||||
}
|
||||
|
||||
let stdout = String::from_utf8(output.stdout).with_context(|| {
|
||||
format!(
|
||||
"managed Codex version was not utf-8: {}",
|
||||
codex_bin.display()
|
||||
)
|
||||
})?;
|
||||
parse_codex_version(&stdout)
|
||||
}
|
||||
|
||||
fn managed_codex_file_name() -> &'static str {
|
||||
if cfg!(windows) { "codex.exe" } else { "codex" }
|
||||
}
|
||||
|
||||
fn parse_codex_version(output: &str) -> Result<String> {
|
||||
let version = output
|
||||
.split_whitespace()
|
||||
.nth(1)
|
||||
.filter(|version| !version.is_empty())
|
||||
.ok_or_else(|| anyhow!("managed Codex version output was malformed"))?;
|
||||
Ok(version.to_string())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "managed_install_tests.rs"]
|
||||
mod tests;
|
||||
16
codex-rs/app-server-daemon/src/managed_install_tests.rs
Normal file
16
codex-rs/app-server-daemon/src/managed_install_tests.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::parse_codex_version;
|
||||
|
||||
#[test]
|
||||
fn parses_codex_cli_version_output() {
|
||||
assert_eq!(
|
||||
parse_codex_version("codex 1.2.3\n").expect("version"),
|
||||
"1.2.3"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_malformed_codex_cli_version_output() {
|
||||
assert!(parse_codex_version("codex\n").is_err());
|
||||
}
|
||||
63
codex-rs/app-server-daemon/src/settings.rs
Normal file
63
codex-rs/app-server-daemon/src/settings.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use tokio::fs;
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(crate) struct DaemonSettings {
|
||||
pub(crate) remote_control_enabled: bool,
|
||||
}
|
||||
|
||||
impl DaemonSettings {
|
||||
pub(crate) async fn load(path: &Path) -> Result<Self> {
|
||||
let contents = match fs::read_to_string(path).await {
|
||||
Ok(contents) => contents,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Self::default()),
|
||||
Err(err) => {
|
||||
return Err(err)
|
||||
.with_context(|| format!("failed to read daemon settings {}", path.display()));
|
||||
}
|
||||
};
|
||||
|
||||
serde_json::from_str(&contents)
|
||||
.with_context(|| format!("failed to parse daemon settings {}", path.display()))
|
||||
}
|
||||
|
||||
pub(crate) async fn save(&self, path: &Path) -> Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"failed to create daemon settings directory {}",
|
||||
parent.display()
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
let contents = serde_json::to_vec_pretty(self).context("failed to serialize settings")?;
|
||||
fs::write(path, contents)
|
||||
.await
|
||||
.with_context(|| format!("failed to write daemon settings {}", path.display()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, unix))]
|
||||
mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::DaemonSettings;
|
||||
|
||||
#[test]
|
||||
fn daemon_settings_use_camel_case_json() {
|
||||
assert_eq!(
|
||||
serde_json::to_string(&DaemonSettings {
|
||||
remote_control_enabled: true,
|
||||
})
|
||||
.expect("serialize"),
|
||||
r#"{"remoteControlEnabled":true}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
132
codex-rs/app-server-daemon/src/update_loop.rs
Normal file
132
codex-rs/app-server-daemon/src/update_loop.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
#[cfg(unix)]
|
||||
use std::process::Stdio;
|
||||
#[cfg(unix)]
|
||||
use std::time::Duration;
|
||||
|
||||
#[cfg(unix)]
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
#[cfg(not(unix))]
|
||||
use anyhow::bail;
|
||||
#[cfg(unix)]
|
||||
use futures::FutureExt;
|
||||
#[cfg(unix)]
|
||||
use tokio::io::AsyncWriteExt;
|
||||
#[cfg(unix)]
|
||||
use tokio::process::Command;
|
||||
#[cfg(unix)]
|
||||
use tokio::signal::unix::Signal;
|
||||
#[cfg(unix)]
|
||||
use tokio::signal::unix::SignalKind;
|
||||
#[cfg(unix)]
|
||||
use tokio::signal::unix::signal;
|
||||
#[cfg(unix)]
|
||||
use tokio::time::sleep;
|
||||
|
||||
#[cfg(unix)]
|
||||
use crate::Daemon;
|
||||
#[cfg(unix)]
|
||||
use crate::RestartIfRunningOutcome;
|
||||
|
||||
#[cfg(unix)]
|
||||
const INITIAL_UPDATE_DELAY: Duration = Duration::from_secs(5 * 60);
|
||||
#[cfg(unix)]
|
||||
const RESTART_RETRY_INTERVAL: Duration = Duration::from_millis(50);
|
||||
#[cfg(unix)]
|
||||
const UPDATE_INTERVAL: Duration = Duration::from_secs(60 * 60);
|
||||
|
||||
#[cfg(unix)]
|
||||
pub(crate) async fn run() -> Result<()> {
|
||||
let mut terminate =
|
||||
signal(SignalKind::terminate()).context("failed to install updater shutdown handler")?;
|
||||
if sleep_or_terminate(INITIAL_UPDATE_DELAY, &mut terminate).await {
|
||||
return Ok(());
|
||||
}
|
||||
loop {
|
||||
match update_once(&mut terminate).await {
|
||||
Ok(UpdateLoopControl::Continue) | Err(_) => {}
|
||||
Ok(UpdateLoopControl::Stop) => return Ok(()),
|
||||
}
|
||||
if sleep_or_terminate(UPDATE_INTERVAL, &mut terminate).await {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
pub(crate) async fn run() -> Result<()> {
|
||||
bail!("pid-managed updater loop is unsupported on this platform")
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn sleep_or_terminate(duration: Duration, terminate: &mut Signal) -> bool {
|
||||
tokio::select! {
|
||||
_ = sleep(duration) => false,
|
||||
_ = terminate.recv() => true,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
enum UpdateLoopControl {
|
||||
Continue,
|
||||
Stop,
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn update_once(terminate: &mut Signal) -> Result<UpdateLoopControl> {
|
||||
install_latest_standalone().await?;
|
||||
|
||||
let daemon = Daemon::from_environment()?;
|
||||
loop {
|
||||
if terminate.recv().now_or_never().flatten().is_some() {
|
||||
return Ok(UpdateLoopControl::Stop);
|
||||
}
|
||||
match daemon.try_restart_if_running().await? {
|
||||
RestartIfRunningOutcome::Completed => return Ok(UpdateLoopControl::Continue),
|
||||
RestartIfRunningOutcome::Busy => {
|
||||
if sleep_or_terminate(RESTART_RETRY_INTERVAL, terminate).await {
|
||||
return Ok(UpdateLoopControl::Stop);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn install_latest_standalone() -> Result<()> {
|
||||
let script = reqwest::get("https://chatgpt.com/codex/install.sh")
|
||||
.await
|
||||
.context("failed to fetch standalone Codex updater")?
|
||||
.error_for_status()
|
||||
.context("standalone Codex updater request failed")?
|
||||
.bytes()
|
||||
.await
|
||||
.context("failed to read standalone Codex updater")?;
|
||||
|
||||
let mut child = Command::new("/bin/sh")
|
||||
.arg("-s")
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::null())
|
||||
.stderr(Stdio::null())
|
||||
.spawn()
|
||||
.context("failed to invoke standalone Codex updater")?;
|
||||
let mut stdin = child
|
||||
.stdin
|
||||
.take()
|
||||
.context("standalone Codex updater stdin was unavailable")?;
|
||||
stdin
|
||||
.write_all(&script)
|
||||
.await
|
||||
.context("failed to pass standalone Codex updater to shell")?;
|
||||
drop(stdin);
|
||||
let status = child
|
||||
.wait()
|
||||
.await
|
||||
.context("failed to wait for standalone Codex updater")?;
|
||||
|
||||
if status.success() {
|
||||
Ok(())
|
||||
} else {
|
||||
anyhow::bail!("standalone Codex updater exited with status {status}")
|
||||
}
|
||||
}
|
||||
@@ -2217,6 +2217,25 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginUninstallParams": {
|
||||
"properties": {
|
||||
"pluginId": {
|
||||
@@ -2246,6 +2265,28 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessTerminalSize": {
|
||||
"description": "PTY size in character cells for `process/spawn` PTY sessions.",
|
||||
"properties": {
|
||||
"cols": {
|
||||
"description": "Terminal width in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
},
|
||||
"rows": {
|
||||
"description": "Terminal height in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"cols",
|
||||
"rows"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"RealtimeOutputModality": {
|
||||
"enum": [
|
||||
"text",
|
||||
@@ -2831,6 +2872,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -3453,10 +3516,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -3858,10 +3917,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -4121,44 +4176,6 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"properties": {
|
||||
"threadId": {
|
||||
@@ -4889,30 +4906,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -5106,6 +5099,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
|
||||
@@ -1032,6 +1032,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -1931,6 +1932,11 @@
|
||||
},
|
||||
"ItemCompletedNotification": {
|
||||
"properties": {
|
||||
"completedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
@@ -1942,6 +1948,7 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"completedAtMs",
|
||||
"item",
|
||||
"threadId",
|
||||
"turnId"
|
||||
@@ -2029,6 +2036,11 @@
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
"startedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -2038,6 +2050,7 @@
|
||||
},
|
||||
"required": [
|
||||
"item",
|
||||
"startedAtMs",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
@@ -2402,6 +2415,96 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ProcessExitedNotification": {
|
||||
"description": "Final process exit notification for `process/spawn`.",
|
||||
"properties": {
|
||||
"exitCode": {
|
||||
"description": "Process exit code.",
|
||||
"format": "int32",
|
||||
"type": "integer"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderrCapReached": {
|
||||
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stdout": {
|
||||
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stdoutCapReached": {
|
||||
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exitCode",
|
||||
"processHandle",
|
||||
"stderr",
|
||||
"stderrCapReached",
|
||||
"stdout",
|
||||
"stdoutCapReached"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputDeltaNotification": {
|
||||
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
|
||||
"properties": {
|
||||
"capReached": {
|
||||
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"deltaBase64": {
|
||||
"description": "Base64-encoded output bytes.",
|
||||
"type": "string"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stream": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ProcessOutputStream"
|
||||
}
|
||||
],
|
||||
"description": "Output stream this chunk belongs to."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"capReached",
|
||||
"deltaBase64",
|
||||
"processHandle",
|
||||
"stream"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputStream": {
|
||||
"description": "Stream label for `process/outputDelta` notifications.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "stdout stream. PTY mode multiplexes terminal output here.",
|
||||
"enum": [
|
||||
"stdout"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "stderr stream.",
|
||||
"enum": [
|
||||
"stderr"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"RateLimitReachedType": {
|
||||
"enum": [
|
||||
"rate_limit_reached",
|
||||
@@ -3930,7 +4033,7 @@
|
||||
"ThreadRealtimeStartedNotification": {
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -5150,6 +5253,48 @@
|
||||
"title": "Command/exec/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/outputDelta"
|
||||
],
|
||||
"title": "Process/outputDeltaNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ProcessOutputDeltaNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Final exit notification for a `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/exited"
|
||||
],
|
||||
"title": "Process/exitedNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ProcessExitedNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/exitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"method": {
|
||||
@@ -5191,6 +5336,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
@@ -5894,4 +6040,4 @@
|
||||
}
|
||||
],
|
||||
"title": "ServerNotification"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -569,30 +569,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -786,6 +762,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/v2/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -4248,6 +4248,48 @@
|
||||
"title": "Command/exec/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/outputDelta"
|
||||
],
|
||||
"title": "Process/outputDeltaNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ProcessOutputDeltaNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Final exit notification for a `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/exited"
|
||||
],
|
||||
"title": "Process/exitedNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ProcessExitedNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/exitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"method": {
|
||||
@@ -4289,6 +4331,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
@@ -8600,6 +8643,7 @@
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -10107,6 +10151,11 @@
|
||||
"ItemCompletedNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"completedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"item": {
|
||||
"$ref": "#/definitions/v2/ThreadItem"
|
||||
},
|
||||
@@ -10118,6 +10167,7 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"completedAtMs",
|
||||
"item",
|
||||
"threadId",
|
||||
"turnId"
|
||||
@@ -10211,6 +10261,11 @@
|
||||
"item": {
|
||||
"$ref": "#/definitions/v2/ThreadItem"
|
||||
},
|
||||
"startedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -10220,6 +10275,7 @@
|
||||
},
|
||||
"required": [
|
||||
"item",
|
||||
"startedAtMs",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
@@ -11954,6 +12010,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -12339,6 +12412,31 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -12349,7 +12447,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/PluginSummary"
|
||||
"$ref": "#/definitions/v2/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -12396,6 +12494,40 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -12480,6 +12612,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/v2/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -12556,6 +12697,122 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessExitedNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Final process exit notification for `process/spawn`.",
|
||||
"properties": {
|
||||
"exitCode": {
|
||||
"description": "Process exit code.",
|
||||
"format": "int32",
|
||||
"type": "integer"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderrCapReached": {
|
||||
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stdout": {
|
||||
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stdoutCapReached": {
|
||||
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exitCode",
|
||||
"processHandle",
|
||||
"stderr",
|
||||
"stderrCapReached",
|
||||
"stdout",
|
||||
"stdoutCapReached"
|
||||
],
|
||||
"title": "ProcessExitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
|
||||
"properties": {
|
||||
"capReached": {
|
||||
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"deltaBase64": {
|
||||
"description": "Base64-encoded output bytes.",
|
||||
"type": "string"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stream": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/ProcessOutputStream"
|
||||
}
|
||||
],
|
||||
"description": "Output stream this chunk belongs to."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"capReached",
|
||||
"deltaBase64",
|
||||
"processHandle",
|
||||
"stream"
|
||||
],
|
||||
"title": "ProcessOutputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputStream": {
|
||||
"description": "Stream label for `process/outputDelta` notifications.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "stdout stream. PTY mode multiplexes terminal output here.",
|
||||
"enum": [
|
||||
"stdout"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "stderr stream.",
|
||||
"enum": [
|
||||
"stderr"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"ProcessTerminalSize": {
|
||||
"description": "PTY size in character cells for `process/spawn` PTY sessions.",
|
||||
"properties": {
|
||||
"cols": {
|
||||
"description": "Terminal width in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
},
|
||||
"rows": {
|
||||
"description": "Terminal height in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"cols",
|
||||
"rows"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProfileV2": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
@@ -13704,6 +13961,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -15051,10 +15330,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -16436,7 +16711,7 @@
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -16552,10 +16827,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -17150,76 +17421,6 @@
|
||||
"title": "ThreadTokenUsageUpdatedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v2/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"backwardsCursor": {
|
||||
"description": "Opaque cursor to pass as `cursor` when reversing `sortDirection`. This is only populated when the page contains at least one turn. Use it with the opposite `sortDirection` to include the anchor turn again and catch updates to that turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/Turn"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"nextCursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn. if None, there are no more turns to return.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"data"
|
||||
],
|
||||
"title": "ThreadTurnsListResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
@@ -18230,4 +18431,4 @@
|
||||
},
|
||||
"title": "CodexAppServerProtocol",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1328,30 +1328,6 @@
|
||||
"title": "Thread/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"thread/turns/list"
|
||||
],
|
||||
"title": "Thread/turns/listRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ThreadTurnsListParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Thread/turns/listRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Append raw Responses API items to the thread history without starting a user turn.",
|
||||
"properties": {
|
||||
@@ -1545,6 +1521,30 @@
|
||||
"title": "Plugin/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"plugin/skill/read"
|
||||
],
|
||||
"title": "Plugin/skill/readRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/PluginSkillReadParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Plugin/skill/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -5099,6 +5099,7 @@
|
||||
},
|
||||
"FileChangeOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
@@ -6761,6 +6762,11 @@
|
||||
"ItemCompletedNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"completedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
@@ -6772,6 +6778,7 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"completedAtMs",
|
||||
"item",
|
||||
"threadId",
|
||||
"turnId"
|
||||
@@ -6865,6 +6872,11 @@
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
"startedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -6874,6 +6886,7 @@
|
||||
},
|
||||
"required": [
|
||||
"item",
|
||||
"startedAtMs",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
@@ -8608,6 +8621,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -8993,6 +9023,31 @@
|
||||
"title": "PluginShareDeleteResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "PluginShareListParams",
|
||||
@@ -9003,7 +9058,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
@@ -9050,6 +9105,40 @@
|
||||
"title": "PluginShareSaveResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSkillReadResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -9134,6 +9223,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -9210,6 +9308,122 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessExitedNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Final process exit notification for `process/spawn`.",
|
||||
"properties": {
|
||||
"exitCode": {
|
||||
"description": "Process exit code.",
|
||||
"format": "int32",
|
||||
"type": "integer"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderrCapReached": {
|
||||
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stdout": {
|
||||
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stdoutCapReached": {
|
||||
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exitCode",
|
||||
"processHandle",
|
||||
"stderr",
|
||||
"stderrCapReached",
|
||||
"stdout",
|
||||
"stdoutCapReached"
|
||||
],
|
||||
"title": "ProcessExitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputDeltaNotification": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
|
||||
"properties": {
|
||||
"capReached": {
|
||||
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"deltaBase64": {
|
||||
"description": "Base64-encoded output bytes.",
|
||||
"type": "string"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stream": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ProcessOutputStream"
|
||||
}
|
||||
],
|
||||
"description": "Output stream this chunk belongs to."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"capReached",
|
||||
"deltaBase64",
|
||||
"processHandle",
|
||||
"stream"
|
||||
],
|
||||
"title": "ProcessOutputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ProcessOutputStream": {
|
||||
"description": "Stream label for `process/outputDelta` notifications.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "stdout stream. PTY mode multiplexes terminal output here.",
|
||||
"enum": [
|
||||
"stdout"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "stderr stream.",
|
||||
"enum": [
|
||||
"stderr"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"ProcessTerminalSize": {
|
||||
"description": "PTY size in character cells for `process/spawn` PTY sessions.",
|
||||
"properties": {
|
||||
"cols": {
|
||||
"description": "Terminal width in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
},
|
||||
"rows": {
|
||||
"description": "Terminal height in character cells.",
|
||||
"format": "uint16",
|
||||
"minimum": 0.0,
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"cols",
|
||||
"rows"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ProfileV2": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
@@ -10358,6 +10572,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -11266,6 +11502,48 @@
|
||||
"title": "Command/exec/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/outputDelta"
|
||||
],
|
||||
"title": "Process/outputDeltaNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ProcessOutputDeltaNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/outputDeltaNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Final exit notification for a `process/spawn` session.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
"process/exited"
|
||||
],
|
||||
"title": "Process/exitedNotificationMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ProcessExitedNotification"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Process/exitedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"method": {
|
||||
@@ -11307,6 +11585,7 @@
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Deprecated legacy apply_patch output stream notification.",
|
||||
"properties": {
|
||||
"method": {
|
||||
"enum": [
|
||||
@@ -12937,10 +13216,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
@@ -14322,7 +14597,7 @@
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
@@ -14438,10 +14713,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
@@ -15036,76 +15307,6 @@
|
||||
"title": "ThreadTokenUsageUpdatedNotification",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadTurnsListResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"backwardsCursor": {
|
||||
"description": "Opaque cursor to pass as `cursor` when reversing `sortDirection`. This is only populated when the page contains at least one turn. Use it with the opposite `sortDirection` to include the anchor turn again and catch updates to that turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/Turn"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"nextCursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn. if None, there are no more turns to return.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"data"
|
||||
],
|
||||
"title": "ThreadTurnsListResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadUnarchiveParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
@@ -16115,4 +16316,4 @@
|
||||
},
|
||||
"title": "CodexAppServerProtocolV2",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Deprecated legacy notification for `apply_patch` textual output.\n\nThe server no longer emits this notification.",
|
||||
"properties": {
|
||||
"delta": {
|
||||
"type": "string"
|
||||
|
||||
@@ -1370,6 +1370,11 @@
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"completedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle completed.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
@@ -1381,10 +1386,11 @@
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"completedAtMs",
|
||||
"item",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
"title": "ItemCompletedNotification",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1373,6 +1373,11 @@
|
||||
"item": {
|
||||
"$ref": "#/definitions/ThreadItem"
|
||||
},
|
||||
"startedAtMs": {
|
||||
"description": "Unix timestamp (in milliseconds) when this item lifecycle started.",
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1382,9 +1387,10 @@
|
||||
},
|
||||
"required": [
|
||||
"item",
|
||||
"startedAtMs",
|
||||
"threadId",
|
||||
"turnId"
|
||||
],
|
||||
"title": "ItemStartedNotification",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,6 +38,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginInstallPolicy": {
|
||||
"enum": [
|
||||
"NOT_AVAILABLE",
|
||||
@@ -299,6 +316,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -44,6 +44,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginDetail": {
|
||||
"properties": {
|
||||
"apps": {
|
||||
@@ -318,6 +335,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -12,6 +12,23 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PluginAvailability": {
|
||||
"oneOf": [
|
||||
{
|
||||
"enum": [
|
||||
"DISABLED_BY_ADMIN"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Plugin-service currently sends `\"ENABLED\"` for available remote plugins. Codex app-server exposes `\"AVAILABLE\"` in its API; the alias keeps decoding compatible with that upstream response.",
|
||||
"enum": [
|
||||
"AVAILABLE"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"PluginInstallPolicy": {
|
||||
"enum": [
|
||||
"NOT_AVAILABLE",
|
||||
@@ -150,6 +167,31 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginShareListItem": {
|
||||
"properties": {
|
||||
"localPluginPath": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AbsolutePathBuf"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
},
|
||||
"shareUrl": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"plugin",
|
||||
"shareUrl"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"PluginSource": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -234,6 +276,15 @@
|
||||
"authPolicy": {
|
||||
"$ref": "#/definitions/PluginAuthPolicy"
|
||||
},
|
||||
"availability": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PluginAvailability"
|
||||
}
|
||||
],
|
||||
"default": "AVAILABLE",
|
||||
"description": "Availability state for installing and using the plugin."
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -278,7 +329,7 @@
|
||||
"properties": {
|
||||
"data": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PluginSummary"
|
||||
"$ref": "#/definitions/PluginShareListItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
|
||||
21 codex-rs/app-server-protocol/schema/json/v2/PluginSkillReadParams.json generated Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"remoteMarketplaceName": {
|
||||
"type": "string"
|
||||
},
|
||||
"remotePluginId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"remoteMarketplaceName",
|
||||
"remotePluginId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "PluginSkillReadParams",
|
||||
"type": "object"
|
||||
}
|
||||
13 codex-rs/app-server-protocol/schema/json/v2/PluginSkillReadResponse.json generated Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"contents": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "PluginSkillReadResponse",
|
||||
"type": "object"
|
||||
}
|
||||
41 codex-rs/app-server-protocol/schema/json/v2/ProcessExitedNotification.json generated Normal file
@@ -0,0 +1,41 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"description": "Final process exit notification for `process/spawn`.",
|
||||
"properties": {
|
||||
"exitCode": {
|
||||
"description": "Process exit code.",
|
||||
"format": "int32",
|
||||
"type": "integer"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderr": {
|
||||
"description": "Buffered stderr capture.\n\nEmpty when stderr was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stderrCapReached": {
|
||||
"description": "Whether stderr reached `outputBytesCap`.\n\nIn streaming mode, stderr is empty and cap state is also reported on the final stderr `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stdout": {
|
||||
"description": "Buffered stdout capture.\n\nEmpty when stdout was streamed via `process/outputDelta`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stdoutCapReached": {
|
||||
"description": "Whether stdout reached `outputBytesCap`.\n\nIn streaming mode, stdout is empty and cap state is also reported on the final stdout `process/outputDelta` notification.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"exitCode",
|
||||
"processHandle",
|
||||
"stderr",
|
||||
"stderrCapReached",
|
||||
"stdout",
|
||||
"stdoutCapReached"
|
||||
],
|
||||
"title": "ProcessExitedNotification",
|
||||
"type": "object"
|
||||
}
|
||||
55 codex-rs/app-server-protocol/schema/json/v2/ProcessOutputDeltaNotification.json generated Normal file
@@ -0,0 +1,55 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"ProcessOutputStream": {
|
||||
"description": "Stream label for `process/outputDelta` notifications.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "stdout stream. PTY mode multiplexes terminal output here.",
|
||||
"enum": [
|
||||
"stdout"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "stderr stream.",
|
||||
"enum": [
|
||||
"stderr"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"description": "Base64-encoded output chunk emitted for a streaming `process/spawn` request.",
|
||||
"properties": {
|
||||
"capReached": {
|
||||
"description": "True on the final streamed chunk for this stream when output was truncated by `outputBytesCap`.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"deltaBase64": {
|
||||
"description": "Base64-encoded output bytes.",
|
||||
"type": "string"
|
||||
},
|
||||
"processHandle": {
|
||||
"description": "Client-supplied, connection-scoped `processHandle` from `process/spawn`.",
|
||||
"type": "string"
|
||||
},
|
||||
"stream": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ProcessOutputStream"
|
||||
}
|
||||
],
|
||||
"description": "Output stream this chunk belongs to."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"capReached",
|
||||
"deltaBase64",
|
||||
"processHandle",
|
||||
"stream"
|
||||
],
|
||||
"title": "ProcessOutputDeltaNotification",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -732,6 +732,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
|
||||
@@ -190,10 +190,6 @@
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live fork state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after forking.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the forked thread, if any.",
|
||||
"type": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
},
|
||||
"description": "EXPERIMENTAL - emitted when thread realtime startup is accepted.",
|
||||
"properties": {
|
||||
"sessionId": {
|
||||
"realtimeSessionId": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
|
||||
@@ -862,6 +862,28 @@
|
||||
"title": "CompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"encrypted_content": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"context_compaction"
|
||||
],
|
||||
"title": "ContextCompactionResponseItemType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "ContextCompactionResponseItem",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"type": {
|
||||
@@ -1045,10 +1067,6 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"excludeTurns": {
|
||||
"description": "When true, return only thread metadata and live-resume state without populating `thread.turns`. This is useful when the client plans to call `thread/turns/list` immediately after resuming.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
"description": "Configuration overrides for the resumed thread, if any.",
|
||||
"type": [
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"SortDirection": {
|
||||
"enum": [
|
||||
"asc",
|
||||
"desc"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"cursor": {
|
||||
"description": "Opaque cursor to pass to the next call to continue after the last turn.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"limit": {
|
||||
"description": "Optional turn page size.",
|
||||
"format": "uint32",
|
||||
"minimum": 0.0,
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortDirection": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SortDirection"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional turn pagination direction; defaults to descending."
|
||||
},
|
||||
"threadId": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"threadId"
|
||||
],
|
||||
"title": "ThreadTurnsListParams",
|
||||
"type": "object"
|
||||
}
|
||||
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -14,4 +14,4 @@ export type ResponseItem = { "type": "message", role: string, content: Array<Con
|
||||
/**
|
||||
* Set when using the Responses API.
|
||||
*/
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "other" };
|
||||
call_id: string | null, status: LocalShellStatus, action: LocalShellAction, } | { "type": "function_call", name: string, namespace?: string, arguments: string, call_id: string, } | { "type": "tool_search_call", call_id: string | null, status?: string, execution: string, arguments: unknown, } | { "type": "function_call_output", call_id: string, output: FunctionCallOutputBody, } | { "type": "custom_tool_call", status?: string, call_id: string, name: string, input: string, } | { "type": "custom_tool_call_output", call_id: string, name?: string, output: FunctionCallOutputBody, } | { "type": "tool_search_output", call_id: string | null, status: string, execution: string, tools: unknown[], } | { "type": "web_search_call", status?: string, action?: WebSearchAction, } | { "type": "image_generation_call", id: string, status: string, revised_prompt?: string, result: string, } | { "type": "compaction", encrypted_content: string, } | { "type": "context_compaction", encrypted_content?: string, } | { "type": "other" };
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -2,4 +2,9 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
/**
|
||||
* Deprecated legacy notification for `apply_patch` textual output.
|
||||
*
|
||||
* The server no longer emits this notification.
|
||||
*/
|
||||
export type FileChangeOutputDeltaNotification = { threadId: string, turnId: string, itemId: string, delta: string, };
|
||||
|
||||
@@ -3,4 +3,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ThreadItem } from "./ThreadItem";

export type ItemCompletedNotification = { item: ThreadItem, threadId: string, turnId: string, };
export type ItemCompletedNotification = { item: ThreadItem, threadId: string, turnId: string,
/**
 * Unix timestamp (in milliseconds) when this item lifecycle completed.
 */
completedAtMs: number, };
@@ -3,4 +3,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { ThreadItem } from "./ThreadItem";

export type ItemStartedNotification = { item: ThreadItem, threadId: string, turnId: string, };
export type ItemStartedNotification = { item: ThreadItem, threadId: string, turnId: string,
/**
 * Unix timestamp (in milliseconds) when this item lifecycle started.
 */
startedAtMs: number, };
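The `item/started` and `item/completed` notifications above now carry `startedAtMs` / `completedAtMs` Unix-millisecond timestamps. A minimal sketch of using the pair to measure item duration; the `ItemKey` correlation key is hypothetical, since the `ThreadItem` shape is not shown in this diff and callers must choose their own way to identify an item:

import type { ItemStartedNotification } from "./ItemStartedNotification";
import type { ItemCompletedNotification } from "./ItemCompletedNotification";

// Hypothetical correlation key; ThreadItem's fields are not shown in this diff,
// so callers supply their own item identifier.
type ItemKey = string;

const startedAt = new Map<ItemKey, number>();

export function onItemStarted(key: ItemKey, n: ItemStartedNotification): void {
  // Unix epoch milliseconds, per the generated type's doc comment.
  startedAt.set(key, n.startedAtMs);
}

export function onItemCompleted(key: ItemKey, n: ItemCompletedNotification): number | undefined {
  const started = startedAt.get(key);
  startedAt.delete(key);
  // Wall-clock duration in milliseconds, if the start notification was seen.
  return started === undefined ? undefined : n.completedAtMs - started;
}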
5 codex-rs/app-server-protocol/schema/typescript/v2/PluginAvailability.ts generated Normal file
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginAvailability = "AVAILABLE" | "DISABLED_BY_ADMIN";
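The schema notes above say the upstream plugin service currently reports `"ENABLED"` while the app-server API exposes `"AVAILABLE"`, with a default of `"AVAILABLE"` on `PluginSummary`. A minimal sketch of how a client might normalize an upstream value and gate install UI on the generated union; the normalization helper and its `rawUpstreamValue` input are illustrative, not part of the generated API:

import type { PluginAvailability } from "./PluginAvailability";
import type { PluginSummary } from "./PluginSummary";

// Illustrative: map the upstream plugin-service value onto the app-server union.
// Per the schema description, upstream "ENABLED" is treated as "AVAILABLE".
export function normalizeAvailability(rawUpstreamValue: string): PluginAvailability {
  return rawUpstreamValue === "DISABLED_BY_ADMIN" ? "DISABLED_BY_ADMIN" : "AVAILABLE";
}

// `availability` defaults to "AVAILABLE", so only an explicit admin disable
// should hide install/enable affordances.
export function canInstall(plugin: PluginSummary): boolean {
  return plugin.availability !== "DISABLED_BY_ADMIN";
}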
7 codex-rs/app-server-protocol/schema/typescript/v2/PluginShareListItem.ts generated Normal file
@@ -0,0 +1,7 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AbsolutePathBuf } from "../AbsolutePathBuf";
import type { PluginSummary } from "./PluginSummary";

export type PluginShareListItem = { plugin: PluginSummary, shareUrl: string, localPluginPath: AbsolutePathBuf | null, };
@@ -1,6 +1,6 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { PluginSummary } from "./PluginSummary";
import type { PluginShareListItem } from "./PluginShareListItem";

export type PluginShareListResponse = { data: Array<PluginSummary>, };
export type PluginShareListResponse = { data: Array<PluginShareListItem>, };
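With `plugin/share/list` now returning `PluginShareListItem` entries instead of bare `PluginSummary` values, a small sketch of consuming the new shape; how the `response` value is obtained from the JSON-RPC client is left to the host application:

import type { PluginShareListResponse } from "./PluginShareListResponse";

export function describeShares(response: PluginShareListResponse): string[] {
  return response.data.map((item) => {
    // Each entry pairs the shared plugin's summary with its share URL; the
    // local path is only present when the plugin also exists on disk.
    const where = item.localPluginPath ?? "(no local copy)";
    return `${item.plugin.name} -> ${item.shareUrl} [${where}]`;
  });
}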
5 codex-rs/app-server-protocol/schema/typescript/v2/PluginSkillReadParams.ts generated Normal file
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadParams = { remoteMarketplaceName: string, remotePluginId: string, skillName: string, };
5 codex-rs/app-server-protocol/schema/typescript/v2/PluginSkillReadResponse.ts generated Normal file
@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type PluginSkillReadResponse = { contents: string | null, };
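The `plugin/skill/read` request added in this diff takes the three identifiers above and returns the skill's `contents` (or `null`). A minimal sketch of issuing it, assuming a generic `sendRequest(method, params)` JSON-RPC helper supplied by the host application (the helper itself is not part of this protocol crate):

import type { PluginSkillReadParams } from "./PluginSkillReadParams";
import type { PluginSkillReadResponse } from "./PluginSkillReadResponse";

// Assumed to exist in the host application; its shape is illustrative only.
declare function sendRequest(method: string, params: unknown): Promise<unknown>;

export async function readRemoteSkill(params: PluginSkillReadParams): Promise<string | null> {
  // Method name matches the `plugin/skill/read` request defined in this diff.
  const response = (await sendRequest("plugin/skill/read", params)) as PluginSkillReadResponse;
  return response.contents ?? null;
}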
@@ -2,8 +2,13 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { PluginAuthPolicy } from "./PluginAuthPolicy";
|
||||
import type { PluginAvailability } from "./PluginAvailability";
|
||||
import type { PluginInstallPolicy } from "./PluginInstallPolicy";
|
||||
import type { PluginInterface } from "./PluginInterface";
|
||||
import type { PluginSource } from "./PluginSource";
|
||||
|
||||
export type PluginSummary = { id: string, name: string, source: PluginSource, installed: boolean, enabled: boolean, installPolicy: PluginInstallPolicy, authPolicy: PluginAuthPolicy, interface: PluginInterface | null, };
|
||||
export type PluginSummary = { id: string, name: string, source: PluginSource, installed: boolean, enabled: boolean, installPolicy: PluginInstallPolicy, authPolicy: PluginAuthPolicy,
|
||||
/**
|
||||
* Availability state for installing and using the plugin.
|
||||
*/
|
||||
availability: PluginAvailability, interface: PluginInterface | null, };
|
||||
|
||||
42 codex-rs/app-server-protocol/schema/typescript/v2/ProcessExitedNotification.ts generated Normal file
@@ -0,0 +1,42 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
/**
|
||||
* Final process exit notification for `process/spawn`.
|
||||
*/
|
||||
export type ProcessExitedNotification = {
|
||||
/**
|
||||
* Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
*/
|
||||
processHandle: string,
|
||||
/**
|
||||
* Process exit code.
|
||||
*/
|
||||
exitCode: number,
|
||||
/**
|
||||
* Buffered stdout capture.
|
||||
*
|
||||
* Empty when stdout was streamed via `process/outputDelta`.
|
||||
*/
|
||||
stdout: string,
|
||||
/**
|
||||
* Whether stdout reached `outputBytesCap`.
|
||||
*
|
||||
* In streaming mode, stdout is empty and cap state is also reported on the
|
||||
* final stdout `process/outputDelta` notification.
|
||||
*/
|
||||
stdoutCapReached: boolean,
|
||||
/**
|
||||
* Buffered stderr capture.
|
||||
*
|
||||
* Empty when stderr was streamed via `process/outputDelta`.
|
||||
*/
|
||||
stderr: string,
|
||||
/**
|
||||
* Whether stderr reached `outputBytesCap`.
|
||||
*
|
||||
* In streaming mode, stderr is empty and cap state is also reported on the
|
||||
* final stderr `process/outputDelta` notification.
|
||||
*/
|
||||
stderrCapReached: boolean, };
|
||||
26 codex-rs/app-server-protocol/schema/typescript/v2/ProcessOutputDeltaNotification.ts generated Normal file
@@ -0,0 +1,26 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ProcessOutputStream } from "./ProcessOutputStream";
|
||||
|
||||
/**
|
||||
* Base64-encoded output chunk emitted for a streaming `process/spawn` request.
|
||||
*/
|
||||
export type ProcessOutputDeltaNotification = {
|
||||
/**
|
||||
* Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
*/
|
||||
processHandle: string,
|
||||
/**
|
||||
* Output stream this chunk belongs to.
|
||||
*/
|
||||
stream: ProcessOutputStream,
|
||||
/**
|
||||
* Base64-encoded output bytes.
|
||||
*/
|
||||
deltaBase64: string,
|
||||
/**
|
||||
* True on the final streamed chunk for this stream when output was
|
||||
* truncated by `outputBytesCap`.
|
||||
*/
|
||||
capReached: boolean, };
|
||||
8 codex-rs/app-server-protocol/schema/typescript/v2/ProcessOutputStream.ts generated Normal file
@@ -0,0 +1,8 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

/**
 * Stream label for `process/outputDelta` notifications.
 */
export type ProcessOutputStream = "stdout" | "stderr";
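Putting the generated process types together: a sketch of a client-side handler that decodes streamed `process/outputDelta` chunks and finalizes on `process/exited`. This assumes a Node-style environment for `Buffer`; the JSON-RPC plumbing that delivers the notifications is left to the host application.

import type { ProcessOutputDeltaNotification } from "./ProcessOutputDeltaNotification";
import type { ProcessExitedNotification } from "./ProcessExitedNotification";

const buffers = new Map<string, { stdout: string; stderr: string }>();

export function onProcessOutputDelta(n: ProcessOutputDeltaNotification): void {
  const entry = buffers.get(n.processHandle) ?? { stdout: "", stderr: "" };
  // Chunks are base64-encoded bytes; decode before appending.
  entry[n.stream] += Buffer.from(n.deltaBase64, "base64").toString("utf8");
  if (n.capReached) {
    // Per the schema, this marks the final chunk for a stream truncated by `outputBytesCap`.
    console.warn(`${n.processHandle}: ${n.stream} output truncated`);
  }
  buffers.set(n.processHandle, entry);
}

export function onProcessExited(n: ProcessExitedNotification): { exitCode: number; stdout: string; stderr: string } {
  const streamed = buffers.get(n.processHandle);
  buffers.delete(n.processHandle);
  // In buffered mode the exit notification carries stdout/stderr directly;
  // in streaming mode those fields are empty and the streamed chunks apply.
  return {
    exitCode: n.exitCode,
    stdout: n.stdout || streamed?.stdout || "",
    stderr: n.stderr || streamed?.stderr || "",
  };
}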
16 codex-rs/app-server-protocol/schema/typescript/v2/ProcessTerminalSize.ts generated Normal file
@@ -0,0 +1,16 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
/**
|
||||
* PTY size in character cells for `process/spawn` PTY sessions.
|
||||
*/
|
||||
export type ProcessTerminalSize = {
|
||||
/**
|
||||
* Terminal height in character cells.
|
||||
*/
|
||||
rows: number,
|
||||
/**
|
||||
* Terminal width in character cells.
|
||||
*/
|
||||
cols: number, };
|
||||
@@ -23,9 +23,4 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier
|
||||
* Override where approval requests are routed for review on this thread
|
||||
* and subsequent turns.
|
||||
*/
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, ephemeral?: boolean, /**
|
||||
* When true, return only thread metadata and live fork state without
|
||||
* populating `thread.turns`. This is useful when the client plans to call
|
||||
* `thread/turns/list` immediately after forking.
|
||||
*/
|
||||
excludeTurns?: boolean};
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, ephemeral?: boolean};
|
||||
|
||||
@@ -6,4 +6,4 @@ import type { RealtimeConversationVersion } from "../RealtimeConversationVersion
|
||||
/**
|
||||
* EXPERIMENTAL - emitted when thread realtime startup is accepted.
|
||||
*/
|
||||
export type ThreadRealtimeStartedNotification = { threadId: string, sessionId: string | null, version: RealtimeConversationVersion, };
|
||||
export type ThreadRealtimeStartedNotification = { threadId: string, realtimeSessionId: string | null, version: RealtimeConversationVersion, };
|
||||
|
||||
@@ -26,9 +26,4 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier
|
||||
* Override where approval requests are routed for review on this thread
|
||||
* and subsequent turns.
|
||||
*/
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, /**
|
||||
* When true, return only thread metadata and live-resume state without
|
||||
* populating `thread.turns`. This is useful when the client plans to call
|
||||
* `thread/turns/list` immediately after resuming.
|
||||
*/
|
||||
excludeTurns?: boolean};
|
||||
approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null};
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { SortDirection } from "./SortDirection";
|
||||
|
||||
export type ThreadTurnsListParams = { threadId: string,
|
||||
/**
|
||||
* Opaque cursor to pass to the next call to continue after the last turn.
|
||||
*/
|
||||
cursor?: string | null,
|
||||
/**
|
||||
* Optional turn page size.
|
||||
*/
|
||||
limit?: number | null,
|
||||
/**
|
||||
* Optional turn pagination direction; defaults to descending.
|
||||
*/
|
||||
sortDirection?: SortDirection | null, };
|
||||
@@ -1,18 +0,0 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { Turn } from "./Turn";
|
||||
|
||||
export type ThreadTurnsListResponse = { data: Array<Turn>,
|
||||
/**
|
||||
* Opaque cursor to pass to the next call to continue after the last turn.
|
||||
* if None, there are no more turns to return.
|
||||
*/
|
||||
nextCursor: string | null,
|
||||
/**
|
||||
* Opaque cursor to pass as `cursor` when reversing `sortDirection`.
|
||||
* This is only populated when the page contains at least one turn.
|
||||
* Use it with the opposite `sortDirection` to include the anchor turn again
|
||||
* and catch updates to that turn.
|
||||
*/
|
||||
backwardsCursor: string | null, };
|
||||
@@ -270,6 +270,7 @@ export type { PermissionsRequestApprovalParams } from "./PermissionsRequestAppro
|
||||
export type { PermissionsRequestApprovalResponse } from "./PermissionsRequestApprovalResponse";
|
||||
export type { PlanDeltaNotification } from "./PlanDeltaNotification";
|
||||
export type { PluginAuthPolicy } from "./PluginAuthPolicy";
|
||||
export type { PluginAvailability } from "./PluginAvailability";
|
||||
export type { PluginDetail } from "./PluginDetail";
|
||||
export type { PluginInstallParams } from "./PluginInstallParams";
|
||||
export type { PluginInstallPolicy } from "./PluginInstallPolicy";
|
||||
@@ -282,15 +283,22 @@ export type { PluginReadParams } from "./PluginReadParams";
|
||||
export type { PluginReadResponse } from "./PluginReadResponse";
|
||||
export type { PluginShareDeleteParams } from "./PluginShareDeleteParams";
|
||||
export type { PluginShareDeleteResponse } from "./PluginShareDeleteResponse";
|
||||
export type { PluginShareListItem } from "./PluginShareListItem";
|
||||
export type { PluginShareListParams } from "./PluginShareListParams";
|
||||
export type { PluginShareListResponse } from "./PluginShareListResponse";
|
||||
export type { PluginShareSaveParams } from "./PluginShareSaveParams";
|
||||
export type { PluginShareSaveResponse } from "./PluginShareSaveResponse";
|
||||
export type { PluginSkillReadParams } from "./PluginSkillReadParams";
|
||||
export type { PluginSkillReadResponse } from "./PluginSkillReadResponse";
|
||||
export type { PluginSource } from "./PluginSource";
|
||||
export type { PluginSummary } from "./PluginSummary";
|
||||
export type { PluginUninstallParams } from "./PluginUninstallParams";
|
||||
export type { PluginUninstallResponse } from "./PluginUninstallResponse";
|
||||
export type { PluginsMigration } from "./PluginsMigration";
|
||||
export type { ProcessExitedNotification } from "./ProcessExitedNotification";
|
||||
export type { ProcessOutputDeltaNotification } from "./ProcessOutputDeltaNotification";
|
||||
export type { ProcessOutputStream } from "./ProcessOutputStream";
|
||||
export type { ProcessTerminalSize } from "./ProcessTerminalSize";
|
||||
export type { ProfileV2 } from "./ProfileV2";
|
||||
export type { RateLimitReachedType } from "./RateLimitReachedType";
|
||||
export type { RateLimitSnapshot } from "./RateLimitSnapshot";
|
||||
@@ -395,8 +403,6 @@ export type { ThreadStatus } from "./ThreadStatus";
|
||||
export type { ThreadStatusChangedNotification } from "./ThreadStatusChangedNotification";
|
||||
export type { ThreadTokenUsage } from "./ThreadTokenUsage";
|
||||
export type { ThreadTokenUsageUpdatedNotification } from "./ThreadTokenUsageUpdatedNotification";
|
||||
export type { ThreadTurnsListParams } from "./ThreadTurnsListParams";
|
||||
export type { ThreadTurnsListResponse } from "./ThreadTurnsListResponse";
|
||||
export type { ThreadUnarchiveParams } from "./ThreadUnarchiveParams";
|
||||
export type { ThreadUnarchiveResponse } from "./ThreadUnarchiveResponse";
|
||||
export type { ThreadUnarchivedNotification } from "./ThreadUnarchivedNotification";
|
||||
|
||||
@@ -14,6 +14,7 @@ pub use export::generate_ts_with_options;
|
||||
pub use export::generate_types;
|
||||
pub use jsonrpc_lite::*;
|
||||
pub use protocol::common::*;
|
||||
pub use protocol::event_mapping::*;
|
||||
pub use protocol::item_builders::*;
|
||||
pub use protocol::thread_history::*;
|
||||
pub use protocol::v1::ApplyPatchApprovalParams;
|
||||
|
||||
@@ -80,6 +80,7 @@ pub enum ClientRequestSerializationScope {
|
||||
Thread { thread_id: String },
|
||||
ThreadPath { path: PathBuf },
|
||||
CommandExecProcess { process_id: String },
|
||||
Process { process_handle: String },
|
||||
FuzzyFileSearchSession { session_id: String },
|
||||
FsWatch { watch_id: String },
|
||||
McpOauth { server_name: String },
|
||||
@@ -127,6 +128,11 @@ macro_rules! serialization_scope_expr {
|
||||
process_id: $actual_params.$field.clone(),
|
||||
})
|
||||
};
|
||||
($actual_params:ident, process_handle($params:ident . $field:ident)) => {
|
||||
Some(ClientRequestSerializationScope::Process {
|
||||
process_handle: $actual_params.$field.clone(),
|
||||
})
|
||||
};
|
||||
($actual_params:ident, fuzzy_session_id($params:ident . $field:ident)) => {
|
||||
Some(ClientRequestSerializationScope::FuzzyFileSearchSession {
|
||||
session_id: $actual_params.$field.clone(),
|
||||
@@ -564,6 +570,7 @@ client_request_definitions! {
|
||||
serialization: thread_id(params.thread_id),
|
||||
response: v2::ThreadReadResponse,
|
||||
},
|
||||
#[experimental("thread/turns/list")]
|
||||
ThreadTurnsList => "thread/turns/list" {
|
||||
params: v2::ThreadTurnsListParams,
|
||||
// Explicitly concurrent: this primarily reads append-only rollout storage.
|
||||
@@ -611,6 +618,11 @@ client_request_definitions! {
|
||||
serialization: global("config"),
|
||||
response: v2::PluginReadResponse,
|
||||
},
|
||||
PluginSkillRead => "plugin/skill/read" {
|
||||
params: v2::PluginSkillReadParams,
|
||||
serialization: global("config"),
|
||||
response: v2::PluginSkillReadResponse,
|
||||
},
|
||||
PluginShareSave => "plugin/share/save" {
|
||||
params: v2::PluginShareSaveParams,
|
||||
serialization: global("config"),
|
||||
@@ -894,6 +906,34 @@ client_request_definitions! {
|
||||
serialization: command_process_id(params.process_id),
|
||||
response: v2::CommandExecResizeResponse,
|
||||
},
|
||||
#[experimental("process/spawn")]
|
||||
/// Spawn a standalone process (argv vector) without a Codex sandbox.
|
||||
ProcessSpawn => "process/spawn" {
|
||||
params: v2::ProcessSpawnParams,
|
||||
serialization: process_handle(params.process_handle),
|
||||
response: v2::ProcessSpawnResponse,
|
||||
},
|
||||
#[experimental("process/writeStdin")]
|
||||
/// Write stdin bytes to a running `process/spawn` session or close stdin.
|
||||
ProcessWriteStdin => "process/writeStdin" {
|
||||
params: v2::ProcessWriteStdinParams,
|
||||
serialization: process_handle(params.process_handle),
|
||||
response: v2::ProcessWriteStdinResponse,
|
||||
},
|
||||
#[experimental("process/kill")]
|
||||
/// Terminate a running `process/spawn` session by client-supplied `processHandle`.
|
||||
ProcessKill => "process/kill" {
|
||||
params: v2::ProcessKillParams,
|
||||
serialization: process_handle(params.process_handle),
|
||||
response: v2::ProcessKillResponse,
|
||||
},
|
||||
#[experimental("process/resizePty")]
|
||||
/// Resize a running PTY-backed `process/spawn` session by client-supplied `processHandle`.
|
||||
ProcessResizePty => "process/resizePty" {
|
||||
params: v2::ProcessResizePtyParams,
|
||||
serialization: process_handle(params.process_handle),
|
||||
response: v2::ProcessResizePtyResponse,
|
||||
},
|
||||
|
||||
ConfigRead => "config/read" {
|
||||
params: v2::ConfigReadParams,
|
||||
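The request definitions above add a `process/*` family routed by a client-supplied, connection-scoped `processHandle`. A hedged sketch of the wire-level flow; the method names come from this diff, but the exact shapes of `ProcessSpawnParams`, `ProcessWriteStdinParams`, and `ProcessKillParams` are not shown here, so every field other than `processHandle` is illustrative only.

// Illustrative JSON-RPC frames; only the method names and the `processHandle`
// routing are taken from this diff, the remaining params are assumptions.
const processHandle = "proc-1";

const spawn = {
  jsonrpc: "2.0",
  id: 1,
  method: "process/spawn",
  params: { processHandle /* plus argv/cwd fields defined by ProcessSpawnParams */ },
};

const writeStdin = {
  jsonrpc: "2.0",
  id: 2,
  method: "process/writeStdin",
  params: { processHandle /* plus the stdin payload defined by ProcessWriteStdinParams */ },
};

const kill = {
  jsonrpc: "2.0",
  id: 3,
  method: "process/kill",
  params: { processHandle },
};

// While the process runs, the server streams `process/outputDelta`
// notifications and finishes with a single `process/exited` notification,
// both carrying the same `processHandle`.
export const exampleFrames = [spawn, writeStdin, kill];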
@@ -1395,8 +1435,15 @@ server_notification_definitions! {
PlanDelta => "item/plan/delta" (v2::PlanDeltaNotification),
/// Stream base64-encoded stdout/stderr chunks for a running `command/exec` session.
CommandExecOutputDelta => "command/exec/outputDelta" (v2::CommandExecOutputDeltaNotification),
/// Stream base64-encoded stdout/stderr chunks for a running `process/spawn` session.
#[experimental("process/outputDelta")]
ProcessOutputDelta => "process/outputDelta" (v2::ProcessOutputDeltaNotification),
/// Final exit notification for a `process/spawn` session.
#[experimental("process/exited")]
ProcessExited => "process/exited" (v2::ProcessExitedNotification),
CommandExecutionOutputDelta => "item/commandExecution/outputDelta" (v2::CommandExecutionOutputDeltaNotification),
TerminalInteraction => "item/commandExecution/terminalInteraction" (v2::TerminalInteractionNotification),
/// Deprecated legacy apply_patch output stream notification.
FileChangeOutputDelta => "item/fileChange/outputDelta" (v2::FileChangeOutputDeltaNotification),
FileChangePatchUpdated => "item/fileChange/patchUpdated" (v2::FileChangePatchUpdatedNotification),
ServerRequestResolved => "serverRequest/resolved" (v2::ServerRequestResolvedNotification),
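As a hedged, editorial sketch (not part of this change) of how a client might consume the two process notifications registered above, assuming the `base64` crate is available for decoding `deltaBase64`:
use base64::Engine as _;
use base64::engine::general_purpose::STANDARD;

fn on_process_output_delta(n: &v2::ProcessOutputDeltaNotification) {
    // The delta carries raw bytes, so decode before rendering.
    if let Ok(bytes) = STANDARD.decode(&n.delta_base64) {
        print!("{}", String::from_utf8_lossy(&bytes));
    }
    if n.cap_reached {
        eprintln!("[output truncated by outputBytesCap]");
    }
}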
|
||||
@@ -2558,7 +2605,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(Some("You are on a call".to_string())),
|
||||
session_id: Some("sess_456".to_string()),
|
||||
realtime_session_id: Some("sess_456".to_string()),
|
||||
transport: None,
|
||||
voice: Some(RealtimeVoice::Marin),
|
||||
},
|
||||
@@ -2571,7 +2618,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": "You are on a call",
|
||||
"sessionId": "sess_456",
|
||||
"realtimeSessionId": "sess_456",
|
||||
"transport": null,
|
||||
"voice": "marin"
|
||||
}
|
||||
@@ -2589,7 +2636,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: None,
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2601,7 +2648,7 @@ mod tests {
|
||||
"params": {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2615,7 +2662,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(None),
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2628,7 +2675,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": null,
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2642,7 +2689,7 @@ mod tests {
|
||||
"params": {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2659,7 +2706,7 @@ mod tests {
|
||||
"threadId": "thr_123",
|
||||
"outputModality": "audio",
|
||||
"prompt": null,
|
||||
"sessionId": null,
|
||||
"realtimeSessionId": null,
|
||||
"transport": null,
|
||||
"voice": null
|
||||
}
|
||||
@@ -2771,7 +2818,7 @@ mod tests {
|
||||
thread_id: "thr_123".to_string(),
|
||||
output_modality: RealtimeOutputModality::Audio,
|
||||
prompt: Some(Some("You are on a call".to_string())),
|
||||
session_id: None,
|
||||
realtime_session_id: None,
|
||||
transport: None,
|
||||
voice: None,
|
||||
},
|
||||
@@ -2854,7 +2901,7 @@ mod tests {
|
||||
let notification =
|
||||
ServerNotification::ThreadRealtimeStarted(v2::ThreadRealtimeStartedNotification {
|
||||
thread_id: "thr_123".to_string(),
|
||||
session_id: Some("sess_456".to_string()),
|
||||
realtime_session_id: Some("sess_456".to_string()),
|
||||
version: RealtimeConversationVersion::V1,
|
||||
});
|
||||
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(¬ification);
|
||||
|
||||
597
codex-rs/app-server-protocol/src/protocol/event_mapping.rs
Normal file
@@ -0,0 +1,597 @@
|
||||
use crate::protocol::common::ServerNotification;
|
||||
use crate::protocol::item_builders::build_command_execution_begin_item;
|
||||
use crate::protocol::item_builders::build_command_execution_end_item;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use crate::protocol::v2::AgentMessageDeltaNotification;
|
||||
use crate::protocol::v2::CollabAgentState;
|
||||
use crate::protocol::v2::CollabAgentTool;
|
||||
use crate::protocol::v2::CollabAgentToolCallStatus;
|
||||
use crate::protocol::v2::CommandExecutionOutputDeltaNotification;
|
||||
use crate::protocol::v2::DynamicToolCallOutputContentItem;
|
||||
use crate::protocol::v2::DynamicToolCallStatus;
|
||||
use crate::protocol::v2::FileChangePatchUpdatedNotification;
|
||||
use crate::protocol::v2::ItemCompletedNotification;
|
||||
use crate::protocol::v2::ItemStartedNotification;
|
||||
use crate::protocol::v2::PlanDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryPartAddedNotification;
|
||||
use crate::protocol::v2::ReasoningSummaryTextDeltaNotification;
|
||||
use crate::protocol::v2::ReasoningTextDeltaNotification;
|
||||
use crate::protocol::v2::TerminalInteractionNotification;
|
||||
use crate::protocol::v2::ThreadItem;
|
||||
use codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem as CoreDynamicToolCallOutputContentItem;
|
||||
use codex_protocol::protocol::EventMsg;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Build the v2 app-server notification that directly corresponds to a single core event.
|
||||
///
|
||||
/// This only covers the stateless event-to-notification projections that have a one-to-one
|
||||
/// mapping. Callers remain responsible for any surrounding state checks or side effects before
|
||||
/// invoking this helper.
|
||||
pub fn item_event_to_server_notification(
|
||||
msg: EventMsg,
|
||||
thread_id: &str,
|
||||
turn_id: &str,
|
||||
) -> ServerNotification {
|
||||
let thread_id = thread_id.to_string();
|
||||
let turn_id = turn_id.to_string();
|
||||
match msg {
|
||||
EventMsg::DynamicToolCallResponse(response) => {
|
||||
let status = if response.success {
|
||||
DynamicToolCallStatus::Completed
|
||||
} else {
|
||||
DynamicToolCallStatus::Failed
|
||||
};
|
||||
let duration_ms = i64::try_from(response.duration.as_millis()).ok();
|
||||
let item = ThreadItem::DynamicToolCall {
|
||||
id: response.call_id,
|
||||
namespace: response.namespace,
|
||||
tool: response.tool,
|
||||
arguments: response.arguments,
|
||||
status,
|
||||
content_items: Some(
|
||||
response
|
||||
.content_items
|
||||
.into_iter()
|
||||
.map(|item| match item {
|
||||
CoreDynamicToolCallOutputContentItem::InputText { text } => {
|
||||
DynamicToolCallOutputContentItem::InputText { text }
|
||||
}
|
||||
CoreDynamicToolCallOutputContentItem::InputImage { image_url } => {
|
||||
DynamicToolCallOutputContentItem::InputImage { image_url }
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
success: Some(response.success),
|
||||
duration_ms,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id: response.turn_id,
|
||||
item,
|
||||
completed_at_ms: response.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::SpawnAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: Vec::new(),
|
||||
prompt: Some(begin_event.prompt),
|
||||
model: Some(begin_event.model),
|
||||
reasoning_effort: Some(begin_event.reasoning_effort),
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentSpawnEnd(end_event) => {
|
||||
let has_receiver = end_event.new_thread_id.is_some();
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ if has_receiver => CollabAgentToolCallStatus::Completed,
|
||||
_ => CollabAgentToolCallStatus::Failed,
|
||||
};
|
||||
let (receiver_thread_ids, agents_states) = match end_event.new_thread_id {
|
||||
Some(id) => {
|
||||
let receiver_id = id.to_string();
|
||||
let received_status = CollabAgentState::from(end_event.status.clone());
|
||||
(
|
||||
vec![receiver_id.clone()],
|
||||
[(receiver_id, received_status)].into_iter().collect(),
|
||||
)
|
||||
}
|
||||
None => (Vec::new(), HashMap::new()),
|
||||
};
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::SpawnAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: Some(end_event.prompt),
|
||||
model: Some(end_event.model),
|
||||
reasoning_effort: Some(end_event.reasoning_effort),
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionBegin(begin_event) => {
|
||||
let receiver_thread_ids = vec![begin_event.receiver_thread_id.to_string()];
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::SendInput,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: Some(begin_event.prompt),
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabAgentInteractionEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let received_status = CollabAgentState::from(end_event.status);
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::SendInput,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id.clone()],
|
||||
prompt: Some(end_event.prompt),
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: [(receiver_id, received_status)].into_iter().collect(),
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingBegin(begin_event) => {
|
||||
let receiver_thread_ids = begin_event
|
||||
.receiver_thread_ids
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::Wait,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabWaitingEnd(end_event) => {
|
||||
let status = if end_event.statuses.values().any(|status| {
|
||||
matches!(
|
||||
status,
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound
|
||||
)
|
||||
}) {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
} else {
|
||||
CollabAgentToolCallStatus::Completed
|
||||
};
|
||||
let receiver_thread_ids = end_event.statuses.keys().map(ToString::to_string).collect();
|
||||
let agents_states = end_event
|
||||
.statuses
|
||||
.iter()
|
||||
.map(|(id, status)| (id.to_string(), CollabAgentState::from(status.clone())))
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::Wait,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids,
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::CloseAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabCloseEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let agents_states = [(
|
||||
receiver_id.clone(),
|
||||
CollabAgentState::from(end_event.status),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::CloseAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeBegin(begin_event) => {
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: begin_event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: begin_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![begin_event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
};
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
started_at_ms: begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::CollabResumeEnd(end_event) => {
|
||||
let status = match &end_event.status {
|
||||
codex_protocol::protocol::AgentStatus::Errored(_)
|
||||
| codex_protocol::protocol::AgentStatus::NotFound => {
|
||||
CollabAgentToolCallStatus::Failed
|
||||
}
|
||||
_ => CollabAgentToolCallStatus::Completed,
|
||||
};
|
||||
let receiver_id = end_event.receiver_thread_id.to_string();
|
||||
let agents_states = [(
|
||||
receiver_id.clone(),
|
||||
CollabAgentState::from(end_event.status),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let item = ThreadItem::CollabAgentToolCall {
|
||||
id: end_event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status,
|
||||
sender_thread_id: end_event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states,
|
||||
};
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item,
|
||||
completed_at_ms: end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::AgentMessageContentDelta(event) => {
|
||||
let codex_protocol::protocol::AgentMessageContentDeltaEvent { item_id, delta, .. } =
|
||||
event;
|
||||
ServerNotification::AgentMessageDelta(AgentMessageDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id,
|
||||
delta,
|
||||
})
|
||||
}
|
||||
EventMsg::PlanDelta(event) => ServerNotification::PlanDelta(PlanDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
}),
|
||||
EventMsg::ReasoningContentDelta(event) => {
|
||||
ServerNotification::ReasoningSummaryTextDelta(ReasoningSummaryTextDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
summary_index: event.summary_index,
|
||||
})
|
||||
}
|
||||
EventMsg::ReasoningRawContentDelta(event) => {
|
||||
ServerNotification::ReasoningTextDelta(ReasoningTextDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
delta: event.delta,
|
||||
content_index: event.content_index,
|
||||
})
|
||||
}
|
||||
EventMsg::AgentReasoningSectionBreak(event) => {
|
||||
ServerNotification::ReasoningSummaryPartAdded(ReasoningSummaryPartAddedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.item_id,
|
||||
summary_index: event.summary_index,
|
||||
})
|
||||
}
|
||||
EventMsg::ItemStarted(item_started_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_started_event.item.into(),
|
||||
started_at_ms: item_started_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::ItemCompleted(item_completed_event) => {
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: item_completed_event.item.into(),
|
||||
completed_at_ms: item_completed_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::PatchApplyUpdated(event) => {
|
||||
ServerNotification::FileChangePatchUpdated(FileChangePatchUpdatedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: event.call_id,
|
||||
changes: convert_patch_changes(&event.changes),
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandBegin(exec_command_begin_event) => {
|
||||
ServerNotification::ItemStarted(ItemStartedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_begin_item(&exec_command_begin_event),
|
||||
started_at_ms: exec_command_begin_event.started_at_ms,
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => {
|
||||
let item_id = exec_command_output_delta_event.call_id;
|
||||
let delta = String::from_utf8_lossy(&exec_command_output_delta_event.chunk).to_string();
|
||||
ServerNotification::CommandExecutionOutputDelta(
|
||||
CommandExecutionOutputDeltaNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id,
|
||||
delta,
|
||||
},
|
||||
)
|
||||
}
|
||||
EventMsg::TerminalInteraction(terminal_event) => {
|
||||
ServerNotification::TerminalInteraction(TerminalInteractionNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item_id: terminal_event.call_id,
|
||||
process_id: terminal_event.process_id,
|
||||
stdin: terminal_event.stdin,
|
||||
})
|
||||
}
|
||||
EventMsg::ExecCommandEnd(exec_command_end_event) => {
|
||||
ServerNotification::ItemCompleted(ItemCompletedNotification {
|
||||
thread_id,
|
||||
turn_id,
|
||||
item: build_command_execution_end_item(&exec_command_end_event),
|
||||
completed_at_ms: exec_command_end_event.completed_at_ms,
|
||||
})
|
||||
}
|
||||
_ => unreachable!("unsupported item event"),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::CollabResumeBeginEvent;
|
||||
use codex_protocol::protocol::CollabResumeEndEvent;
|
||||
use codex_protocol::protocol::ExecCommandOutputDeltaEvent;
|
||||
use codex_protocol::protocol::ExecOutputStream;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
fn assert_item_started_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: ItemStartedNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::ItemStarted(payload) => assert_eq!(payload, expected),
|
||||
other => panic!("expected item started notification, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_item_completed_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: ItemCompletedNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::ItemCompleted(payload) => assert_eq!(payload, expected),
|
||||
other => panic!("expected item completed notification, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_command_execution_output_delta_server_notification(
|
||||
notification: ServerNotification,
|
||||
expected: CommandExecutionOutputDeltaNotification,
|
||||
) {
|
||||
match notification {
|
||||
ServerNotification::CommandExecutionOutputDelta(payload) => {
|
||||
assert_eq!(payload, expected)
|
||||
}
|
||||
other => panic!("expected command execution output delta, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collab_resume_begin_maps_to_item_started_resume_agent() {
|
||||
let event = CollabResumeBeginEvent {
|
||||
call_id: "call-1".to_string(),
|
||||
started_at_ms: 123,
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
receiver_agent_role: None,
|
||||
};
|
||||
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::CollabResumeBegin(event.clone()),
|
||||
"thread-1",
|
||||
"turn-1",
|
||||
);
|
||||
assert_item_started_server_notification(
|
||||
notification,
|
||||
ItemStartedNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn-1".to_string(),
|
||||
started_at_ms: event.started_at_ms,
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::InProgress,
|
||||
sender_thread_id: event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![event.receiver_thread_id.to_string()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: HashMap::new(),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collab_resume_end_maps_to_item_completed_resume_agent() {
|
||||
let event = CollabResumeEndEvent {
|
||||
call_id: "call-2".to_string(),
|
||||
completed_at_ms: 456,
|
||||
sender_thread_id: ThreadId::new(),
|
||||
receiver_thread_id: ThreadId::new(),
|
||||
receiver_agent_nickname: None,
|
||||
receiver_agent_role: None,
|
||||
status: codex_protocol::protocol::AgentStatus::NotFound,
|
||||
};
|
||||
|
||||
let receiver_id = event.receiver_thread_id.to_string();
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::CollabResumeEnd(event.clone()),
|
||||
"thread-2",
|
||||
"turn-2",
|
||||
);
|
||||
assert_item_completed_server_notification(
|
||||
notification,
|
||||
ItemCompletedNotification {
|
||||
thread_id: "thread-2".to_string(),
|
||||
turn_id: "turn-2".to_string(),
|
||||
completed_at_ms: event.completed_at_ms,
|
||||
item: ThreadItem::CollabAgentToolCall {
|
||||
id: event.call_id,
|
||||
tool: CollabAgentTool::ResumeAgent,
|
||||
status: CollabAgentToolCallStatus::Failed,
|
||||
sender_thread_id: event.sender_thread_id.to_string(),
|
||||
receiver_thread_ids: vec![receiver_id.clone()],
|
||||
prompt: None,
|
||||
model: None,
|
||||
reasoning_effort: None,
|
||||
agents_states: [(
|
||||
receiver_id,
|
||||
CollabAgentState::from(codex_protocol::protocol::AgentStatus::NotFound),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exec_command_output_delta_maps_to_command_execution_output_delta() {
|
||||
let notification = item_event_to_server_notification(
|
||||
EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
|
||||
call_id: "call-1".to_string(),
|
||||
stream: ExecOutputStream::Stdout,
|
||||
chunk: b"hello".to_vec(),
|
||||
}),
|
||||
"thread-1",
|
||||
"turn-1",
|
||||
);
|
||||
|
||||
assert_command_execution_output_delta_server_notification(
|
||||
notification,
|
||||
CommandExecutionOutputDeltaNotification {
|
||||
thread_id: "thread-1".to_string(),
|
||||
turn_id: "turn-1".to_string(),
|
||||
item_id: "call-1".to_string(),
|
||||
delta: "hello".to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,8 @@
//! Shared builders for synthetic [`ThreadItem`] values emitted by the app-server layer.
//! Shared builders for app-server [`ThreadItem`] values derived from compatibility events.
//!
//! These items do not come from first-class core `ItemStarted` / `ItemCompleted` events.
//! Instead, the app-server synthesizes them so clients can render a coherent lifecycle for
//! approvals and other pre-execution flows before the underlying tool has started or when the
//! tool never starts at all.
//! Most live tool items now come from first-class core `ItemStarted` / `ItemCompleted` events.
//! These builders remain for approval flows, rebuilt legacy history, and other pre-execution
//! paths where the underlying tool has not started or never starts at all.
//!
//! Keeping these builders in one place is useful for two reasons:
//! - Live notifications and rebuilt `thread/read` history both need to construct the same
|
||||
@@ -2,6 +2,7 @@
// Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`.

pub mod common;
pub mod event_mapping;
pub mod item_builders;
mod mappers;
mod serde_helpers;
||||
@@ -217,7 +217,6 @@ impl ThreadHistoryBuilder {
|
||||
EventMsg::Error(payload) => self.handle_error(payload),
|
||||
EventMsg::TokenCount(_) => {}
|
||||
EventMsg::ThreadRolledBack(payload) => self.handle_thread_rollback(payload),
|
||||
EventMsg::UndoCompleted(_) => {}
|
||||
EventMsg::TurnAborted(payload) => self.handle_turn_aborted(payload),
|
||||
EventMsg::TurnStarted(payload) => self.handle_turn_started(payload),
|
||||
EventMsg::TurnComplete(payload) => self.handle_turn_complete(payload),
|
||||
@@ -357,7 +356,10 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
@@ -378,7 +380,10 @@ impl ThreadHistoryBuilder {
|
||||
| codex_protocol::items::TurnItem::AgentMessage(_)
|
||||
| codex_protocol::items::TurnItem::Reasoning(_)
|
||||
| codex_protocol::items::TurnItem::WebSearch(_)
|
||||
| codex_protocol::items::TurnItem::ImageView(_)
|
||||
| codex_protocol::items::TurnItem::ImageGeneration(_)
|
||||
| codex_protocol::items::TurnItem::FileChange(_)
|
||||
| codex_protocol::items::TurnItem::McpToolCall(_)
|
||||
| codex_protocol::items::TurnItem::ContextCompaction(_) => {}
|
||||
}
|
||||
}
|
||||
@@ -1351,6 +1356,7 @@ mod tests {
|
||||
id: "user-item-id".to_string(),
|
||||
content: Vec::new(),
|
||||
}),
|
||||
started_at_ms: 0,
|
||||
}),
|
||||
EventMsg::TurnComplete(TurnCompleteEvent {
|
||||
turn_id: turn_id.to_string(),
|
||||
@@ -1815,6 +1821,7 @@ mod tests {
|
||||
call_id: "exec-1".into(),
|
||||
process_id: Some("pid-1".into()),
|
||||
turn_id: "turn-1".into(),
|
||||
completed_at_ms: 0,
|
||||
command: vec!["echo".into(), "hello world".into()],
|
||||
cwd: test_path_buf("/tmp").abs(),
|
||||
parsed_cmd: vec![ParsedCommand::Unknown {
|
||||
@@ -1978,6 +1985,7 @@ mod tests {
|
||||
codex_protocol::dynamic_tools::DynamicToolCallRequest {
|
||||
call_id: "dyn-1".into(),
|
||||
turn_id: "turn-1".into(),
|
||||
started_at_ms: 0,
|
||||
namespace: Some("codex_app".into()),
|
||||
tool: "lookup_ticket".into(),
|
||||
arguments: serde_json::json!({"id":"ABC-123"}),
|
||||
@@ -1986,6 +1994,7 @@ mod tests {
|
||||
EventMsg::DynamicToolCallResponse(DynamicToolCallResponseEvent {
|
||||
call_id: "dyn-1".into(),
|
||||
turn_id: "turn-1".into(),
|
||||
completed_at_ms: 0,
|
||||
namespace: Some("codex_app".into()),
|
||||
tool: "lookup_ticket".into(),
|
||||
arguments: serde_json::json!({"id":"ABC-123"}),
|
||||
@@ -2041,6 +2050,7 @@ mod tests {
|
||||
call_id: "exec-declined".into(),
|
||||
process_id: Some("pid-2".into()),
|
||||
turn_id: "turn-1".into(),
|
||||
completed_at_ms: 0,
|
||||
command: vec!["ls".into()],
|
||||
cwd: test_path_buf("/tmp").abs(),
|
||||
parsed_cmd: vec![ParsedCommand::Unknown { cmd: "ls".into() }],
|
||||
@@ -2288,6 +2298,7 @@ mod tests {
|
||||
call_id: "exec-late".into(),
|
||||
process_id: Some("pid-42".into()),
|
||||
turn_id: "turn-a".into(),
|
||||
completed_at_ms: 0,
|
||||
command: vec!["echo".into(), "done".into()],
|
||||
cwd: test_path_buf("/tmp").abs(),
|
||||
parsed_cmd: vec![ParsedCommand::Unknown {
|
||||
@@ -2379,6 +2390,7 @@ mod tests {
|
||||
call_id: "exec-unknown-turn".into(),
|
||||
process_id: Some("pid-42".into()),
|
||||
turn_id: "turn-missing".into(),
|
||||
completed_at_ms: 0,
|
||||
command: vec!["echo".into(), "done".into()],
|
||||
cwd: test_path_buf("/tmp").abs(),
|
||||
parsed_cmd: vec![ParsedCommand::Unknown {
|
||||
@@ -2727,6 +2739,7 @@ mod tests {
|
||||
}),
|
||||
EventMsg::CollabResumeEnd(codex_protocol::protocol::CollabResumeEndEvent {
|
||||
call_id: "resume-1".into(),
|
||||
completed_at_ms: 0,
|
||||
sender_thread_id: ThreadId::try_from("00000000-0000-0000-0000-000000000001")
|
||||
.expect("valid sender thread id"),
|
||||
receiver_thread_id: ThreadId::try_from("00000000-0000-0000-0000-000000000002")
|
||||
@@ -2783,6 +2796,7 @@ mod tests {
|
||||
}),
|
||||
EventMsg::CollabAgentSpawnEnd(codex_protocol::protocol::CollabAgentSpawnEndEvent {
|
||||
call_id: "spawn-1".into(),
|
||||
completed_at_ms: 0,
|
||||
sender_thread_id,
|
||||
new_thread_id: Some(spawned_thread_id),
|
||||
new_agent_nickname: Some("Scout".into()),
|
||||
@@ -2844,6 +2858,7 @@ mod tests {
|
||||
EventMsg::CollabAgentInteractionBegin(
|
||||
codex_protocol::protocol::CollabAgentInteractionBeginEvent {
|
||||
call_id: "send-1".into(),
|
||||
started_at_ms: 0,
|
||||
sender_thread_id: sender,
|
||||
receiver_thread_id: receiver,
|
||||
prompt: "new task".into(),
|
||||
@@ -2852,6 +2867,7 @@ mod tests {
|
||||
EventMsg::CollabAgentInteractionEnd(
|
||||
codex_protocol::protocol::CollabAgentInteractionEndEvent {
|
||||
call_id: "send-1".into(),
|
||||
completed_at_ms: 0,
|
||||
sender_thread_id: sender,
|
||||
receiver_thread_id: receiver,
|
||||
receiver_agent_nickname: None,
|
||||
|
||||
@@ -5,6 +5,7 @@ use std::path::PathBuf;
|
||||
|
||||
use crate::RequestId;
|
||||
use crate::protocol::common::AuthMode;
|
||||
use crate::protocol::item_builders::convert_patch_changes;
|
||||
use codex_experimental_api_macros::ExperimentalApi;
|
||||
use codex_protocol::account::PlanType;
|
||||
use codex_protocol::account::ProviderAccount;
|
||||
@@ -30,6 +31,8 @@ use codex_protocol::config_types::Verbosity;
|
||||
use codex_protocol::config_types::WebSearchMode;
|
||||
use codex_protocol::config_types::WebSearchToolConfig;
|
||||
use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent;
|
||||
use codex_protocol::items::McpToolCallError as CoreMcpToolCallError;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::TurnItem as CoreTurnItem;
|
||||
use codex_protocol::mcp::CallToolResult as CoreMcpCallToolResult;
|
||||
use codex_protocol::mcp::Resource as McpResource;
|
||||
@@ -2782,6 +2785,24 @@ impl From<CoreMcpCallToolResult> for McpServerToolCallResponse {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpCallToolResult> for McpToolCallResult {
|
||||
fn from(result: CoreMcpCallToolResult) -> Self {
|
||||
Self {
|
||||
content: result.content,
|
||||
structured_content: result.structured_content,
|
||||
meta: result.meta,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallError> for McpToolCallError {
|
||||
fn from(error: CoreMcpToolCallError) -> Self {
|
||||
Self {
|
||||
message: error.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -3537,6 +3558,204 @@ pub enum CommandExecOutputStream {
|
||||
Stderr,
|
||||
}
|
||||
|
||||
/// PTY size in character cells for `process/spawn` PTY sessions.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessTerminalSize {
|
||||
/// Terminal height in character cells.
|
||||
pub rows: u16,
|
||||
/// Terminal width in character cells.
|
||||
pub cols: u16,
|
||||
}
|
||||
|
||||
/// Spawn a standalone process (argv vector) without a Codex sandbox on the host
|
||||
/// where the app server is running.
|
||||
///
|
||||
/// `process/spawn` returns after the process has started and the connection-scoped
|
||||
/// `processHandle` has been registered. Process output and exit are reported via
|
||||
/// `process/outputDelta` and `process/exited` notifications.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessSpawnParams {
|
||||
/// Command argv vector. Empty arrays are rejected.
|
||||
pub command: Vec<String>,
|
||||
/// Client-supplied, connection-scoped process handle.
|
||||
///
|
||||
/// Duplicate active handles are rejected on the same connection. The same
|
||||
/// handle can be reused after the prior process exits.
|
||||
pub process_handle: String,
|
||||
/// Absolute working directory for the process.
|
||||
pub cwd: AbsolutePathBuf,
|
||||
/// Enable PTY mode.
|
||||
///
|
||||
/// This implies `streamStdin` and `streamStdoutStderr`.
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub tty: bool,
|
||||
/// Allow follow-up `process/writeStdin` requests to write stdin bytes.
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub stream_stdin: bool,
|
||||
/// Stream stdout/stderr via `process/outputDelta` notifications.
|
||||
///
|
||||
/// Streamed bytes are not duplicated into the `process/exited` notification.
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub stream_stdout_stderr: bool,
|
||||
/// Optional per-stream stdout/stderr capture cap in bytes.
|
||||
///
|
||||
/// When omitted, the server default applies. Set to `null` to disable the
|
||||
/// cap.
|
||||
#[serde(
|
||||
default,
|
||||
deserialize_with = "super::serde_helpers::deserialize_double_option",
|
||||
serialize_with = "super::serde_helpers::serialize_double_option",
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(type = "number | null")]
|
||||
#[ts(optional = nullable)]
|
||||
pub output_bytes_cap: Option<Option<usize>>,
|
||||
/// Optional timeout in milliseconds.
|
||||
///
|
||||
/// When omitted, the server default applies. Set to `null` to disable the
|
||||
/// timeout.
|
||||
#[serde(
|
||||
default,
|
||||
deserialize_with = "super::serde_helpers::deserialize_double_option",
|
||||
serialize_with = "super::serde_helpers::serialize_double_option",
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[ts(type = "number | null")]
|
||||
#[ts(optional = nullable)]
|
||||
pub timeout_ms: Option<Option<i64>>,
|
||||
/// Optional environment overrides merged into the app-server process
|
||||
/// environment.
|
||||
///
|
||||
/// Matching names override inherited values. Set a key to `null` to unset
|
||||
/// an inherited variable.
|
||||
#[ts(optional = nullable)]
|
||||
pub env: Option<HashMap<String, Option<String>>>,
|
||||
/// Optional initial PTY size in character cells. Only valid when `tty` is
|
||||
/// true.
|
||||
#[ts(optional = nullable)]
|
||||
pub size: Option<ProcessTerminalSize>,
|
||||
}
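For illustration only: a minimal sketch of the `env` semantics documented above, where a string value overrides the inherited variable, an explicit null unsets it, and omitted keys are inherited unchanged. The concrete variable names are assumptions.
use serde_json::json;

let params = json!({
    "command": ["printenv"],
    "processHandle": "env-demo",
    "cwd": "/tmp",
    "env": {
        "RUST_LOG": "debug", // override the inherited value
        "NO_COLOR": null,    // unset an inherited variable
    },
});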
|
||||
|
||||
/// Successful response for `process/spawn`.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessSpawnResponse {}
|
||||
|
||||
/// Write stdin bytes to a running `process/spawn` session, close stdin, or
|
||||
/// both.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessWriteStdinParams {
|
||||
/// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
pub process_handle: String,
|
||||
/// Optional base64-encoded stdin bytes to write.
|
||||
#[ts(optional = nullable)]
|
||||
pub delta_base64: Option<String>,
|
||||
/// Close stdin after writing `deltaBase64`, if present.
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub close_stdin: bool,
|
||||
}
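A small editorial sketch (not part of this change): writing one chunk of stdin and closing the pipe in the same request, per the field docs above. "aGVsbG8K" is base64 for "hello\n"; the handle is an assumption.
let params = ProcessWriteStdinParams {
    process_handle: "proc-7".to_string(),
    delta_base64: Some("aGVsbG8K".to_string()),
    close_stdin: true,
};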
|
||||
|
||||
/// Empty success response for `process/writeStdin`.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessWriteStdinResponse {}
|
||||
|
||||
/// Terminate a running `process/spawn` session.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessKillParams {
|
||||
/// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
pub process_handle: String,
|
||||
}
|
||||
|
||||
/// Empty success response for `process/kill`.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessKillResponse {}
|
||||
|
||||
/// Resize a running PTY-backed `process/spawn` session.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessResizePtyParams {
|
||||
/// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
pub process_handle: String,
|
||||
/// New PTY size in character cells.
|
||||
pub size: ProcessTerminalSize,
|
||||
}
|
||||
|
||||
/// Empty success response for `process/resizePty`.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessResizePtyResponse {}
|
||||
|
||||
/// Stream label for `process/outputDelta` notifications.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub enum ProcessOutputStream {
|
||||
/// stdout stream. PTY mode multiplexes terminal output here.
|
||||
Stdout,
|
||||
/// stderr stream.
|
||||
Stderr,
|
||||
}
|
||||
|
||||
/// Base64-encoded output chunk emitted for a streaming `process/spawn` request.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessOutputDeltaNotification {
|
||||
/// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
pub process_handle: String,
|
||||
/// Output stream this chunk belongs to.
|
||||
pub stream: ProcessOutputStream,
|
||||
/// Base64-encoded output bytes.
|
||||
pub delta_base64: String,
|
||||
/// True on the final streamed chunk for this stream when output was
|
||||
/// truncated by `outputBytesCap`.
|
||||
pub cap_reached: bool,
|
||||
}
|
||||
|
||||
/// Final process exit notification for `process/spawn`.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ProcessExitedNotification {
|
||||
/// Client-supplied, connection-scoped `processHandle` from `process/spawn`.
|
||||
pub process_handle: String,
|
||||
/// Process exit code.
|
||||
pub exit_code: i32,
|
||||
/// Buffered stdout capture.
|
||||
///
|
||||
/// Empty when stdout was streamed via `process/outputDelta`.
|
||||
pub stdout: String,
|
||||
/// Whether stdout reached `outputBytesCap`.
|
||||
///
|
||||
/// In streaming mode, stdout is empty and cap state is also reported on the
|
||||
/// final stdout `process/outputDelta` notification.
|
||||
pub stdout_cap_reached: bool,
|
||||
/// Buffered stderr capture.
|
||||
///
|
||||
/// Empty when stderr was streamed via `process/outputDelta`.
|
||||
pub stderr: String,
|
||||
/// Whether stderr reached `outputBytesCap`.
|
||||
///
|
||||
/// In streaming mode, stderr is empty and cap state is also reported on the
|
||||
/// final stderr `process/outputDelta` notification.
|
||||
pub stderr_cap_reached: bool,
|
||||
}
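As a hedged, editorial sketch of reading this notification under the two capture modes described above: when output was streamed, the stdout/stderr strings are empty and the bytes already arrived via process/outputDelta; otherwise the buffered capture (subject to outputBytesCap) is inspected directly. The helper name is an assumption.
fn summarize_exit(n: &ProcessExitedNotification, streamed: bool) -> String {
    if streamed {
        format!("exit {} (output was streamed)", n.exit_code)
    } else {
        format!(
            "exit {}: {} stdout bytes{}, {} stderr bytes{}",
            n.exit_code,
            n.stdout.len(),
            if n.stdout_cap_reached { " (capped)" } else { "" },
            n.stderr.len(),
            if n.stderr_cap_reached { " (capped)" } else { "" },
        )
    }
}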
|
||||
|
||||
// === Threads, Turns, and Items ===
|
||||
// Thread APIs
|
||||
#[derive(
|
||||
@@ -3610,8 +3829,9 @@ pub struct ThreadStartParams {
|
||||
#[experimental("thread/start.experimentalRawEvents")]
|
||||
#[serde(default)]
|
||||
pub experimental_raw_events: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/start.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3738,10 +3958,12 @@ pub struct ThreadResumeParams {
|
||||
/// When true, return only thread metadata and live-resume state without
|
||||
/// populating `thread.turns`. This is useful when the client plans to call
|
||||
/// `thread/turns/list` immediately after resuming.
|
||||
#[experimental("thread/resume.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/resume.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -3842,10 +4064,12 @@ pub struct ThreadForkParams {
|
||||
/// When true, return only thread metadata and live fork state without
|
||||
/// populating `thread.turns`. This is useful when the client plans to call
|
||||
/// `thread/turns/list` immediately after forking.
|
||||
#[experimental("thread/fork.excludeTurns")]
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub exclude_turns: bool,
|
||||
/// If true, persist additional rollout EventMsg variants required to
|
||||
/// reconstruct a richer thread history on subsequent resume/fork/read.
|
||||
/// If true, persist additional EventMsg variants to the rollout file.
|
||||
/// However, `thread/read`, `thread/resume`, and `thread/fork` still only
|
||||
/// return the limited form of thread history for scalability reasons.
|
||||
#[experimental("thread/fork.persistFullHistory")]
|
||||
#[serde(default)]
|
||||
pub persist_extended_history: bool,
|
||||
@@ -4607,6 +4831,22 @@ pub struct PluginReadResponse {
|
||||
pub plugin: PluginDetail,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadParams {
|
||||
pub remote_marketplace_name: String,
|
||||
pub remote_plugin_id: String,
|
||||
pub skill_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginSkillReadResponse {
|
||||
pub contents: Option<String>,
|
||||
}
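For illustration only: a plugin/skill/read params payload using the camelCase wire names of the fields above; all three values are placeholders, not real identifiers.
use serde_json::json;

let params = json!({
    "remoteMarketplaceName": "example-marketplace",
    "remotePluginId": "example-plugin",
    "skillName": "triage",
});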
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -4633,7 +4873,7 @@ pub struct PluginShareListParams {}
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListResponse {
|
||||
pub data: Vec<PluginSummary>,
|
||||
pub data: Vec<PluginShareListItem>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -4648,6 +4888,15 @@ pub struct PluginShareDeleteParams {
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareDeleteResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct PluginShareListItem {
|
||||
pub plugin: PluginSummary,
|
||||
pub share_url: String,
|
||||
pub local_plugin_path: Option<AbsolutePathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[ts(rename_all = "snake_case")]
|
||||
@@ -4825,6 +5074,21 @@ pub enum PluginAuthPolicy {
|
||||
OnUse,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Default, JsonSchema, TS)]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub enum PluginAvailability {
|
||||
/// Plugin-service currently sends `"ENABLED"` for available remote plugins.
|
||||
/// Codex app-server exposes `"AVAILABLE"` in its API; the alias keeps
|
||||
/// decoding compatible with that upstream response.
|
||||
#[serde(rename = "AVAILABLE", alias = "ENABLED")]
|
||||
#[ts(rename = "AVAILABLE")]
|
||||
#[default]
|
||||
Available,
|
||||
#[serde(rename = "DISABLED_BY_ADMIN")]
|
||||
#[ts(rename = "DISABLED_BY_ADMIN")]
|
||||
DisabledByAdmin,
|
||||
}
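A brief editorial sketch of the alias behavior documented above: the upstream "ENABLED" spelling decodes to Available, which then re-serializes with this API's canonical "AVAILABLE" spelling.
use serde_json::json;

let decoded: PluginAvailability =
    serde_json::from_value(json!("ENABLED")).expect("alias accepted");
assert_eq!(decoded, PluginAvailability::Available);
assert_eq!(serde_json::to_value(decoded).unwrap(), json!("AVAILABLE"));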
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -4836,6 +5100,9 @@ pub struct PluginSummary {
|
||||
pub enabled: bool,
|
||||
pub install_policy: PluginInstallPolicy,
|
||||
pub auth_policy: PluginAuthPolicy,
|
||||
/// Availability state for installing and using the plugin.
|
||||
#[serde(default)]
|
||||
pub availability: PluginAvailability,
|
||||
pub interface: Option<PluginInterface>,
|
||||
}
|
||||
|
||||
@@ -5293,7 +5560,7 @@ pub struct ThreadRealtimeStartParams {
|
||||
#[ts(optional = nullable)]
|
||||
pub prompt: Option<Option<String>>,
|
||||
#[ts(optional = nullable)]
|
||||
pub session_id: Option<String>,
|
||||
pub realtime_session_id: Option<String>,
|
||||
#[ts(optional = nullable)]
|
||||
pub transport: Option<ThreadRealtimeStartTransport>,
|
||||
#[ts(optional = nullable)]
|
||||
@@ -5383,7 +5650,7 @@ pub struct ThreadRealtimeListVoicesResponse {
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ThreadRealtimeStartedNotification {
|
||||
pub thread_id: String,
|
||||
pub session_id: Option<String>,
|
||||
pub realtime_session_id: Option<String>,
|
||||
pub version: RealtimeConversationVersion,
|
||||
}
|
||||
|
||||
@@ -6417,6 +6684,10 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
query: search.query,
|
||||
action: Some(WebSearchAction::from(search.action)),
|
||||
},
|
||||
CoreTurnItem::ImageView(image) => ThreadItem::ImageView {
|
||||
id: image.id,
|
||||
path: image.path,
|
||||
},
|
||||
CoreTurnItem::ImageGeneration(image) => ThreadItem::ImageGeneration {
|
||||
id: image.id,
|
||||
status: image.status,
|
||||
@@ -6424,6 +6695,32 @@ impl From<CoreTurnItem> for ThreadItem {
|
||||
result: image.result,
|
||||
saved_path: image.saved_path,
|
||||
},
|
||||
CoreTurnItem::FileChange(file_change) => ThreadItem::FileChange {
|
||||
id: file_change.id,
|
||||
changes: convert_patch_changes(&file_change.changes),
|
||||
status: file_change
|
||||
.status
|
||||
.as_ref()
|
||||
.map(PatchApplyStatus::from)
|
||||
.unwrap_or(PatchApplyStatus::InProgress),
|
||||
},
|
||||
CoreTurnItem::McpToolCall(mcp) => {
|
||||
let duration_ms = mcp
|
||||
.duration
|
||||
.and_then(|duration| i64::try_from(duration.as_millis()).ok());
|
||||
|
||||
ThreadItem::McpToolCall {
|
||||
id: mcp.id,
|
||||
server: mcp.server,
|
||||
tool: mcp.tool,
|
||||
status: McpToolCallStatus::from(mcp.status),
|
||||
arguments: mcp.arguments,
|
||||
mcp_app_resource_uri: mcp.mcp_app_resource_uri,
|
||||
result: mcp.result.map(McpToolCallResult::from).map(Box::new),
|
||||
error: mcp.error.map(McpToolCallError::from),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
CoreTurnItem::ContextCompaction(compaction) => {
|
||||
ThreadItem::ContextCompaction { id: compaction.id }
|
||||
}
|
||||
@@ -6533,6 +6830,16 @@ impl From<&CorePatchApplyStatus> for PatchApplyStatus {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreMcpToolCallStatus> for McpToolCallStatus {
|
||||
fn from(value: CoreMcpToolCallStatus) -> Self {
|
||||
match value {
|
||||
CoreMcpToolCallStatus::InProgress => McpToolCallStatus::InProgress,
|
||||
CoreMcpToolCallStatus::Completed => McpToolCallStatus::Completed,
|
||||
CoreMcpToolCallStatus::Failed => McpToolCallStatus::Failed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -6819,6 +7126,9 @@ pub struct ItemStartedNotification {
|
||||
pub item: ThreadItem,
|
||||
pub thread_id: String,
|
||||
pub turn_id: String,
|
||||
/// Unix timestamp (in milliseconds) when this item lifecycle started.
|
||||
#[ts(type = "number")]
|
||||
pub started_at_ms: i64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -6881,6 +7191,9 @@ pub struct ItemCompletedNotification {
|
||||
pub item: ThreadItem,
|
||||
pub thread_id: String,
|
||||
pub turn_id: String,
|
||||
/// Unix timestamp (in milliseconds) when this item lifecycle completed.
|
||||
#[ts(type = "number")]
|
||||
pub completed_at_ms: i64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -6992,6 +7305,9 @@ pub struct CommandExecOutputDeltaNotification {
|
||||
pub cap_reached: bool,
|
||||
}
|
||||
|
||||
/// Deprecated legacy notification for `apply_patch` textual output.
|
||||
///
|
||||
/// The server no longer emits this notification.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -8030,10 +8346,15 @@ mod tests {
|
||||
use super::*;
|
||||
use codex_protocol::items::AgentMessageContent;
|
||||
use codex_protocol::items::AgentMessageItem;
|
||||
use codex_protocol::items::FileChangeItem;
|
||||
use codex_protocol::items::ImageViewItem;
|
||||
use codex_protocol::items::McpToolCallItem;
|
||||
use codex_protocol::items::McpToolCallStatus as CoreMcpToolCallStatus;
|
||||
use codex_protocol::items::ReasoningItem;
|
||||
use codex_protocol::items::TurnItem;
|
||||
use codex_protocol::items::UserMessageItem;
|
||||
use codex_protocol::items::WebSearchItem;
|
||||
use codex_protocol::mcp::CallToolResult;
|
||||
use codex_protocol::models::WebSearchAction as CoreWebSearchAction;
|
||||
use codex_protocol::protocol::NetworkAccess as CoreNetworkAccess;
|
||||
use codex_protocol::user_input::UserInput as CoreUserInput;
|
||||
@@ -8043,6 +8364,7 @@ mod tests {
|
||||
use serde_json::json;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
fn absolute_path_string(path: &str) -> String {
|
||||
let path = format!("/{}", path.trim_start_matches('/'));
|
||||
@@ -9023,6 +9345,97 @@ mod tests {
|
||||
assert_eq!(decoded, params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_spawn_params_round_trips_without_sandbox_policy() {
|
||||
let params = ProcessSpawnParams {
|
||||
command: vec!["sleep".to_string(), "30".to_string()],
|
||||
process_handle: "sleep-1".to_string(),
|
||||
cwd: test_absolute_path(),
|
||||
tty: false,
|
||||
stream_stdin: false,
|
||||
stream_stdout_stderr: false,
|
||||
output_bytes_cap: None,
|
||||
timeout_ms: None,
|
||||
env: None,
|
||||
size: None,
|
||||
};
|
||||
|
||||
let value = serde_json::to_value(¶ms).expect("serialize process/spawn params");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"command": ["sleep", "30"],
|
||||
"processHandle": "sleep-1",
|
||||
"cwd": absolute_path_string("readable"),
|
||||
"env": null,
|
||||
"size": null,
|
||||
})
|
||||
);
|
||||
|
||||
let decoded =
|
||||
serde_json::from_value::<ProcessSpawnParams>(value).expect("deserialize round-trip");
|
||||
assert_eq!(decoded, params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_spawn_params_distinguish_omitted_null_and_value_limits() {
|
||||
let base = json!({
|
||||
"command": ["sleep", "30"],
|
||||
"processHandle": "sleep-1",
|
||||
"cwd": absolute_path_string("readable"),
|
||||
});
|
||||
|
||||
let expected_omitted = ProcessSpawnParams {
|
||||
command: vec!["sleep".to_string(), "30".to_string()],
|
||||
process_handle: "sleep-1".to_string(),
|
||||
cwd: test_absolute_path(),
|
||||
tty: false,
|
||||
stream_stdin: false,
|
||||
stream_stdout_stderr: false,
|
||||
output_bytes_cap: None,
|
||||
timeout_ms: None,
|
||||
env: None,
|
||||
size: None,
|
||||
};
|
||||
let decoded =
|
||||
serde_json::from_value::<ProcessSpawnParams>(base).expect("deserialize omitted limits");
|
||||
assert_eq!(decoded, expected_omitted);
|
||||
|
||||
let decoded = serde_json::from_value::<ProcessSpawnParams>(json!({
|
||||
"command": ["sleep", "30"],
|
||||
"processHandle": "sleep-1",
|
||||
"cwd": absolute_path_string("readable"),
|
||||
"outputBytesCap": null,
|
||||
"timeoutMs": null,
|
||||
}))
|
||||
.expect("deserialize disabled limits");
|
||||
assert_eq!(
|
||||
decoded,
|
||||
ProcessSpawnParams {
|
||||
output_bytes_cap: Some(None),
|
||||
timeout_ms: Some(None),
|
||||
..expected_omitted.clone()
|
||||
}
|
||||
);
|
||||
|
||||
let decoded = serde_json::from_value::<ProcessSpawnParams>(json!({
|
||||
"command": ["sleep", "30"],
|
||||
"processHandle": "sleep-1",
|
||||
"cwd": absolute_path_string("readable"),
|
||||
"outputBytesCap": 123,
|
||||
"timeoutMs": 456,
|
||||
}))
|
||||
.expect("deserialize explicit limits");
|
||||
assert_eq!(
|
||||
decoded,
|
||||
ProcessSpawnParams {
|
||||
output_bytes_cap: Some(Some(123)),
|
||||
timeout_ms: Some(Some(456)),
|
||||
..expected_omitted
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_exec_params_round_trips_disable_output_cap() {
|
||||
let params = CommandExecParams {
|
||||
@@ -9255,6 +9668,110 @@ mod tests {
|
||||
assert_eq!(decoded, notification);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_control_params_round_trip() {
|
||||
let write = ProcessWriteStdinParams {
|
||||
process_handle: "proc-7".to_string(),
|
||||
delta_base64: None,
|
||||
close_stdin: true,
|
||||
};
|
||||
let value = serde_json::to_value(&write).expect("serialize process/writeStdin params");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"processHandle": "proc-7",
|
||||
"deltaBase64": null,
|
||||
"closeStdin": true,
|
||||
})
|
||||
);
|
||||
let decoded = serde_json::from_value::<ProcessWriteStdinParams>(value)
|
||||
.expect("deserialize process/writeStdin params");
|
||||
assert_eq!(decoded, write);
|
||||
|
||||
let resize = ProcessResizePtyParams {
|
||||
process_handle: "proc-7".to_string(),
|
||||
size: ProcessTerminalSize {
|
||||
rows: 50,
|
||||
cols: 160,
|
||||
},
|
||||
};
|
||||
let value = serde_json::to_value(&resize).expect("serialize process/resizePty params");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"processHandle": "proc-7",
|
||||
"size": {
|
||||
"rows": 50,
|
||||
"cols": 160,
|
||||
},
|
||||
})
|
||||
);
|
||||
let decoded = serde_json::from_value::<ProcessResizePtyParams>(value)
|
||||
.expect("deserialize process/resizePty params");
|
||||
assert_eq!(decoded, resize);
|
||||
|
||||
let kill = ProcessKillParams {
|
||||
process_handle: "proc-7".to_string(),
|
||||
};
|
||||
let value = serde_json::to_value(&kill).expect("serialize process/kill params");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"processHandle": "proc-7",
|
||||
})
|
||||
);
|
||||
let decoded =
|
||||
serde_json::from_value::<ProcessKillParams>(value).expect("deserialize process/kill");
|
||||
assert_eq!(decoded, kill);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_notifications_round_trip() {
|
||||
let delta = ProcessOutputDeltaNotification {
|
||||
process_handle: "proc-1".to_string(),
|
||||
stream: ProcessOutputStream::Stdout,
|
||||
delta_base64: "AQI=".to_string(),
|
||||
cap_reached: false,
|
||||
};
|
||||
let value = serde_json::to_value(&delta).expect("serialize process/outputDelta");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"processHandle": "proc-1",
|
||||
"stream": "stdout",
|
||||
"deltaBase64": "AQI=",
|
||||
"capReached": false,
|
||||
})
|
||||
);
|
||||
let decoded = serde_json::from_value::<ProcessOutputDeltaNotification>(value)
|
||||
.expect("deserialize process/outputDelta");
|
||||
assert_eq!(decoded, delta);
|
||||
|
||||
let exited = ProcessExitedNotification {
|
||||
process_handle: "proc-1".to_string(),
|
||||
exit_code: 0,
|
||||
stdout: "out".to_string(),
|
||||
stdout_cap_reached: false,
|
||||
stderr: "err".to_string(),
|
||||
stderr_cap_reached: true,
|
||||
};
|
||||
let value = serde_json::to_value(&exited).expect("serialize process/exited");
|
||||
assert_eq!(
|
||||
value,
|
||||
json!({
|
||||
"processHandle": "proc-1",
|
||||
"exitCode": 0,
|
||||
"stdout": "out",
|
||||
"stdoutCapReached": false,
|
||||
"stderr": "err",
|
||||
"stderrCapReached": true,
|
||||
})
|
||||
);
|
||||
let decoded = serde_json::from_value::<ProcessExitedNotification>(value)
|
||||
.expect("deserialize process/exited");
|
||||
assert_eq!(decoded, exited);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_execution_output_delta_round_trips() {
|
||||
let notification = CommandExecutionOutputDeltaNotification {
|
||||
@@ -10310,6 +10827,111 @@ mod tests {
|
||||
}),
|
||||
}
|
||||
);
|
||||
|
||||
let image_view_item = TurnItem::ImageView(ImageViewItem {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(image_view_item),
|
||||
ThreadItem::ImageView {
|
||||
id: "view-image-1".to_string(),
|
||||
path: test_path_buf("/tmp/view-image.png").abs(),
|
||||
}
|
||||
);
|
||||
|
||||
let file_change_item = TurnItem::FileChange(FileChangeItem {
|
||||
id: "patch-1".to_string(),
|
||||
changes: [(
|
||||
PathBuf::from("README.md"),
|
||||
codex_protocol::protocol::FileChange::Add {
|
||||
content: "hello\n".to_string(),
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
status: Some(codex_protocol::protocol::PatchApplyStatus::Completed),
|
||||
auto_approved: None,
|
||||
stdout: Some("Done!".to_string()),
|
||||
stderr: Some(String::new()),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(file_change_item),
|
||||
ThreadItem::FileChange {
|
||||
id: "patch-1".to_string(),
|
||||
changes: vec![FileUpdateChange {
|
||||
path: "README.md".to_string(),
|
||||
kind: PatchChangeKind::Add,
|
||||
diff: "hello\n".to_string(),
|
||||
}],
|
||||
status: PatchApplyStatus::Completed,
|
||||
}
|
||||
);
|
||||
|
||||
let mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
status: CoreMcpToolCallStatus::InProgress,
|
||||
result: None,
|
||||
error: None,
|
||||
duration: None,
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-1".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::InProgress,
|
||||
arguments: json!({"arg": "value"}),
|
||||
mcp_app_resource_uri: Some("app://connector".to_string()),
|
||||
result: None,
|
||||
error: None,
|
||||
duration_ms: None,
|
||||
}
|
||||
);
|
||||
|
||||
let completed_mcp_tool_call_item = TurnItem::McpToolCall(McpToolCallItem {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
status: CoreMcpToolCallStatus::Completed,
|
||||
result: Some(CallToolResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
is_error: Some(false),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
}),
|
||||
error: None,
|
||||
duration: Some(Duration::from_millis(42)),
|
||||
});
|
||||
|
||||
assert_eq!(
|
||||
ThreadItem::from(completed_mcp_tool_call_item),
|
||||
ThreadItem::McpToolCall {
|
||||
id: "mcp-2".to_string(),
|
||||
server: "server".to_string(),
|
||||
tool: "tool".to_string(),
|
||||
status: McpToolCallStatus::Completed,
|
||||
arguments: JsonValue::Null,
|
||||
mcp_app_resource_uri: None,
|
||||
result: Some(Box::new(McpToolCallResult {
|
||||
content: vec![json!({"type": "text", "text": "ok"})],
|
||||
structured_content: Some(json!({"ok": true})),
|
||||
meta: Some(json!({"trace": "1"})),
|
||||
})),
|
||||
error: None,
|
||||
duration_ms: Some(42),
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -10644,6 +11266,23 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_skill_read_params_serialization_uses_remote_plugin_id() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginSkillReadParams {
|
||||
remote_marketplace_name: "chatgpt-global".to_string(),
|
||||
remote_plugin_id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
skill_name: "plan-work".to_string(),
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"remoteMarketplaceName": "chatgpt-global",
|
||||
"remotePluginId": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"skillName": "plan-work",
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_params_and_response_serialization_use_camel_case_fields() {
|
||||
let plugin_path = if cfg!(windows) {
|
||||
@@ -10709,36 +11348,74 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_share_list_response_serializes_plugin_summaries() {
|
||||
fn plugin_share_list_response_serializes_share_items() {
|
||||
assert_eq!(
|
||||
serde_json::to_value(PluginShareListResponse {
|
||||
data: vec![PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
interface: None,
|
||||
data: vec![PluginShareListItem {
|
||||
plugin: PluginSummary {
|
||||
id: "plugins~Plugin_00000000000000000000000000000000".to_string(),
|
||||
name: "gmail".to_string(),
|
||||
source: PluginSource::Remote,
|
||||
installed: false,
|
||||
enabled: false,
|
||||
install_policy: PluginInstallPolicy::Available,
|
||||
auth_policy: PluginAuthPolicy::OnUse,
|
||||
availability: PluginAvailability::Available,
|
||||
interface: None,
|
||||
},
|
||||
share_url: "https://chatgpt.example/plugins/share/share-key-1".to_string(),
|
||||
local_plugin_path: None,
|
||||
}],
|
||||
})
|
||||
.unwrap(),
|
||||
json!({
|
||||
"data": [{
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"interface": null,
|
||||
"plugin": {
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"availability": "AVAILABLE",
|
||||
"interface": null,
|
||||
},
|
||||
"shareUrl": "https://chatgpt.example/plugins/share/share-key-1",
|
||||
"localPluginPath": null,
|
||||
}],
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_summary_defaults_missing_availability_to_available() {
|
||||
let summary: PluginSummary = serde_json::from_value(json!({
|
||||
"id": "plugins~Plugin_00000000000000000000000000000000",
|
||||
"name": "gmail",
|
||||
"source": { "type": "remote" },
|
||||
"installed": false,
|
||||
"enabled": false,
|
||||
"installPolicy": "AVAILABLE",
|
||||
"authPolicy": "ON_USE",
|
||||
"interface": null,
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(summary.availability, PluginAvailability::Available);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_availability_deserializes_enabled_alias() {
|
||||
let availability: PluginAvailability = serde_json::from_value(json!("ENABLED")).unwrap();
|
||||
|
||||
assert_eq!(availability, PluginAvailability::Available);
|
||||
assert_eq!(
|
||||
serde_json::to_value(availability).unwrap(),
|
||||
json!("AVAILABLE")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plugin_uninstall_params_serialization_omits_force_remote_sync() {
|
||||
assert_eq!(
|
||||
|
||||
6
codex-rs/app-server-transport/BUILD.bazel
Normal file
@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "app-server-transport",
    crate_name = "codex_app_server_transport",
)
58
codex-rs/app-server-transport/Cargo.toml
Normal file
@@ -0,0 +1,58 @@
|
||||
[package]
|
||||
name = "codex-app-server-transport"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "codex_app_server_transport"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
axum = { workspace = true, default-features = false, features = [
|
||||
"http1",
|
||||
"json",
|
||||
"tokio",
|
||||
"ws",
|
||||
] }
|
||||
base64 = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
codex-api = { workspace = true }
|
||||
codex-app-server-protocol = { workspace = true }
|
||||
codex-core = { workspace = true }
|
||||
codex-login = { workspace = true }
|
||||
codex-model-provider = { workspace = true }
|
||||
codex-state = { workspace = true }
|
||||
codex-uds = { workspace = true }
|
||||
codex-utils-absolute-path = { workspace = true }
|
||||
codex-utils-rustls-provider = { workspace = true }
|
||||
constant_time_eq = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
gethostname = { workspace = true }
|
||||
hmac = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
owo-colors = { workspace = true, features = ["supports-colors"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
time = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"io-std",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
] }
|
||||
tokio-tungstenite = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
url = { workspace = true }
|
||||
uuid = { workspace = true, features = ["serde", "v7"] }
|
||||
|
||||
[dev-dependencies]
|
||||
chrono = { workspace = true }
|
||||
codex-config = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
20
codex-rs/app-server-transport/src/lib.rs
Normal file
@@ -0,0 +1,20 @@
mod outgoing_message;
mod transport;

pub use outgoing_message::ConnectionId;
pub use outgoing_message::OutgoingError;
pub use outgoing_message::OutgoingMessage;
pub use outgoing_message::OutgoingResponse;
pub use outgoing_message::QueuedOutgoingMessage;
pub use transport::AppServerTransport;
pub use transport::AppServerTransportParseError;
pub use transport::CHANNEL_CAPACITY;
pub use transport::ConnectionOrigin;
pub use transport::RemoteControlHandle;
pub use transport::TransportEvent;
pub use transport::app_server_control_socket_path;
pub use transport::auth;
pub use transport::start_control_socket_acceptor;
pub use transport::start_remote_control;
pub use transport::start_stdio_connection;
pub use transport::start_websocket_acceptor;
58
codex-rs/app-server-transport/src/outgoing_message.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use std::fmt;
|
||||
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::Result;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use serde::Serialize;
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
/// Stable identifier for a transport connection.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
pub struct ConnectionId(pub u64);
|
||||
|
||||
impl fmt::Display for ConnectionId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Outgoing message from the server to the client.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum OutgoingMessage {
|
||||
Request(ServerRequest),
|
||||
/// AppServerNotification is specific to the case where this is run as an
|
||||
/// "app server" as opposed to an MCP server.
|
||||
AppServerNotification(ServerNotification),
|
||||
Response(OutgoingResponse),
|
||||
Error(OutgoingError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub struct OutgoingResponse {
|
||||
pub id: RequestId,
|
||||
pub result: Result,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize)]
|
||||
pub struct OutgoingError {
|
||||
pub error: JSONRPCErrorError,
|
||||
pub id: RequestId,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct QueuedOutgoingMessage {
|
||||
pub message: OutgoingMessage,
|
||||
pub write_complete_tx: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
impl QueuedOutgoingMessage {
|
||||
pub fn new(message: OutgoingMessage) -> Self {
|
||||
Self {
|
||||
message,
|
||||
write_complete_tx: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -86,7 +86,7 @@ pub enum AppServerWebsocketCapabilityTokenSource {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub(crate) struct WebsocketAuthPolicy {
|
||||
pub struct WebsocketAuthPolicy {
|
||||
pub(crate) mode: Option<WebsocketAuthMode>,
|
||||
}
|
||||
|
||||
@@ -219,7 +219,7 @@ impl AppServerWebsocketAuthArgs {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn policy_from_settings(
|
||||
pub fn policy_from_settings(
|
||||
settings: &AppServerWebsocketAuthSettings,
|
||||
) -> io::Result<WebsocketAuthPolicy> {
|
||||
let mode = match settings.config.as_ref() {
|
||||
478
codex-rs/app-server-transport/src/transport/mod.rs
Normal file
@@ -0,0 +1,478 @@
|
||||
pub mod auth;
|
||||
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::OutgoingError;
|
||||
use crate::outgoing_message::OutgoingMessage;
|
||||
use crate::outgoing_message::QueuedOutgoingMessage;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_core::config::find_codex_home;
|
||||
use codex_utils_absolute_path::AbsolutePathBuf;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::error;
|
||||
use tracing::warn;
|
||||
|
||||
/// Size of the bounded channels used to communicate between tasks. The value
|
||||
/// is a balance between throughput and memory usage - 128 messages should be
|
||||
/// plenty for an interactive CLI.
|
||||
pub const CHANNEL_CAPACITY: usize = 128;
|
||||
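As a quick illustration of the bound described above, here is a tiny sketch (not part of the crate) of a tokio channel created with the same capacity; once the queue is full, senders wait instead of growing memory without limit.

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Same bound as CHANNEL_CAPACITY above; a full queue applies backpressure.
    let (tx, mut rx) = mpsc::channel::<u32>(128);
    tx.send(1).await.expect("channel has room");
    assert_eq!(rx.recv().await, Some(1));
}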
|
||||
mod remote_control;
|
||||
mod stdio;
|
||||
mod unix_socket;
|
||||
#[cfg(test)]
|
||||
mod unix_socket_tests;
|
||||
mod websocket;
|
||||
|
||||
pub use remote_control::RemoteControlHandle;
|
||||
pub use remote_control::start_remote_control;
|
||||
pub use stdio::start_stdio_connection;
|
||||
pub use unix_socket::start_control_socket_acceptor;
|
||||
pub use websocket::start_websocket_acceptor;
|
||||
|
||||
const OVERLOADED_ERROR_CODE: i64 = -32001;
|
||||
|
||||
const APP_SERVER_CONTROL_SOCKET_DIR_NAME: &str = "app-server-control";
|
||||
const APP_SERVER_CONTROL_SOCKET_FILE_NAME: &str = "app-server-control.sock";
|
||||
|
||||
pub fn app_server_control_socket_path(codex_home: &Path) -> std::io::Result<AbsolutePathBuf> {
|
||||
AbsolutePathBuf::from_absolute_path(
|
||||
codex_home
|
||||
.join(APP_SERVER_CONTROL_SOCKET_DIR_NAME)
|
||||
.join(APP_SERVER_CONTROL_SOCKET_FILE_NAME),
|
||||
)
|
||||
}
|
||||
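A small sketch of where the default control socket ends up, using only std paths; the `/home/user/.codex` value is a hypothetical CODEX_HOME.

use std::path::Path;

fn main() {
    // Mirrors the join logic above: <codex_home>/app-server-control/app-server-control.sock.
    let codex_home = Path::new("/home/user/.codex");
    let socket = codex_home
        .join("app-server-control")
        .join("app-server-control.sock");
    assert_eq!(
        socket,
        Path::new("/home/user/.codex/app-server-control/app-server-control.sock")
    );
}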
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum AppServerTransport {
|
||||
Stdio,
|
||||
UnixSocket { socket_path: AbsolutePathBuf },
|
||||
WebSocket { bind_address: SocketAddr },
|
||||
Off,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum AppServerTransportParseError {
|
||||
UnsupportedListenUrl(String),
|
||||
InvalidUnixSocketPath { listen_url: String, message: String },
|
||||
InvalidWebSocketListenUrl(String),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for AppServerTransportParseError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"unsupported --listen URL `{listen_url}`; expected `stdio://`, `unix://`, `unix://PATH`, `ws://IP:PORT`, or `off`"
|
||||
),
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url,
|
||||
message,
|
||||
} => write!(
|
||||
f,
|
||||
"invalid unix socket --listen URL `{listen_url}`; failed to resolve socket path: {message}"
|
||||
),
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!(
|
||||
f,
|
||||
"invalid websocket --listen URL `{listen_url}`; expected `ws://IP:PORT`"
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for AppServerTransportParseError {}
|
||||
|
||||
impl AppServerTransport {
|
||||
pub const DEFAULT_LISTEN_URL: &'static str = "stdio://";
|
||||
|
||||
pub fn from_listen_url(listen_url: &str) -> Result<Self, AppServerTransportParseError> {
|
||||
if listen_url == Self::DEFAULT_LISTEN_URL {
|
||||
return Ok(Self::Stdio);
|
||||
}
|
||||
|
||||
if let Some(raw_socket_path) = listen_url.strip_prefix("unix://") {
|
||||
let socket_path = if raw_socket_path.is_empty() {
|
||||
let codex_home = find_codex_home().map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: format!("failed to resolve CODEX_HOME: {err}"),
|
||||
}
|
||||
})?;
|
||||
app_server_control_socket_path(&codex_home).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
} else {
|
||||
AbsolutePathBuf::relative_to_current_dir(raw_socket_path).map_err(|err| {
|
||||
AppServerTransportParseError::InvalidUnixSocketPath {
|
||||
listen_url: listen_url.to_string(),
|
||||
message: err.to_string(),
|
||||
}
|
||||
})?
|
||||
};
|
||||
return Ok(Self::UnixSocket { socket_path });
|
||||
}
|
||||
|
||||
if listen_url == "off" {
|
||||
return Ok(Self::Off);
|
||||
}
|
||||
|
||||
if let Some(socket_addr) = listen_url.strip_prefix("ws://") {
|
||||
let bind_address = socket_addr.parse::<SocketAddr>().map_err(|_| {
|
||||
AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string())
|
||||
})?;
|
||||
return Ok(Self::WebSocket { bind_address });
|
||||
}
|
||||
|
||||
Err(AppServerTransportParseError::UnsupportedListenUrl(
|
||||
listen_url.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for AppServerTransport {
|
||||
type Err = AppServerTransportParseError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Self::from_listen_url(s)
|
||||
}
|
||||
}
|
||||
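Since `AppServerTransport` implements `FromStr`, a `--listen` value can be parsed directly. A minimal usage sketch, assuming the re-exports shown in lib.rs above:

use codex_app_server_transport::AppServerTransport;

fn main() {
    // "ws://IP:PORT" becomes a WebSocket transport bound to that address.
    let ws: AppServerTransport = "ws://127.0.0.1:4500".parse().expect("valid listen URL");
    assert!(matches!(ws, AppServerTransport::WebSocket { .. }));

    // "off" disables the transport entirely.
    assert_eq!(
        AppServerTransport::from_listen_url("off"),
        Ok(AppServerTransport::Off)
    );

    // Unknown schemes are rejected with UnsupportedListenUrl.
    assert!("ftp://example".parse::<AppServerTransport>().is_err());
}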
|
||||
#[derive(Debug)]
|
||||
pub enum TransportEvent {
|
||||
ConnectionOpened {
|
||||
connection_id: ConnectionId,
|
||||
origin: ConnectionOrigin,
|
||||
writer: mpsc::Sender<QueuedOutgoingMessage>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
},
|
||||
ConnectionClosed {
|
||||
connection_id: ConnectionId,
|
||||
},
|
||||
IncomingMessage {
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ConnectionOrigin {
|
||||
Stdio,
|
||||
InProcess,
|
||||
WebSocket,
|
||||
RemoteControl,
|
||||
}
|
||||
|
||||
impl ConnectionOrigin {
|
||||
pub fn allows_device_key_requests(self) -> bool {
|
||||
// Device-key endpoints are only for local connections that own the app-server instance.
|
||||
// Do not include remote transports such as SSH or remote-control websocket connections.
|
||||
matches!(self, Self::Stdio | Self::InProcess)
|
||||
}
|
||||
}
|
||||
|
||||
static CONNECTION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
fn next_connection_id() -> ConnectionId {
|
||||
ConnectionId(CONNECTION_ID_COUNTER.fetch_add(1, Ordering::Relaxed))
|
||||
}
|
||||
|
||||
async fn forward_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
payload: &str,
|
||||
) -> bool {
|
||||
match serde_json::from_str::<JSONRPCMessage>(payload) {
|
||||
Ok(message) => {
|
||||
enqueue_incoming_message(transport_event_tx, writer, connection_id, message).await
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Failed to deserialize JSONRPCMessage: {err}");
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn enqueue_incoming_message(
|
||||
transport_event_tx: &mpsc::Sender<TransportEvent>,
|
||||
writer: &mpsc::Sender<QueuedOutgoingMessage>,
|
||||
connection_id: ConnectionId,
|
||||
message: JSONRPCMessage,
|
||||
) -> bool {
|
||||
let event = TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message,
|
||||
};
|
||||
match transport_event_tx.try_send(event) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Request(request),
|
||||
})) => {
|
||||
let overload_error = OutgoingMessage::Error(OutgoingError {
|
||||
id: request.id,
|
||||
error: JSONRPCErrorError {
|
||||
code: OVERLOADED_ERROR_CODE,
|
||||
message: "Server overloaded; retry later.".to_string(),
|
||||
data: None,
|
||||
},
|
||||
});
|
||||
match writer.try_send(QueuedOutgoingMessage::new(overload_error)) {
|
||||
Ok(()) => true,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => false,
|
||||
Err(mpsc::error::TrySendError::Full(_overload_error)) => {
|
||||
warn!(
|
||||
"dropping overload response for connection {:?}: outbound queue is full",
|
||||
connection_id
|
||||
);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Full(event)) => transport_event_tx.send(event).await.is_ok(),
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_outgoing_message(outgoing_message: OutgoingMessage) -> Option<String> {
|
||||
let value = match serde_json::to_value(outgoing_message) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
error!("Failed to convert OutgoingMessage to JSON value: {err}");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
match serde_json::to_string(&value) {
|
||||
Ok(json) => Some(json),
|
||||
Err(err) => {
|
||||
error!("Failed to serialize JSONRPCMessage: {err}");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::JSONRPCRequest;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
#[test]
|
||||
fn listen_off_parses_as_off_transport() {
|
||||
assert_eq!(
|
||||
AppServerTransport::from_listen_url("off"),
|
||||
Ok(AppServerTransport::Off)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_returns_overload_error_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
assert!(
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request).await
|
||||
);
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should stay queued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let overload = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("request should receive overload error");
|
||||
let overload_json =
|
||||
serde_json::to_value(overload.message).expect("serialize overload error");
|
||||
assert_eq!(
|
||||
overload_json,
|
||||
json!({
|
||||
"id": 7,
|
||||
"error": {
|
||||
"code": OVERLOADED_ERROR_CODE,
|
||||
"message": "Server overloaded; retry later."
|
||||
}
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_response_waits_instead_of_dropping_when_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, _writer_rx) = mpsc::channel(1);
|
||||
|
||||
let first_message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: first_message.clone(),
|
||||
})
|
||||
.await
|
||||
.expect("queue should accept first message");
|
||||
|
||||
let response = JSONRPCMessage::Response(JSONRPCResponse {
|
||||
id: RequestId::Integer(7),
|
||||
result: json!({"ok": true}),
|
||||
});
|
||||
let transport_event_tx_for_enqueue = transport_event_tx.clone();
|
||||
let writer_tx_for_enqueue = writer_tx.clone();
|
||||
let enqueue_handle = tokio::spawn(async move {
|
||||
enqueue_incoming_message(
|
||||
&transport_event_tx_for_enqueue,
|
||||
&writer_tx_for_enqueue,
|
||||
connection_id,
|
||||
response,
|
||||
)
|
||||
.await
|
||||
});
|
||||
|
||||
let queued_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("first event should be dequeued");
|
||||
match queued_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message,
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(message, first_message);
|
||||
}
|
||||
_ => panic!("expected queued incoming message"),
|
||||
}
|
||||
|
||||
let enqueue_result = enqueue_handle.await.expect("enqueue task should not panic");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let forwarded_event = transport_event_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("response should be forwarded instead of dropped");
|
||||
match forwarded_event {
|
||||
TransportEvent::IncomingMessage {
|
||||
connection_id: queued_connection_id,
|
||||
message: JSONRPCMessage::Response(JSONRPCResponse { id, result }),
|
||||
} => {
|
||||
assert_eq!(queued_connection_id, connection_id);
|
||||
assert_eq!(id, RequestId::Integer(7));
|
||||
assert_eq!(result, json!({"ok": true}));
|
||||
}
|
||||
_ => panic!("expected forwarded response message"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_incoming_request_does_not_block_when_writer_queue_is_full() {
|
||||
let connection_id = ConnectionId(42);
|
||||
let (transport_event_tx, _transport_event_rx) = mpsc::channel(1);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
transport_event_tx
|
||||
.send(TransportEvent::IncomingMessage {
|
||||
connection_id,
|
||||
message: JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
}),
|
||||
})
|
||||
.await
|
||||
.expect("transport queue should accept first message");
|
||||
|
||||
writer_tx
|
||||
.send(QueuedOutgoingMessage::new(
|
||||
OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning(
|
||||
ConfigWarningNotification {
|
||||
summary: "queued".to_string(),
|
||||
details: None,
|
||||
path: None,
|
||||
range: None,
|
||||
},
|
||||
)),
|
||||
))
|
||||
.await
|
||||
.expect("writer queue should accept first message");
|
||||
|
||||
let request = JSONRPCMessage::Request(JSONRPCRequest {
|
||||
id: RequestId::Integer(7),
|
||||
method: "config/read".to_string(),
|
||||
params: Some(json!({ "includeLayers": false })),
|
||||
trace: None,
|
||||
});
|
||||
|
||||
let enqueue_result = timeout(
|
||||
Duration::from_millis(100),
|
||||
enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request),
|
||||
)
|
||||
.await
|
||||
.expect("enqueue should not block while writer queue is full");
|
||||
assert!(enqueue_result);
|
||||
|
||||
let queued_outgoing = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("writer queue should still contain original message");
|
||||
let queued_json =
|
||||
serde_json::to_value(queued_outgoing.message).expect("serialize queued message");
|
||||
assert_eq!(
|
||||
queued_json,
|
||||
json!({
|
||||
"method": "configWarning",
|
||||
"params": {
|
||||
"summary": "queued",
|
||||
"details": null,
|
||||
},
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -195,7 +195,7 @@ impl ClientTracker {
|
||||
})
|
||||
.await
|
||||
}
|
||||
ClientEvent::Ack => Ok(()),
|
||||
ClientEvent::ClientMessageChunk { .. } | ClientEvent::Ack { .. } => Ok(()),
|
||||
ClientEvent::Ping => {
|
||||
if let Some(client) = self.clients.get_mut(&client_key) {
|
||||
client.last_activity_at = Instant::now();
|
||||
@@ -1,6 +1,7 @@
|
||||
mod client_tracker;
|
||||
mod enroll;
|
||||
mod protocol;
|
||||
mod segment;
|
||||
mod websocket;
|
||||
|
||||
use crate::transport::remote_control::websocket::RemoteControlChannels;
|
||||
@@ -35,14 +36,14 @@ pub(super) struct QueuedServerEnvelope {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct RemoteControlHandle {
|
||||
pub struct RemoteControlHandle {
|
||||
enabled_tx: Arc<watch::Sender<bool>>,
|
||||
status_tx: Arc<watch::Sender<RemoteControlStatusChangedNotification>>,
|
||||
state_db_available: bool,
|
||||
}
|
||||
|
||||
impl RemoteControlHandle {
|
||||
pub(crate) fn set_enabled(&self, enabled: bool) {
|
||||
pub fn set_enabled(&self, enabled: bool) {
|
||||
let requested_enabled = enabled;
|
||||
let enabled = enabled && self.state_db_available;
|
||||
if requested_enabled && !self.state_db_available {
|
||||
@@ -55,14 +56,12 @@ impl RemoteControlHandle {
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn status_receiver(
|
||||
&self,
|
||||
) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
pub fn status_receiver(&self) -> watch::Receiver<RemoteControlStatusChangedNotification> {
|
||||
self.status_tx.subscribe()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn start_remote_control(
|
||||
pub async fn start_remote_control(
|
||||
remote_control_url: String,
|
||||
state_db: Option<Arc<StateRuntime>>,
|
||||
auth_manager: Arc<AuthManager>,
|
||||
@@ -121,5 +120,7 @@ pub(crate) async fn start_remote_control(
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod segment_tests;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
@@ -47,10 +47,20 @@ pub enum ClientEvent {
|
||||
ClientMessage {
|
||||
message: JSONRPCMessage,
|
||||
},
|
||||
ClientMessageChunk {
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
message_chunk_base64: String,
|
||||
},
|
||||
/// Backend-generated acknowledgement for all server envelopes addressed to
|
||||
/// `client_id` and `stream_id` whose envelope `seq_id` is less than or equal
|
||||
/// to this ack's `seq_id`. This cursor is stream-scoped.
|
||||
Ack,
|
||||
/// to this ack's `seq_id`. Chunk acknowledgements carry `segment_id` so the
|
||||
/// sender can retain only the still-unacked wire chunks on reconnect.
|
||||
Ack {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
segment_id: Option<usize>,
|
||||
},
|
||||
Ping,
|
||||
ClientClosed,
|
||||
}
|
||||
@@ -85,6 +95,12 @@ pub enum ServerEvent {
|
||||
ServerMessage {
|
||||
message: Box<OutgoingMessage>,
|
||||
},
|
||||
ServerMessageChunk {
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
message_chunk_base64: String,
|
||||
},
|
||||
#[allow(dead_code)]
|
||||
Ack,
|
||||
Pong {
|
||||
@@ -92,6 +108,15 @@ pub enum ServerEvent {
|
||||
},
|
||||
}
|
||||
|
||||
impl ServerEvent {
|
||||
pub(crate) fn segment_id(&self) -> Option<usize> {
|
||||
match self {
|
||||
Self::ServerMessageChunk { segment_id, .. } => Some(*segment_id),
|
||||
Self::ServerMessage { .. } | Self::Ack | Self::Pong { .. } => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub(crate) struct ServerEnvelope {
|
||||
@@ -0,0 +1,449 @@
|
||||
use super::protocol::ClientEnvelope;
|
||||
use super::protocol::ClientEvent;
|
||||
use super::protocol::ClientId;
|
||||
use super::protocol::ServerEnvelope;
|
||||
use super::protocol::ServerEvent;
|
||||
use super::protocol::StreamId;
|
||||
use base64::DecodeSliceError;
|
||||
use base64::Engine;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Write;
|
||||
use tokio::time::Instant;
|
||||
use tracing::warn;
|
||||
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_TARGET_BYTES: usize = 100 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_MAX_BYTES: usize = 150 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_REASSEMBLED_MAX_BYTES: usize = 100 * 1024 * 1024;
|
||||
pub(super) const REMOTE_CONTROL_SEGMENT_COUNT_MAX: usize = 1024;
|
||||
const REMOTE_CONTROL_SEGMENT_ASSEMBLY_MAX_COUNT: usize = 128;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ClientSegmentAssembly {
|
||||
stream_id: StreamId,
|
||||
metadata: ClientSegmentMetadata,
|
||||
raw: Vec<u8>,
|
||||
next_segment_id: usize,
|
||||
last_chunk_seen_at: Instant,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct ClientSegmentMetadata {
|
||||
seq_id: u64,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(super) struct ClientSegmentReassembler {
|
||||
assemblies: HashMap<ClientId, ClientSegmentAssembly>,
|
||||
}
|
||||
|
||||
pub(super) enum ClientSegmentObservation {
|
||||
Forward(Box<ClientEnvelope>),
|
||||
Pending,
|
||||
Dropped,
|
||||
}
|
||||
|
||||
impl ClientSegmentReassembler {
|
||||
pub(super) fn observe(&mut self, envelope: ClientEnvelope) -> ClientSegmentObservation {
|
||||
let ClientEvent::ClientMessageChunk {
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
message_chunk_base64,
|
||||
} = &envelope.event
|
||||
else {
|
||||
return ClientSegmentObservation::Forward(Box::new(envelope));
|
||||
};
|
||||
let segment_id = *segment_id;
|
||||
let segment_count = *segment_count;
|
||||
let message_size_bytes = *message_size_bytes;
|
||||
|
||||
let Some(metadata) = ClientSegmentMetadata::from_envelope(&envelope) else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without seq_id"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
let Some(stream_id) = envelope.stream_id.clone() else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without stream_id"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
if self.should_ignore_chunk(&envelope.client_id, &stream_id, metadata.seq_id, segment_id) {
|
||||
return ClientSegmentObservation::Dropped;
|
||||
}
|
||||
if segment_count == 0
|
||||
|| segment_count > REMOTE_CONTROL_SEGMENT_COUNT_MAX
|
||||
|| segment_id >= segment_count
|
||||
|| message_size_bytes == 0
|
||||
|| message_size_bytes > REMOTE_CONTROL_REASSEMBLED_MAX_BYTES
|
||||
|| message_chunk_base64.is_empty()
|
||||
{
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping invalid segmented remote-control client envelope"
|
||||
);
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
match self.assemblies.get(&envelope.client_id) {
|
||||
Some(assembly) if assembly.stream_id != stream_id => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"resetting segmented remote-control client envelope after stream change"
|
||||
);
|
||||
self.assemblies.insert(
|
||||
envelope.client_id.clone(),
|
||||
ClientSegmentAssembly {
|
||||
stream_id: stream_id.clone(),
|
||||
metadata: metadata.clone(),
|
||||
raw: Vec::new(),
|
||||
next_segment_id: 0,
|
||||
last_chunk_seen_at: now,
|
||||
},
|
||||
);
|
||||
}
|
||||
Some(_) => {}
|
||||
None => {
|
||||
self.evict_assemblies_if_full();
|
||||
self.assemblies.insert(
|
||||
envelope.client_id.clone(),
|
||||
ClientSegmentAssembly {
|
||||
stream_id: stream_id.clone(),
|
||||
metadata: metadata.clone(),
|
||||
raw: Vec::new(),
|
||||
next_segment_id: 0,
|
||||
last_chunk_seen_at: now,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
let result = {
|
||||
let Some(assembly) = self.assemblies.get_mut(&envelope.client_id) else {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope without assembly"
|
||||
);
|
||||
return ClientSegmentObservation::Dropped;
|
||||
};
|
||||
if metadata.seq_id < assembly.metadata.seq_id {
|
||||
AssemblyUpdate::Ignore
|
||||
} else if assembly.metadata != metadata {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"resetting segmented remote-control client envelope after metadata mismatch"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else if segment_id < assembly.next_segment_id {
|
||||
AssemblyUpdate::Pending
|
||||
} else if segment_id != assembly.next_segment_id {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping out-of-order segmented remote-control client envelope"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else {
|
||||
assembly.last_chunk_seen_at = now;
|
||||
let chunk_start = assembly.raw.len();
|
||||
let decoded_chunk_len = base64::decoded_len_estimate(message_chunk_base64.len());
|
||||
let chunk_end = usize::min(
|
||||
message_size_bytes,
|
||||
chunk_start.saturating_add(decoded_chunk_len),
|
||||
);
|
||||
assembly.raw.resize(chunk_end, 0);
|
||||
match base64::engine::general_purpose::STANDARD.decode_slice(
|
||||
message_chunk_base64.as_bytes(),
|
||||
&mut assembly.raw[chunk_start..],
|
||||
) {
|
||||
Ok(decoded_chunk_len) => {
|
||||
assembly.raw.truncate(chunk_start + decoded_chunk_len);
|
||||
assembly.next_segment_id += 1;
|
||||
if assembly.next_segment_id < segment_count {
|
||||
AssemblyUpdate::Pending
|
||||
} else if assembly.raw.len() != message_size_bytes {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping reassembled remote-control client envelope with mismatched size"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
} else {
|
||||
match serde_json::from_slice::<JSONRPCMessage>(&assembly.raw) {
|
||||
Ok(message) => AssemblyUpdate::Complete(message),
|
||||
Err(err) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping invalid reassembled remote-control client envelope: {err}"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(DecodeSliceError::OutputSliceTooSmall) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope after size overflow"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
client_id = envelope.client_id.0.as_str(),
|
||||
"dropping segmented remote-control client envelope with invalid base64: {err}"
|
||||
);
|
||||
AssemblyUpdate::Drop
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match result {
|
||||
AssemblyUpdate::Pending => ClientSegmentObservation::Pending,
|
||||
AssemblyUpdate::Ignore => ClientSegmentObservation::Dropped,
|
||||
AssemblyUpdate::Drop => {
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
ClientSegmentObservation::Dropped
|
||||
}
|
||||
AssemblyUpdate::Complete(message) => {
|
||||
self.remove_assembly(&envelope.client_id, &stream_id);
|
||||
ClientSegmentObservation::Forward(Box::new(ClientEnvelope {
|
||||
event: ClientEvent::ClientMessage { message },
|
||||
..envelope
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn invalidate_stream(&mut self, client_id: &ClientId, stream_id: &StreamId) {
|
||||
self.remove_assembly(client_id, stream_id);
|
||||
}
|
||||
|
||||
pub(super) fn invalidate_client(&mut self, client_id: &ClientId) {
|
||||
self.assemblies.remove(client_id);
|
||||
}
|
||||
|
||||
pub(super) fn should_ignore_chunk(
|
||||
&self,
|
||||
client_id: &ClientId,
|
||||
stream_id: &StreamId,
|
||||
seq_id: u64,
|
||||
segment_id: usize,
|
||||
) -> bool {
|
||||
self.assemblies.get(client_id).is_some_and(|assembly| {
|
||||
assembly.stream_id == *stream_id
|
||||
&& (seq_id < assembly.metadata.seq_id
|
||||
|| (seq_id == assembly.metadata.seq_id
|
||||
&& segment_id < assembly.next_segment_id))
|
||||
})
|
||||
}
|
||||
|
||||
fn remove_assembly(&mut self, client_id: &ClientId, stream_id: &StreamId) {
|
||||
if self
|
||||
.assemblies
|
||||
.get(client_id)
|
||||
.is_some_and(|assembly| &assembly.stream_id == stream_id)
|
||||
{
|
||||
self.assemblies.remove(client_id);
|
||||
}
|
||||
}
|
||||
|
||||
fn evict_assemblies_if_full(&mut self) {
|
||||
while self.assemblies.len() >= REMOTE_CONTROL_SEGMENT_ASSEMBLY_MAX_COUNT {
|
||||
let Some(client_id) = self
|
||||
.assemblies
|
||||
.iter()
|
||||
.min_by_key(|(_, assembly)| assembly.last_chunk_seen_at)
|
||||
.map(|(client_id, _)| client_id.clone())
|
||||
else {
|
||||
return;
|
||||
};
|
||||
self.assemblies.remove(&client_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
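The chunk-append step in `observe` sizes the buffer with `decoded_len_estimate`, decodes in place with `decode_slice`, then truncates to the real length. A standalone sketch of that sequence, independent of the reassembler types:

use base64::Engine;

fn main() {
    let engine = &base64::engine::general_purpose::STANDARD;
    let chunk_base64 = engine.encode(b"hello");

    // Over-allocate using the estimate, decode into the slice, then trim to the
    // number of bytes actually written.
    let mut raw = vec![0u8; base64::decoded_len_estimate(chunk_base64.len())];
    let decoded_len = engine
        .decode_slice(chunk_base64.as_bytes(), &mut raw)
        .expect("estimate leaves enough room");
    raw.truncate(decoded_len);

    assert_eq!(raw, b"hello");
}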
|
||||
enum AssemblyUpdate {
|
||||
Pending,
|
||||
Ignore,
|
||||
Drop,
|
||||
Complete(JSONRPCMessage),
|
||||
}
|
||||
|
||||
impl ClientSegmentMetadata {
|
||||
fn from_envelope(envelope: &ClientEnvelope) -> Option<Self> {
|
||||
let ClientEvent::ClientMessageChunk {
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
..
|
||||
} = &envelope.event
|
||||
else {
|
||||
return None;
|
||||
};
|
||||
Some(Self {
|
||||
seq_id: envelope.seq_id?,
|
||||
segment_count: *segment_count,
|
||||
message_size_bytes: *message_size_bytes,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn split_server_envelope_for_transport(
|
||||
envelope: ServerEnvelope,
|
||||
) -> io::Result<Vec<ServerEnvelope>> {
|
||||
if !matches!(envelope.event, ServerEvent::ServerMessage { .. }) {
|
||||
return Ok(vec![envelope]);
|
||||
}
|
||||
|
||||
let envelope_size_bytes = serialized_len(&envelope)?;
|
||||
if envelope_size_bytes <= REMOTE_CONTROL_SEGMENT_MAX_BYTES {
|
||||
return Ok(vec![envelope]);
|
||||
}
|
||||
|
||||
let ServerEvent::ServerMessage { message } = envelope.event.clone() else {
|
||||
unreachable!("server message variant checked above");
|
||||
};
|
||||
let raw = serde_json::to_vec(message.as_ref()).map_err(io::Error::other)?;
|
||||
let message_size_bytes = raw.len();
|
||||
if message_size_bytes > REMOTE_CONTROL_REASSEMBLED_MAX_BYTES {
|
||||
warn!("dropping remote-control server envelope that exceeds reassembled size limit");
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let minimal_segment_count =
|
||||
usize::min(message_size_bytes.max(1), REMOTE_CONTROL_SEGMENT_COUNT_MAX);
|
||||
let minimal_chunk = &raw[..usize::min(raw.len(), 1)];
|
||||
if serialized_chunk_len(
|
||||
&envelope,
|
||||
/*segment_id*/ 0,
|
||||
minimal_segment_count,
|
||||
message_size_bytes,
|
||||
minimal_chunk,
|
||||
)? > REMOTE_CONTROL_SEGMENT_MAX_BYTES
|
||||
{
|
||||
warn!("dropping remote-control server envelope that cannot fit within segment size limit");
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let mut segment_count = usize::max(
|
||||
2,
|
||||
message_size_bytes.div_ceil(REMOTE_CONTROL_SEGMENT_TARGET_BYTES),
|
||||
);
|
||||
loop {
|
||||
let chunk_size = usize::max(1, message_size_bytes.div_ceil(segment_count));
|
||||
segment_count = message_size_bytes.div_ceil(chunk_size);
|
||||
let segments_fit = raw
|
||||
.chunks(chunk_size)
|
||||
.enumerate()
|
||||
.all(|(segment_id, chunk)| {
|
||||
serialized_chunk_len(
|
||||
&envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)
|
||||
.is_ok_and(|size| size <= REMOTE_CONTROL_SEGMENT_MAX_BYTES)
|
||||
});
|
||||
if segments_fit {
|
||||
return raw
|
||||
.chunks(chunk_size)
|
||||
.enumerate()
|
||||
.map(|(segment_id, chunk)| {
|
||||
build_chunk_envelope(
|
||||
&envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
}
|
||||
if chunk_size == 1 {
|
||||
warn!(
|
||||
"dropping remote-control server envelope that cannot fit within segment size limit"
|
||||
);
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let next_segment_count = segment_count + 1;
|
||||
let next_chunk_size = usize::max(1, message_size_bytes.div_ceil(next_segment_count));
|
||||
segment_count = if next_chunk_size == chunk_size {
|
||||
message_size_bytes
|
||||
} else {
|
||||
next_segment_count
|
||||
};
|
||||
}
|
||||
}
|
||||
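A worked example of the initial sizing math above, with hypothetical numbers: a 1,000,000-byte message against the 100 KiB target starts at 10 segments of 100,000 bytes each, which the loop only grows further if a chunk still serializes over the 150 KiB wire limit.

fn main() {
    let message_size_bytes: usize = 1_000_000;
    let target_bytes: usize = 100 * 1024; // REMOTE_CONTROL_SEGMENT_TARGET_BYTES

    // Initial estimate used at the top of the loop above.
    let segment_count = usize::max(2, message_size_bytes.div_ceil(target_bytes));
    let chunk_size = usize::max(1, message_size_bytes.div_ceil(segment_count));

    assert_eq!(segment_count, 10);
    assert_eq!(chunk_size, 100_000);
}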
|
||||
fn serialized_chunk_len(
|
||||
envelope: &ServerEnvelope,
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
chunk: &[u8],
|
||||
) -> io::Result<usize> {
|
||||
serialized_len(&build_chunk_envelope(
|
||||
envelope,
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
chunk,
|
||||
)?)
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct CountingWriter {
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl Write for CountingWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.len += buf.len();
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn serialized_len(value: &impl serde::Serialize) -> io::Result<usize> {
|
||||
let mut writer = CountingWriter::default();
|
||||
serde_json::to_writer(&mut writer, value).map_err(io::Error::other)?;
|
||||
Ok(writer.len)
|
||||
}
|
||||
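`serialized_len` streams the JSON through the byte-counting writer instead of allocating a string. A small usage sketch, written as a test that would have to live in this same module since the helper is private:

#[test]
fn serialized_len_counts_compact_json_bytes() {
    // {"ok":true} is 11 bytes in compact form.
    let n = serialized_len(&serde_json::json!({"ok": true})).expect("serializable value");
    assert_eq!(n, 11);
}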
|
||||
fn build_chunk_envelope(
|
||||
envelope: &ServerEnvelope,
|
||||
segment_id: usize,
|
||||
segment_count: usize,
|
||||
message_size_bytes: usize,
|
||||
chunk: &[u8],
|
||||
) -> io::Result<ServerEnvelope> {
|
||||
if segment_count > REMOTE_CONTROL_SEGMENT_COUNT_MAX {
|
||||
return Err(io::Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
"remote-control segment count exceeds maximum",
|
||||
));
|
||||
}
|
||||
Ok(ServerEnvelope {
|
||||
event: ServerEvent::ServerMessageChunk {
|
||||
segment_id,
|
||||
segment_count,
|
||||
message_size_bytes,
|
||||
message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
|
||||
},
|
||||
client_id: envelope.client_id.clone(),
|
||||
stream_id: envelope.stream_id.clone(),
|
||||
seq_id: envelope.seq_id,
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,386 @@
|
||||
use super::protocol::ClientEnvelope;
|
||||
use super::protocol::ClientEvent;
|
||||
use super::protocol::ClientId;
|
||||
use super::protocol::ServerEnvelope;
|
||||
use super::protocol::ServerEvent;
|
||||
use super::protocol::StreamId;
|
||||
use super::segment::ClientSegmentObservation;
|
||||
use super::segment::ClientSegmentReassembler;
|
||||
use super::segment::REMOTE_CONTROL_SEGMENT_MAX_BYTES;
|
||||
use super::segment::split_server_envelope_for_transport;
|
||||
use crate::outgoing_message::OutgoingMessage;
|
||||
use base64::Engine;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::JSONRPCNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn reassembles_client_message_chunks() {
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = Some(StreamId("stream-1".to_string()));
|
||||
let mut reassembler = ClientSegmentReassembler::default();
|
||||
|
||||
assert!(matches!(
|
||||
reassembler.observe(chunk_envelope(
|
||||
client_id.clone(),
|
||||
stream_id.clone(),
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
)),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
let reassembled = match reassembler.observe(chunk_envelope(
|
||||
client_id.clone(),
|
||||
stream_id,
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
)) {
|
||||
ClientSegmentObservation::Forward(reassembled) => *reassembled,
|
||||
ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => {
|
||||
panic!("message should reassemble")
|
||||
}
|
||||
};
|
||||
assert_eq!(reassembled.client_id, client_id);
|
||||
assert_eq!(
|
||||
reassembled.stream_id,
|
||||
Some(StreamId("stream-1".to_string()))
|
||||
);
|
||||
assert_eq!(reassembled.seq_id, Some(7));
|
||||
assert_eq!(reassembled.cursor, None);
|
||||
match reassembled.event {
|
||||
ClientEvent::ClientMessage {
|
||||
message: reassembled_message,
|
||||
} => assert_eq!(reassembled_message, message),
|
||||
other => panic!("expected client message, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn splits_large_server_messages_into_wire_chunks() {
|
||||
let envelope = ServerEnvelope {
|
||||
event: ServerEvent::ServerMessage {
|
||||
message: Box::new(OutgoingMessage::AppServerNotification(
|
||||
ServerNotification::ConfigWarning(ConfigWarningNotification {
|
||||
summary: "x".repeat(REMOTE_CONTROL_SEGMENT_MAX_BYTES),
|
||||
details: None,
|
||||
path: None,
|
||||
range: None,
|
||||
}),
|
||||
)),
|
||||
},
|
||||
client_id: ClientId("client-1".to_string()),
|
||||
stream_id: StreamId("stream-1".to_string()),
|
||||
seq_id: 9,
|
||||
};
|
||||
|
||||
let segments = split_server_envelope_for_transport(envelope).expect("split should succeed");
|
||||
|
||||
assert!(segments.len() > 1);
|
||||
assert!(
|
||||
segments
|
||||
.iter()
|
||||
.all(|segment| matches!(segment.event, ServerEvent::ServerMessageChunk { .. }))
|
||||
);
|
||||
assert!(segments.iter().all(|segment| segment.seq_id == 9));
|
||||
assert!(segments.iter().all(|segment| {
|
||||
serde_json::to_vec(segment)
|
||||
.expect("segment should serialize")
|
||||
.len()
|
||||
<= REMOTE_CONTROL_SEGMENT_MAX_BYTES
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalidates_incomplete_stream_assemblies() {
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
let mut reassembler = ClientSegmentReassembler::default();
|
||||
|
||||
assert!(matches!(
|
||||
reassembler.observe(chunk_envelope(
|
||||
client_id.clone(),
|
||||
Some(stream_id.clone()),
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
)),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
reassembler.invalidate_stream(&client_id, &stream_id);
|
||||
assert!(matches!(
|
||||
reassembler.observe(chunk_envelope(
|
||||
client_id,
|
||||
Some(stream_id),
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
)),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
fn resets_incomplete_client_assembly_when_stream_changes() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let first_stream_id = StreamId("stream-1".to_string());
    let second_stream_id = StreamId("stream-2".to_string());
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            Some(first_stream_id.clone()),
            /*seq_id*/ 7,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            Some(second_stream_id.clone()),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    let reassembled = match reassembler.observe(chunk_envelope(
        client_id.clone(),
        Some(second_stream_id),
        /*seq_id*/ 8,
        /*segment_id*/ 1,
        /*segment_count*/ 2,
        raw.len(),
        &raw[split..],
    )) {
        ClientSegmentObservation::Forward(reassembled) => *reassembled,
        ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => {
            panic!("replacement stream should reassemble")
        }
    };
    assert_eq!(
        reassembled.stream_id,
        Some(StreamId("stream-2".to_string()))
    );
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            Some(first_stream_id),
            /*seq_id*/ 7,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Dropped
    ));
}

#[test]
fn ignores_stale_chunks_without_dropping_newer_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 7,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

#[test]
fn ignores_invalid_stale_chunks_without_dropping_newer_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 7,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            b"",
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

#[test]
fn ignores_invalid_duplicate_chunks_without_dropping_current_assembly() {
    let message = JSONRPCMessage::Notification(JSONRPCNotification {
        method: "initialized".to_string(),
        params: None,
    });
    let raw = serde_json::to_vec(&message).expect("message should serialize");
    let split = raw.len() / 2;
    let client_id = ClientId("client-1".to_string());
    let stream_id = Some(StreamId("stream-1".to_string()));
    let mut reassembler = ClientSegmentReassembler::default();

    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            &raw[..split],
        )),
        ClientSegmentObservation::Pending
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id.clone(),
            stream_id.clone(),
            /*seq_id*/ 8,
            /*segment_id*/ 0,
            /*segment_count*/ 2,
            raw.len(),
            b"",
        )),
        ClientSegmentObservation::Dropped
    ));
    assert!(matches!(
        reassembler.observe(chunk_envelope(
            client_id,
            stream_id,
            /*seq_id*/ 8,
            /*segment_id*/ 1,
            /*segment_count*/ 2,
            raw.len(),
            &raw[split..],
        )),
        ClientSegmentObservation::Forward(_)
    ));
}

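// Illustrative, standalone sketch (not part of this change): the chunk payloads above are
// base64-encoded, so a splitter has to budget for the ~4/3 encoding blow-up (plus the JSON
// envelope around each chunk) when keeping a segment under the wire cap. The helper names
// and the budget value here are made up for illustration.
/// Length of the standard, padded base64 encoding of `raw_len` bytes.
fn base64_len(raw_len: usize) -> usize {
    raw_len.div_ceil(3) * 4
}

/// Largest raw chunk whose base64 form fits in `budget` bytes (ignoring the JSON
/// envelope, which a real splitter must also subtract from the budget).
fn max_raw_chunk(budget: usize) -> usize {
    (budget / 4) * 3
}

fn base64_budget_demo() {
    assert_eq!(base64_len(1), 4);
    assert_eq!(base64_len(3), 4);
    assert_eq!(base64_len(4), 8);
    let budget = 1024;
    assert!(base64_len(max_raw_chunk(budget)) <= budget);
}
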
fn chunk_envelope(
    client_id: ClientId,
    stream_id: Option<StreamId>,
    seq_id: u64,
    segment_id: usize,
    segment_count: usize,
    message_size_bytes: usize,
    chunk: &[u8],
) -> ClientEnvelope {
    ClientEnvelope {
        event: ClientEvent::ClientMessageChunk {
            segment_id,
            segment_count,
            message_size_bytes,
            message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
        },
        client_id,
        stream_id,
        seq_id: Some(seq_id),
        cursor: None,
    }
}

@@ -831,7 +831,7 @@ async fn remote_control_transport_clears_outgoing_buffer_when_backend_acks() {
     send_client_event(
         &mut first_websocket,
         ClientEnvelope {
-            event: ClientEvent::Ack,
+            event: ClientEvent::Ack { segment_id: None },
             client_id: client_id.clone(),
            stream_id: Some(stream_id),
             seq_id: Some(1),
@@ -15,6 +15,10 @@ use super::protocol::ClientId;
 use super::protocol::RemoteControlTarget;
 use super::protocol::ServerEnvelope;
 use super::protocol::StreamId;
+use super::segment::ClientSegmentObservation;
+use super::segment::ClientSegmentReassembler;
+use super::segment::REMOTE_CONTROL_SEGMENT_MAX_BYTES;
+use super::segment::split_server_envelope_for_transport;
 use axum::http::HeaderValue;
 use base64::Engine;
 use codex_app_server_protocol::RemoteControlConnectionStatus;
@@ -49,7 +53,7 @@ use tracing::error;
 use tracing::info;
 use tracing::warn;
 
-pub(super) const REMOTE_CONTROL_PROTOCOL_VERSION: &str = "2";
+pub(super) const REMOTE_CONTROL_PROTOCOL_VERSION: &str = "3";
 pub(super) const REMOTE_CONTROL_ACCOUNT_ID_HEADER: &str = "chatgpt-account-id";
 const REMOTE_CONTROL_SUBSCRIBE_CURSOR_HEADER: &str = "x-codex-subscribe-cursor";
 const REMOTE_CONTROL_WEBSOCKET_PING_INTERVAL: std::time::Duration =
@@ -85,17 +89,29 @@ impl BoundedOutboundBuffer {
         self.used_tx.send_modify(|used| *used += 1);
     }
 
-    fn ack(&mut self, client_id: &ClientId, stream_id: &StreamId, acked_seq_id: u64) {
+    fn ack(
+        &mut self,
+        client_id: &ClientId,
+        stream_id: &StreamId,
+        acked_seq_id: u64,
+        acked_segment_id: Option<usize>,
+    ) {
         let key = (client_id.clone(), stream_id.clone());
         let Some(buffer) = self.buffer_by_stream.get_mut(&key) else {
             return;
         };
-        while let Some(server_envelope) = buffer.front()
-            && server_envelope.seq_id <= acked_seq_id
-        {
-            buffer.pop_front();
-            self.used_tx.send_modify(|used| *used -= 1);
-        }
+        let acked_cursor = (acked_seq_id, acked_segment_id.unwrap_or(usize::MAX));
+        buffer.retain(|server_envelope| {
+            let envelope_cursor = (
+                server_envelope.seq_id,
+                server_envelope.event.segment_id().unwrap_or_default(),
+            );
+            let is_acked = envelope_cursor <= acked_cursor;
+            if is_acked {
+                self.used_tx.send_modify(|used| *used -= 1);
+            }
+            !is_acked
+        });
         if buffer.is_empty() {
             self.buffer_by_stream.remove(&key);
         }
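// Illustrative, standalone sketch (not part of this change): the ack above keys retention
// on a (seq_id, segment_id) cursor. Rust tuples compare lexicographically, and mapping a
// missing segment id to usize::MAX makes a segmentless ack retire every segment of that
// sequence number, as a worked example:
fn is_acked(envelope: (u64, usize), ack_seq: u64, ack_segment: Option<usize>) -> bool {
    envelope <= (ack_seq, ack_segment.unwrap_or(usize::MAX))
}

fn ack_cursor_demo() {
    // A segment-level ack retires that segment and everything before it...
    assert!(is_acked((4, 0), 4, Some(1)));
    assert!(is_acked((4, 1), 4, Some(1)));
    assert!(!is_acked((4, 2), 4, Some(1)));
    // ...while a segmentless ack retires the whole sequence number.
    assert!(is_acked((4, 2), 4, None));
    assert!(!is_acked((5, 0), 4, None));
}
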
@@ -112,6 +128,88 @@ struct WebsocketState {
     outbound_buffer: BoundedOutboundBuffer,
     subscribe_cursor: Option<String>,
     next_seq_id_by_stream: HashMap<(ClientId, StreamId), u64>,
+    last_completed_client_chunk_seq_id_by_stream: HashMap<(ClientId, Option<StreamId>), u64>,
+    client_segment_reassembler: ClientSegmentReassembler,
 }
 
+impl WebsocketState {
+    fn observe_client_message(
+        &mut self,
+        client_envelope: ClientEnvelope,
+        wire_size_bytes: usize,
+    ) -> ClientSegmentObservation {
+        let client_message_key = Self::client_message_key(&client_envelope);
+        if let Some((key, seq_id)) = client_message_key.as_ref()
+            && self
+                .last_completed_client_chunk_seq_id_by_stream
+                .get(key)
+                .is_some_and(|last_seq_id| last_seq_id >= seq_id)
+        {
+            return ClientSegmentObservation::Dropped;
+        }
+        if let (
+            Some((_, seq_id)),
+            Some(stream_id),
+            ClientEvent::ClientMessageChunk { segment_id, .. },
+        ) = (
+            client_message_key.as_ref(),
+            client_envelope.stream_id.as_ref(),
+            &client_envelope.event,
+        ) && self.client_segment_reassembler.should_ignore_chunk(
+            &client_envelope.client_id,
+            stream_id,
+            *seq_id,
+            *segment_id,
+        ) {
+            return ClientSegmentObservation::Dropped;
+        }
+        if client_message_key.is_some() && wire_size_bytes > REMOTE_CONTROL_SEGMENT_MAX_BYTES {
+            warn!(
+                client_id = client_envelope.client_id.0.as_str(),
+                "dropping oversized segmented remote-control client envelope"
+            );
+            if let Some(stream_id) = client_envelope.stream_id.as_ref() {
+                self.client_segment_reassembler
+                    .invalidate_stream(&client_envelope.client_id, stream_id);
+            }
+            return ClientSegmentObservation::Dropped;
+        }
+
+        let observation = self.client_segment_reassembler.observe(client_envelope);
+        if matches!(observation, ClientSegmentObservation::Forward(_))
+            && let Some((key, seq_id)) = client_message_key
+        {
+            self.last_completed_client_chunk_seq_id_by_stream
+                .insert(key, seq_id);
+        }
+        observation
+    }
+
+    fn invalidate_client_message_stream(&mut self, client_id: &ClientId, stream_id: &StreamId) {
+        self.last_completed_client_chunk_seq_id_by_stream
+            .remove(&(client_id.clone(), Some(stream_id.clone())));
+    }
+
+    fn invalidate_client_message_client(&mut self, client_id: &ClientId) {
+        self.last_completed_client_chunk_seq_id_by_stream
+            .retain(|(cursor_client_id, _), _| cursor_client_id != client_id);
+    }
+
+    fn client_message_key(
+        client_envelope: &ClientEnvelope,
+    ) -> Option<((ClientId, Option<StreamId>), u64)> {
+        let seq_id = match (&client_envelope.event, client_envelope.seq_id) {
+            (ClientEvent::ClientMessageChunk { .. }, Some(seq_id)) => seq_id,
+            _ => return None,
+        };
+        Some((
+            (
+                client_envelope.client_id.clone(),
+                client_envelope.stream_id.clone(),
+            ),
+            seq_id,
+        ))
+    }
+}
+
 pub(crate) struct RemoteControlWebsocket {
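// Illustrative, standalone sketch (not part of this change): the replay guard above
// remembers the last fully reassembled seq_id per (client, stream) and drops chunks at or
// below it. The struct and method names below are made up; only the idea mirrors
// last_completed_client_chunk_seq_id_by_stream.
use std::collections::HashMap;

#[derive(Default)]
struct ReplayGuard {
    last_completed: HashMap<(String, Option<String>), u64>,
}

impl ReplayGuard {
    fn is_replay(&self, client: &str, stream: Option<&str>, seq_id: u64) -> bool {
        self.last_completed
            .get(&(client.to_string(), stream.map(str::to_string)))
            .is_some_and(|last| *last >= seq_id)
    }

    fn mark_completed(&mut self, client: &str, stream: Option<&str>, seq_id: u64) {
        self.last_completed
            .insert((client.to_string(), stream.map(str::to_string)), seq_id);
    }
}

fn replay_guard_demo() {
    let mut guard = ReplayGuard::default();
    assert!(!guard.is_replay("client-1", Some("stream-1"), 4));
    guard.mark_completed("client-1", Some("stream-1"), 4);
    assert!(guard.is_replay("client-1", Some("stream-1"), 4));
    assert!(!guard.is_replay("client-1", Some("stream-1"), 5));
}
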
@@ -231,6 +329,8 @@ impl RemoteControlWebsocket {
                 outbound_buffer,
                 subscribe_cursor: None,
                 next_seq_id_by_stream: HashMap::new(),
+                last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
+                client_segment_reassembler: ClientSegmentReassembler::default(),
             })),
             server_event_rx: Arc::new(Mutex::new(server_event_rx)),
             used_rx,
@@ -556,7 +656,7 @@ impl RemoteControlWebsocket {
                 }
             }
         };
-        let (payload, write_complete_tx) = {
+        let (payloads, write_complete_tx) = {
             let mut state = state.lock().await;
             let seq_key = (
                 queued_server_envelope.client_id.clone(),
@@ -573,29 +673,42 @@ impl RemoteControlWebsocket {
                 seq_id,
                 stream_id: queued_server_envelope.stream_id,
            };
-            let payload = match serde_json::to_string(&server_envelope) {
-                Ok(payload) => payload,
+            let server_envelopes = match split_server_envelope_for_transport(server_envelope) {
+                Ok(server_envelopes) => server_envelopes,
                 Err(err) => {
-                    error!("failed to serialize remote-control server event: {err}");
+                    error!("failed to split remote-control server event: {err}");
                     continue;
                 }
             };
+            let mut payloads = Vec::with_capacity(server_envelopes.len());
+            for server_envelope in server_envelopes {
+                let payload = match serde_json::to_string(&server_envelope) {
+                    Ok(payload) => payload,
+                    Err(err) => {
+                        error!("failed to serialize remote-control server event: {err}");
+                        continue;
+                    }
+                };
+                state.outbound_buffer.insert(&server_envelope);
+                payloads.push(payload);
+            }
             state
                 .next_seq_id_by_stream
                 .insert(seq_key, seq_id.saturating_add(1));
-            state.outbound_buffer.insert(&server_envelope);
 
-            (payload, queued_server_envelope.write_complete_tx)
+            (payloads, queued_server_envelope.write_complete_tx)
         };
 
-        tokio::select! {
-            _ = shutdown_token.cancelled() => return Ok(()),
-            send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => {
-                if let Err(err) = send_result {
-                    return Err(io::Error::other(err));
+        for payload in payloads {
+            tokio::select! {
+                _ = shutdown_token.cancelled() => return Ok(()),
+                send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => {
+                    if let Err(err) = send_result {
+                        return Err(io::Error::other(err));
+                    }
                 }
             }
-        };
+            };
+        }
         if let Some(write_complete_tx) = write_complete_tx {
             let _ = write_complete_tx.send(());
         }
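// Illustrative, standalone sketch (not part of this change): the send loop above writes
// every segment of a split envelope in order and only signals write completion after the
// last one, aborting the whole envelope on the first transport error. The helper below is
// a synchronous stand-in for that shape; names and signatures are assumptions.
fn send_all<E>(
    payloads: Vec<String>,
    mut send: impl FnMut(&str) -> Result<(), E>,
    on_complete: impl FnOnce(),
) -> Result<(), E> {
    for payload in &payloads {
        send(payload)?; // abort the whole envelope on the first transport error
    }
    on_complete();
    Ok(())
}

fn send_all_demo() {
    let mut sent = Vec::new();
    let mut completed = false;
    send_all(
        vec!["segment-0".to_string(), "segment-1".to_string()],
        |payload: &str| -> Result<(), String> {
            sent.push(payload.to_string());
            Ok(())
        },
        || completed = true,
    )
    .unwrap();
    assert_eq!(sent.len(), 2);
    assert!(completed);
}
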
@@ -657,11 +770,30 @@ impl RemoteControlWebsocket {
                     if client_tracker.close_client(&client_key).await.is_err() {
                         return Ok(());
                     }
+                    state
+                        .lock()
+                        .await
+                        .client_segment_reassembler
+                        .invalidate_stream(&client_key.0, &client_key.1);
+                    state
+                        .lock()
+                        .await
+                        .invalidate_client_message_stream(&client_key.0, &client_key.1);
                     continue;
                 }
                 _ = idle_sweep_interval.tick() => {
-                    if client_tracker.close_expired_clients().await.is_err() {
-                        return Ok(());
+                    match client_tracker.close_expired_clients().await {
+                        Ok(client_keys) => {
+                            let mut websocket_state = state.lock().await;
+                            for (client_id, stream_id) in client_keys {
+                                websocket_state
+                                    .client_segment_reassembler
+                                    .invalidate_stream(&client_id, &stream_id);
+                                websocket_state
+                                    .invalidate_client_message_stream(&client_id, &stream_id);
+                            }
+                        }
+                        Err(_) => return Ok(()),
                     }
                     continue;
                 }
@@ -672,10 +804,11 @@ impl RemoteControlWebsocket {
                     }
                 }
             };
-            let client_envelope = match incoming_message {
+            let (client_envelope, wire_size_bytes) = match incoming_message {
                 Ok(tungstenite::Message::Text(text)) => {
+                    let wire_size_bytes = text.len();
                     match serde_json::from_str::<ClientEnvelope>(&text) {
-                        Ok(client_envelope) => client_envelope,
+                        Ok(client_envelope) => (client_envelope, wire_size_bytes),
                         Err(err) => {
                             warn!("failed to deserialize remote-control client event: {err}");
                             continue;
@@ -707,12 +840,21 @@ impl RemoteControlWebsocket {
                 }
             };
 
+            let observation = {
+                let mut websocket_state = state.lock().await;
+                websocket_state.observe_client_message(client_envelope, wire_size_bytes)
+            };
+            let client_envelope = match observation {
+                ClientSegmentObservation::Forward(client_envelope) => *client_envelope,
+                ClientSegmentObservation::Pending | ClientSegmentObservation::Dropped => continue,
+            };
+
             {
                 let mut websocket_state = state.lock().await;
                 if let Some(cursor) = client_envelope.cursor.as_deref() {
                     websocket_state.subscribe_cursor = Some(cursor.to_string());
                 }
-                if let ClientEvent::Ack = &client_envelope.event
+                if let ClientEvent::Ack { segment_id } = &client_envelope.event
                     && let Some(acked_seq_id) = client_envelope.seq_id
                     && let Some(stream_id) = client_envelope.stream_id.as_ref()
                 {
@@ -720,10 +862,18 @@ impl RemoteControlWebsocket {
                         &client_envelope.client_id,
                         stream_id,
                         acked_seq_id,
+                        *segment_id,
                     );
                 }
             }
 
+            let closed_client =
+                matches!(&client_envelope.event, ClientEvent::ClientClosed).then(|| {
+                    (
+                        client_envelope.client_id.clone(),
+                        client_envelope.stream_id.clone(),
+                    )
+                });
             if client_tracker
                 .handle_message(client_envelope)
                 .await
@@ -731,6 +881,20 @@ impl RemoteControlWebsocket {
             {
                 return Ok(());
            }
+            if let Some((client_id, stream_id)) = closed_client {
+                let mut websocket_state = state.lock().await;
+                if let Some(stream_id) = stream_id {
+                    websocket_state
+                        .client_segment_reassembler
+                        .invalidate_stream(&client_id, &stream_id);
+                    websocket_state.invalidate_client_message_stream(&client_id, &stream_id);
+                } else {
+                    websocket_state
+                        .client_segment_reassembler
+                        .invalidate_client(&client_id);
+                    websocket_state.invalidate_client_message_client(&client_id);
+                }
+            }
         }
     }
 }
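// Illustrative, standalone sketch (not part of this change): the ClientClosed handling
// above forgets per-stream chunk state either for one (client, stream) pair or, when no
// stream id is given, for every stream of that client. A minimal stand-in using plain
// string keys (the real code keys on ClientId / StreamId):
fn invalidate(
    state: &mut std::collections::HashMap<(String, Option<String>), u64>,
    client: &str,
    stream: Option<&str>,
) {
    match stream {
        Some(stream) => {
            state.remove(&(client.to_string(), Some(stream.to_string())));
        }
        None => state.retain(|(entry_client, _), _| entry_client.as_str() != client),
    }
}

fn invalidate_demo() {
    let mut state = std::collections::HashMap::new();
    state.insert(("client-1".to_string(), Some("stream-1".to_string())), 4);
    state.insert(("client-1".to_string(), Some("stream-2".to_string())), 9);
    invalidate(&mut state, "client-1", Some("stream-1"));
    assert_eq!(state.len(), 1);
    invalidate(&mut state, "client-1", None);
    assert!(state.is_empty());
}
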
@@ -1052,6 +1216,8 @@ mod tests {
     use chrono::Utc;
     use codex_app_server_protocol::AuthMode;
     use codex_app_server_protocol::ConfigWarningNotification;
+    use codex_app_server_protocol::JSONRPCMessage;
+    use codex_app_server_protocol::JSONRPCNotification;
     use codex_app_server_protocol::ServerNotification;
     use codex_config::types::AuthCredentialsStoreMode;
     use codex_core::test_support::auth_manager_from_auth;
@@ -1603,6 +1769,8 @@ mod tests {
             outbound_buffer,
             subscribe_cursor: None,
             next_seq_id_by_stream: HashMap::new(),
+            last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
+            client_segment_reassembler: ClientSegmentReassembler::default(),
         }));
         let (_server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
         let server_event_rx = Arc::new(Mutex::new(server_event_rx));
@@ -1639,6 +1807,8 @@ mod tests {
             outbound_buffer,
             subscribe_cursor: None,
             next_seq_id_by_stream: HashMap::new(),
+            last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
+            client_segment_reassembler: ClientSegmentReassembler::default(),
         }));
         let (server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
         let server_event_rx = Arc::new(Mutex::new(server_event_rx));
@@ -1716,6 +1886,8 @@ mod tests {
             outbound_buffer,
             subscribe_cursor: None,
             next_seq_id_by_stream: HashMap::new(),
+            last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
+            client_segment_reassembler: ClientSegmentReassembler::default(),
         }));
         let (server_event_tx, _server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY);
         let (transport_event_tx, _transport_event_rx) =
@@ -1771,7 +1943,9 @@ mod tests {
             "first-client-new-stream",
         ));
 
-        outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 3);
+        outbound_buffer.ack(
+            &client_1, &stream_1, /*acked_seq_id*/ 3, /*acked_segment_id*/ None,
+        );
 
         let mut retained = outbound_buffer
             .server_envelopes()
@@ -1814,7 +1988,9 @@ mod tests {
             &client_2, "stream-1", /*seq_id*/ 3, "second",
         ));
 
-        outbound_buffer.ack(&client_1, &stream_1, /*acked_seq_id*/ 1);
+        outbound_buffer.ack(
+            &client_1, &stream_1, /*acked_seq_id*/ 1, /*acked_segment_id*/ None,
+        );
 
         let mut retained = outbound_buffer
             .server_envelopes()
@@ -1834,6 +2010,390 @@ mod tests {
|
||||
assert_eq!(*used_rx.borrow(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outbound_buffer_advances_segmented_acks_by_wire_cursor() {
|
||||
let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new();
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
));
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
));
|
||||
|
||||
outbound_buffer.ack(
|
||||
&client_id,
|
||||
&stream_id,
|
||||
/*acked_seq_id*/ 4,
|
||||
/*acked_segment_id*/ Some(1),
|
||||
);
|
||||
|
||||
let retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
.map(|server_envelope| server_envelope.event.segment_id())
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(retained, Vec::<Option<usize>>::new());
|
||||
assert_eq!(*used_rx.borrow(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outbound_buffer_treats_segmentless_acks_as_seq_level_acks() {
|
||||
let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new();
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
));
|
||||
outbound_buffer.insert(&server_chunk_envelope(
|
||||
&client_id, "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
));
|
||||
|
||||
outbound_buffer.ack(
|
||||
&client_id, &stream_id, /*acked_seq_id*/ 4, /*acked_segment_id*/ None,
|
||||
);
|
||||
|
||||
let retained = outbound_buffer
|
||||
.server_envelopes()
|
||||
.map(|server_envelope| server_envelope.event.segment_id())
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(retained, Vec::<Option<usize>>::new());
|
||||
assert_eq!(*used_rx.borrow(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_duplicate_client_chunks_while_pending() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"y",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_replayed_client_chunks_after_completion() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 4,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 4,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_allows_replay_after_rejected_out_of_order_chunk() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"y",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_allows_replay_after_later_chunk_drops() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
);
|
||||
let invalid_second_chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 1,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk.clone()),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, invalid_second_chunk),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_drops_oversized_client_chunk_frames() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let chunk = client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 1, /*message_size_bytes*/ 1, b"x",
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
state.observe_client_message(chunk, REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_ignores_oversized_stale_chunks_without_dropping_newer_assembly() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_newer_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let oversized_stale_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 7,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_newer_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_newer_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
state.observe_client_message(
|
||||
oversized_stale_chunk,
|
||||
REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1,
|
||||
),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_newer_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_ignores_oversized_duplicate_chunks_without_dropping_current_assembly() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let message = JSONRPCMessage::Notification(JSONRPCNotification {
|
||||
method: "initialized".to_string(),
|
||||
params: None,
|
||||
});
|
||||
let raw = serde_json::to_vec(&message).expect("message should serialize");
|
||||
let split = raw.len() / 2;
|
||||
let first_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let oversized_duplicate_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 0,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[..split],
|
||||
);
|
||||
let second_chunk = client_chunk_envelope(
|
||||
"client-1",
|
||||
"stream-1",
|
||||
/*seq_id*/ 8,
|
||||
/*segment_id*/ 1,
|
||||
/*segment_count*/ 2,
|
||||
raw.len(),
|
||||
&raw[split..],
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, first_chunk),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
assert!(matches!(
|
||||
state.observe_client_message(
|
||||
oversized_duplicate_chunk,
|
||||
REMOTE_CONTROL_SEGMENT_MAX_BYTES + 1,
|
||||
),
|
||||
ClientSegmentObservation::Dropped
|
||||
));
|
||||
assert!(matches!(
|
||||
observe_client_message(&mut state, second_chunk),
|
||||
ClientSegmentObservation::Forward(_)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn websocket_state_clears_chunk_cursor_when_stream_is_invalidated() {
|
||||
let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new();
|
||||
let mut state = WebsocketState {
|
||||
outbound_buffer,
|
||||
subscribe_cursor: None,
|
||||
next_seq_id_by_stream: HashMap::new(),
|
||||
last_completed_client_chunk_seq_id_by_stream: HashMap::new(),
|
||||
client_segment_reassembler: ClientSegmentReassembler::default(),
|
||||
};
|
||||
let client_id = ClientId("client-1".to_string());
|
||||
let stream_id = StreamId("stream-1".to_string());
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(
|
||||
&mut state,
|
||||
client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 4, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
)
|
||||
),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
state.invalidate_client_message_stream(&client_id, &stream_id);
|
||||
state
|
||||
.client_segment_reassembler
|
||||
.invalidate_stream(&client_id, &stream_id);
|
||||
|
||||
assert!(matches!(
|
||||
observe_client_message(
|
||||
&mut state,
|
||||
client_chunk_envelope(
|
||||
"client-1", "stream-1", /*seq_id*/ 1, /*segment_id*/ 0,
|
||||
/*segment_count*/ 2, /*message_size_bytes*/ 2, b"x",
|
||||
)
|
||||
),
|
||||
ClientSegmentObservation::Pending
|
||||
));
|
||||
}
|
||||
|
||||
     fn server_envelope(
         client_id: &ClientId,
         stream_id: &str,
@@ -1857,6 +2417,58 @@ mod tests {
         }
     }
 
+    fn server_chunk_envelope(
+        client_id: &ClientId,
+        stream_id: &str,
+        seq_id: u64,
+        segment_id: usize,
+    ) -> ServerEnvelope {
+        ServerEnvelope {
+            event: ServerEvent::ServerMessageChunk {
+                segment_id,
+                segment_count: 2,
+                message_size_bytes: 2,
+                message_chunk_base64: String::new(),
+            },
+            client_id: client_id.clone(),
+            stream_id: StreamId(stream_id.to_string()),
+            seq_id,
+        }
+    }
+
+    fn client_chunk_envelope(
+        client_id: &str,
+        stream_id: &str,
+        seq_id: u64,
+        segment_id: usize,
+        segment_count: usize,
+        message_size_bytes: usize,
+        chunk: &[u8],
+    ) -> ClientEnvelope {
+        ClientEnvelope {
+            event: ClientEvent::ClientMessageChunk {
+                segment_id,
+                segment_count,
+                message_size_bytes,
+                message_chunk_base64: base64::engine::general_purpose::STANDARD.encode(chunk),
+            },
+            client_id: ClientId(client_id.to_string()),
+            stream_id: Some(StreamId(stream_id.to_string())),
+            seq_id: Some(seq_id),
+            cursor: None,
+        }
+    }
+
+    fn observe_client_message(
+        state: &mut WebsocketState,
+        envelope: ClientEnvelope,
+    ) -> ClientSegmentObservation {
+        let wire_size_bytes = serde_json::to_vec(&envelope)
+            .expect("client envelope should serialize")
+            .len();
+        state.observe_client_message(envelope, wire_size_bytes)
+    }
+
     async fn accept_http_request(listener: &TcpListener) -> (TcpStream, String) {
         let (stream, _) = timeout(TEST_HTTP_ACCEPT_TIMEOUT, listener.accept())
             .await