Mirror of https://github.com/openai/codex.git (synced 2026-03-02 12:43:18 +00:00)

Compare commits: pr12442...codex/dire (131 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 925bd22dbc | |
| | f26a3c8e74 | |
| | 68ec43d213 | |
| | 5dabc52e73 | |
| | 02525c193a | |
| | 0f570e20d5 | |
| | 9a393c9b6f | |
| | 8362b79cb4 | |
| | 01f25a7b96 | |
| | bccce0d75f | |
| | 8d49e0d0c4 | |
| | e4bfa763f6 | |
| | 5441130e0a | |
| | 5a9a5b51b2 | |
| | bcd6e68054 | |
| | 93efcfd50d | |
| | 6d6570d89d | |
| | f46b767b7e | |
| | a046849438 | |
| | 10c04e11b8 | |
| | 6a3233da64 | |
| | c4ec6be4ab | |
| | 59398125f6 | |
| | c086b36b58 | |
| | 9501669a24 | |
| | ddfa032eb8 | |
| | 6cb2f02ef8 | |
| | 1151972fb2 | |
| | 8f3f2c3c02 | |
| | 16ca527c80 | |
| | e6bb5d8553 | |
| | 125fbec317 | |
| | 74e112ea09 | |
| | e88f74d140 | |
| | 448fb6ac22 | |
| | 63c2ac96cd | |
| | 5163850025 | |
| | 3d356723c4 | |
| | 8da40c9251 | |
| | 5571a022eb | |
| | ee1520e79e | |
| | 61cd3a9700 | |
| | fefdc03b25 | |
| | dcab40123f | |
| | bd192b54cd | |
| | b6ab2214e3 | |
| | 3b5fc7547e | |
| | daf0f03ac8 | |
| | 061d1d3b5e | |
| | 67d9261e2c | |
| | 0b6c2e5652 | |
| | 74cebceed7 | |
| | 3ca0e7673b | |
| | 8d3d58f992 | |
| | ca556fa313 | |
| | f6053fdfb3 | |
| | 9a8adbf6e5 | |
| | 97d0068658 | |
| | 0679e70bfc | |
| | 3fe365ad8a | |
| | 8758db5d5b | |
| | 15f6cfb047 | |
| | 68a7d98363 | |
| | 7e46e5b9c2 | |
| | 58763afa0f | |
| | a4076ab4b1 | |
| | 09a82f364f | |
| | fbeda61cc3 | |
| | c3048ff90a | |
| | af215eb390 | |
| | 38f84b6b29 | |
| | 5a3bdcb27b | |
| | d580995957 | |
| | 10a3adad8e | |
| | 855e275591 | |
| | 50953ea39a | |
| | cd5acf6af7 | |
| | be4203023d | |
| | 48e08a1561 | |
| | bfe622f495 | |
| | 7f75e74201 | |
| | fec517cd38 | |
| | 5c52ef8e60 | |
| | 3cea3e665e | |
| | 5221575f23 | |
| | a606e85859 | |
| | 6e60f724bc | |
| | 3b6c50d925 | |
| | eace7c6610 | |
| | 2119532a81 | |
| | 862a5b3eb3 | |
| | e8709bc11a | |
| | 764ac9449f | |
| | cf0210bf22 | |
| | 829d1080f6 | |
| | 9d826a20c6 | |
| | 6fbf19ef5f | |
| | 2b9d0c385f | |
| | cfcbff4c48 | |
| | 8e9312958d | |
| | 956f2f439e | |
| | 335a4e1cbc | |
| | e8949f4507 | |
| | 7e569f1162 | |
| | d5fef5c190 | |
| | 5684c82e45 | |
| | e00fa19328 | |
| | 2ada9e1b2d | |
| | 0a0caa9df2 | |
| | 4666a6e631 | |
| | 55fc075723 | |
| | 85b00ae8de | |
| | 82d3c9ed76 | |
| | 37610240ec | |
| | c4f1af7a86 | |
| | 1dad0a7f4a | |
| | b73c4b50a2 | |
| | 5e505ff877 | |
| | 031d701705 | |
| | 2ba2c57af4 | |
| | 66d5d34e6e | |
| | f33ac830aa | |
| | 3586fcb802 | |
| | b17148f13a | |
| | a6b2bacb5b | |
| | 5a635f3427 | |
| | b3202cbd58 | |
| | e7b6f38b58 | |
| | f5d7a74568 | |
| | 85ce91a5b3 | |
| | 2fe4be1aa9 | |
.codex/skills/babysit-pr/SKILL.md (normal file, 185 lines)
@@ -0,0 +1,185 @@
---
name: babysit-pr
description: Babysit a GitHub pull request after creation by continuously polling CI checks/workflow runs, new review comments, and mergeability state until the PR is ready to merge (or merged/closed). Diagnose failures, retry likely flaky failures up to 3 times, auto-fix/push branch-related issues when appropriate, and stop only when user help is required (for example CI infrastructure issues, exhausted flaky retries, or ambiguous/blocking situations). Use when the user asks Codex to monitor a PR, watch CI, handle review comments, or keep an eye on failures and feedback on an open PR.
---

# PR Babysitter

## Objective
Babysit a PR persistently until one of these terminal outcomes occurs:

- The PR is merged or closed.
- CI is successful, there are no unaddressed review comments surfaced by the watcher, required review approval is not blocking merge, and there are no potential merge conflicts (the PR is mergeable / not reporting conflict risk).
- A situation requires user help (for example CI infrastructure issues, repeated flaky failures after the retry budget is exhausted, permission problems, or ambiguity that cannot be resolved safely).

Do not stop merely because a single snapshot returns `idle` while checks are still pending.

## Inputs
Accept any of the following:

- No PR argument: infer the PR from the current branch (`--pr auto`)
- PR number
- PR URL

## Core Workflow

1. When the user asks to "monitor"/"watch"/"babysit" a PR, start with the watcher's continuous mode (`--watch`) unless you are intentionally doing a one-shot diagnostic snapshot.
2. Run the watcher script to snapshot PR/CI/review state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response (a minimal dispatch sketch follows this list).
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, and push.
8. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
9. If both actionable review feedback and `retry_failed_checks` are present, prioritize the review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
10. On every loop, verify mergeability / merge-conflict status (for example via `gh pr view`) in addition to CI and review state.
11. After any push or rerun action, immediately return to step 1 and continue polling on the updated SHA/state.
12. If you had been using `--watch` before pausing to patch/commit/push, relaunch `--watch` yourself in the same turn immediately after the push (do not wait for the user to re-invoke the skill).
13. Repeat polling until the PR is green, review-clean, and mergeable, `stop_pr_closed` appears, or a user-help-required blocker is reached.
14. Maintain terminal/session ownership: while babysitting is active, keep consuming watcher output in the same turn; do not leave a detached `--watch` process running and then end the turn as if monitoring were complete.
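The snapshot consumed in steps 2 and 3 is plain JSON on stdout. A minimal sketch of how an agent loop might read one snapshot and branch on `actions` (the script path and action names come from this skill; the dispatch structure itself is illustrative, not part of the skill):

```python
import json
import subprocess

WATCHER = ".codex/skills/babysit-pr/scripts/gh_pr_watch.py"

def snapshot_once(pr_spec: str = "auto") -> dict:
    """Run the watcher in one-shot mode and parse its JSON snapshot."""
    out = subprocess.run(
        ["python3", WATCHER, "--pr", pr_spec, "--once"],
        check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)

snapshot = snapshot_once()
actions = set(snapshot["actions"])

if "stop_pr_closed" in actions or "stop_ready_to_merge" in actions:
    print("terminal state reached:", sorted(actions))
elif "diagnose_ci_failure" in actions:
    # Inspect failed run logs (gh run view --log-failed) and classify the failure.
    print("failed runs to diagnose:", snapshot["failed_runs"])
elif "process_review_comment" in actions:
    print("new review items:", len(snapshot["new_review_items"]))
elif "retry_failed_checks" in actions:
    # Only rerun when no review-fix commit is about to replace the current SHA.
    subprocess.run(["python3", WATCHER, "--pr", "auto", "--retry-failed-now"], check=True)
else:
    print("idle; keep polling")
```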
## Commands

### One-shot snapshot

```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --once
```

### Continuous watch (JSONL)

```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --watch
```
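Each `--watch` line is a standalone JSON object of the form `{"event": ..., "payload": ...}`, with `snapshot` and `stop` events emitted by the script in this change. A minimal consumer sketch (the consumer itself is illustrative and assumes it runs from the repository root):

```python
import json
import subprocess

cmd = [
    "python3",
    ".codex/skills/babysit-pr/scripts/gh_pr_watch.py",
    "--pr", "auto", "--watch",
]

with subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) as proc:
    for line in proc.stdout:
        event = json.loads(line)
        if event["event"] == "snapshot":
            snap = event["payload"]["snapshot"]
            print("actions:", snap["actions"], "checks:", snap["checks"])
        elif event["event"] == "stop":
            # The watcher exits on its own after emitting a stop event.
            print("watcher stopped:", event["payload"]["actions"])
            break
```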
### Trigger flaky retry cycle (only when watcher indicates)

```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr auto --retry-failed-now
```

### Explicit PR target

```bash
python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --once
```
## CI Failure Classification
Use `gh` commands to inspect failed runs before deciding to rerun:

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`

Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).

Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).

If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.

Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
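A rough illustration of the branch-related vs. flaky split: scan the failed log text for transient-infrastructure markers and for paths the branch actually touched. The marker list and the helper below are assumptions for illustration only; real flake signatures vary by repository and CI provider, and this is not part of the watcher.

```python
import subprocess

# Illustrative only: adjust transient-failure signatures for your CI setup.
TRANSIENT_MARKERS = (
    "timed out",
    "connection reset",
    "temporary failure in name resolution",
    "503 service unavailable",
    "runner has received a shutdown signal",
)

def classify_failed_run(run_id: str, changed_paths: set[str]) -> str:
    """Return 'branch-related', 'likely-flaky', or 'ambiguous' for one failed run."""
    log = subprocess.run(
        ["gh", "run", "view", run_id, "--log-failed"],
        check=True, capture_output=True, text=True,
    ).stdout
    log_lower = log.lower()
    if any(marker in log_lower for marker in TRANSIENT_MARKERS):
        return "likely-flaky"
    if any(path in log for path in changed_paths):
        return "branch-related"
    return "ambiguous"  # do one manual diagnosis pass before choosing rerun
```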
## Review Comment Handling
The watcher surfaces review items from:

- PR issue comments
- Inline review comments
- Review submissions (COMMENT / APPROVED / CHANGES_REQUESTED)

It intentionally surfaces Codex reviewer bot feedback (for example comments/reviews from `chatgpt-codex-connector[bot]`) in addition to human reviewer feedback. Most unrelated bot noise should still be ignored.

For safety, the watcher only auto-surfaces trusted human review authors (for example repo OWNER/MEMBER/COLLABORATOR, plus the authenticated operator) and approved review bots such as Codex; a condensed sketch of this filter appears at the end of this section.

On a fresh watcher state file, existing pending review feedback may be surfaced immediately (not only comments that arrive after monitoring starts). This is intentional so already-open review comments are not missed.

When you agree with a comment and it is actionable:

1. Patch code locally.
2. Commit with `codex: address PR review feedback (#<n>)`.
3. Push to the PR head branch.
4. Resume watching on the new SHA immediately (do not stop after reporting the push).
5. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.

If you disagree or the comment is non-actionable/already addressed, record it as handled by continuing the watcher loop (the script de-duplicates surfaced items via state after surfacing them).

If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.
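The author filter described above mirrors the watcher script added in this change: bot logins are surfaced only when they match an approved review-bot keyword, and humans are surfaced only when they are the authenticated operator or carry a trusted author association. A condensed sketch of that logic (the helper name is illustrative):

```python
TRUSTED_AUTHOR_ASSOCIATIONS = {"OWNER", "MEMBER", "COLLABORATOR"}
REVIEW_BOT_LOGIN_KEYWORDS = {"codex"}

def should_surface(author: str, author_association: str, authenticated_login: str) -> bool:
    """Decide whether a review item's author is trusted enough to surface."""
    if not author:
        return False
    if author.endswith("[bot]"):
        # Approved review bots only (for example chatgpt-codex-connector[bot]).
        return any(k in author.lower() for k in REVIEW_BOT_LOGIN_KEYWORDS)
    if author == authenticated_login:
        return True
    return author_association.upper() in TRUSTED_AUTHOR_ASSOCIATIONS
```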
## Git Safety Rules

- Work only on the PR head branch.
- Avoid destructive git commands.
- Do not switch branches unless necessary to recover context.
- Before editing, check for unrelated uncommitted changes (see the sketch at the end of this section). If present, stop and ask the user.
- After each successful fix, commit and `git push`, then re-run the watcher.
- If you interrupted a live `--watch` session to make the fix, restart `--watch` immediately after the push in the same turn.
- Do not run multiple concurrent `--watch` processes for the same PR/state file; keep one watcher session active and reuse it until it stops or you intentionally restart it.
- A push is not a terminal outcome; continue the monitoring loop unless a strict stop condition is met.

Commit message defaults:

- `codex: fix CI failure on PR #<n>`
- `codex: address PR review feedback (#<n>)`
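One simple way to implement the dirty-worktree check from the rules above; a sketch only, the helper is not part of the skill:

```python
import subprocess

def worktree_is_clean() -> bool:
    """True when `git status --porcelain` reports no pending changes."""
    out = subprocess.run(
        ["git", "status", "--porcelain"],
        check=True, capture_output=True, text=True,
    ).stdout
    return out.strip() == ""

if not worktree_is_clean():
    raise SystemExit("Unrelated uncommitted changes present; ask the user before editing.")
```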
## Monitoring Loop Pattern
Use this loop in a live Codex session:

1. Run `--once`.
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
6. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
7. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
8. If you pushed a commit or triggered a rerun, report the action briefly and continue polling (do not stop).
9. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
10. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report success and stop.
11. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.
12. Otherwise sleep according to the polling cadence below and repeat.

When the user explicitly asks to monitor/watch/babysit a PR, prefer `--watch` so polling continues autonomously in one command. Use repeated `--once` snapshots only for debugging, local testing, or when the user explicitly asks for a one-shot check.

Do not stop to ask the user whether to continue polling; continue autonomously until a strict stop condition is met or the user explicitly interrupts.

Do not hand control back to the user after a review-fix push just because a new SHA was created; restarting the watcher and re-entering the poll loop is part of the same babysitting task.

If a `--watch` process is still running and no strict stop condition has been reached, the babysitting task is still in progress; keep streaming/consuming watcher output instead of ending the turn.
## Polling Cadence
Use adaptive polling and continue monitoring even after CI turns green (a backoff sketch follows this list):

- While CI is not green (pending/running/queued or failing): poll every 1 minute.
- After CI turns green: start at every 1 minute, then back off exponentially when there is no change (for example 1m, 2m, 4m, 8m, 16m, 32m), capping at every 1 hour.
- Reset the green-state polling interval back to 1 minute whenever anything changes (new commit/SHA, check status changes, new review comments, mergeability changes, review decision changes).
- If CI stops being green again (new commit, rerun, or regression): return to 1-minute polling.
- If any poll shows the PR is merged or otherwise closed: stop polling immediately and report the terminal state.
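A sketch of this cadence, matching the doubling-with-cap behavior the watcher script uses. The 1-minute base follows this document (the script's own `--poll-seconds` default is 30); the function and constant names are illustrative:

```python
BASE_POLL_SECONDS = 60            # 1 minute while CI is red/pending or anything changed
MAX_GREEN_POLL_SECONDS = 60 * 60  # cap at 1 hour once green and quiet

def next_poll_seconds(current: int, ci_green: bool, state_changed: bool) -> int:
    """Adaptive cadence: 1m while not green or on any change, else exponential backoff."""
    if not ci_green or state_changed:
        return BASE_POLL_SECONDS
    return min(current * 2, MAX_GREEN_POLL_SECONDS)

# Example: green and quiet for several polls in a row.
interval = BASE_POLL_SECONDS
for _ in range(7):
    interval = next_poll_seconds(interval, ci_green=True, state_changed=False)
    # interval takes the values 120, 240, 480, 960, 1920, 3600, 3600 seconds
```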
## Stop Conditions (Strict)
Stop only when one of the following is true:

- The PR is merged or closed (stop as soon as a poll/snapshot confirms this).
- The PR is ready to merge: CI succeeded, no surfaced unaddressed review comments, not blocked on required review approval, and no merge-conflict risk (see the predicate sketch at the end of this section).
- User intervention is required and Codex cannot safely proceed alone.

Keep polling when:

- `actions` contains only `idle` but checks are still pending.
- CI is still running/queued.
- Review state is quiet but CI is not terminal.
- CI is green but mergeability is unknown/pending.
- CI is green and mergeable, but the PR is still open and you are waiting for possible new review comments or merge-conflict changes per the green-state cadence.
- The PR is green but blocked on review approval (`REVIEW_REQUIRED` / similar); continue polling on the green-state cadence and surface any new review comments without asking for confirmation to keep watching.
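The "ready to merge" stop condition corresponds to the `stop_ready_to_merge` action in the watcher script added by this change; condensed, its predicate looks roughly like this (field names come from `gh pr view` as consumed by the script):

```python
MERGE_BLOCKING_REVIEW_DECISIONS = {"REVIEW_REQUIRED", "CHANGES_REQUESTED"}
MERGE_CONFLICT_OR_BLOCKING_STATES = {"BLOCKED", "DIRTY", "DRAFT", "UNKNOWN"}

def ready_to_merge(pr: dict, checks: dict, new_review_items: list) -> bool:
    """True only when CI, reviews, and mergeability are all clear for an open PR."""
    if pr["closed"] or pr["merged"]:
        return False
    if not checks["all_terminal"] or checks["failed_count"] or checks["pending_count"]:
        return False
    if new_review_items:
        return False
    if pr.get("mergeable") != "MERGEABLE":
        return False
    if pr.get("merge_state_status") in MERGE_CONFLICT_OR_BLOCKING_STATES:
        return False
    return pr.get("review_decision") not in MERGE_BLOCKING_REVIEW_DECISIONS
```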
## Output Expectations
Provide concise progress updates while monitoring, followed by a final summary once a strict stop condition is reached.

Progress-update rules:

- During long unchanged monitoring periods, avoid emitting a full update on every poll; summarize only status changes plus occasional heartbeat updates.
- Treat push confirmations, intermediate CI snapshots, and review-action updates as progress updates only; do not emit the final summary or end the babysitting session unless a strict stop condition is met.
- A user request to "monitor" is not satisfied by a couple of sample polls; remain in the loop until a strict stop condition or an explicit user interruption.
- A review-fix commit + push is not a completion event; immediately resume live monitoring (`--watch`) in the same turn and continue reporting progress updates.
- When CI first transitions to all green for the current SHA, emit a one-time celebratory progress update (do not repeat it on every green poll). Preferred style: `🚀 CI is all green! 33/33 passed. Still on watch for review approval.`
- Do not send the final summary while a watcher terminal is still running unless the watcher has emitted/confirmed a strict stop condition; otherwise continue with progress updates.

The final summary should include:

- Final PR SHA
- CI status summary
- Mergeability / conflict status
- Fixes pushed
- Flaky retry cycles used
- Remaining unresolved failures or review comments

## References

- Heuristics and decision tree: `.codex/skills/babysit-pr/references/heuristics.md`
- GitHub CLI/API details used by the watcher: `.codex/skills/babysit-pr/references/github-api-notes.md`
.codex/skills/babysit-pr/agents/openai.yaml (normal file, 4 lines)
@@ -0,0 +1,4 @@

interface:
  display_name: "PR Babysitter"
  short_description: "Watch PR CI, reviews, and merge conflicts"
  default_prompt: "Babysit the current PR: monitor CI, reviewer comments, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Continue polling autonomously after any push/rerun until a strict terminal stop condition is reached or the user interrupts."
.codex/skills/babysit-pr/references/github-api-notes.md (normal file, 72 lines)
@@ -0,0 +1,72 @@

# GitHub CLI / API Notes For `babysit-pr`

## Primary commands used

### PR metadata

- `gh pr view --json number,url,state,mergedAt,closedAt,headRefName,headRefOid,headRepository,headRepositoryOwner`

Used to resolve PR number, URL, branch, head SHA, and closed/merged state.

### PR checks summary

- `gh pr checks --json name,state,bucket,link,workflow,event,startedAt,completedAt`

Used to compute pending/failed/passed counts and whether the current CI round is terminal.

### Workflow runs for head SHA

- `gh api repos/{owner}/{repo}/actions/runs -X GET -f head_sha=<sha> -f per_page=100`

Used to discover failed workflow runs and rerunnable run IDs.

### Failed log inspection

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh run view <run-id> --log-failed`

Used by Codex to classify branch-related vs flaky/unrelated failures.

### Retry failed jobs only

- `gh run rerun <run-id> --failed`

Reruns only failed jobs (and dependencies) for a workflow run.
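Putting the two pieces above together, the watcher filters `workflow_runs[]` for the PR head SHA down to rerunnable failures and then issues `gh run rerun <run-id> --failed`. A condensed sketch of that selection (the conclusion set comes from the script in this change; the real watcher additionally requires all PR checks to be terminal and enforces the `--max-flaky-retries` budget before rerunning):

```python
import json
import subprocess

FAILED_RUN_CONCLUSIONS = {
    "failure", "timed_out", "cancelled", "action_required", "startup_failure", "stale",
}

def rerun_failed_runs(repo: str, head_sha: str) -> list:
    """Rerun failed jobs for every failed workflow run on the PR head SHA."""
    raw = subprocess.run(
        ["gh", "api", f"repos/{repo}/actions/runs", "-X", "GET",
         "-f", f"head_sha={head_sha}", "-f", "per_page=100"],
        check=True, capture_output=True, text=True,
    ).stdout
    runs = json.loads(raw).get("workflow_runs", [])
    rerun_ids = []
    for run in runs:
        if run.get("head_sha") == head_sha and run.get("conclusion") in FAILED_RUN_CONCLUSIONS:
            subprocess.run(["gh", "run", "rerun", str(run["id"]), "--failed"], check=True)
            rerun_ids.append(run["id"])
    return rerun_ids
```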
## Review-related endpoints

- Issue comments on PR:
  - `gh api repos/{owner}/{repo}/issues/<pr_number>/comments?per_page=100`
- Inline PR review comments:
  - `gh api repos/{owner}/{repo}/pulls/<pr_number>/comments?per_page=100`
- Review submissions:
  - `gh api repos/{owner}/{repo}/pulls/<pr_number>/reviews?per_page=100`

## JSON fields consumed by the watcher

### `gh pr view`

- `number`
- `url`
- `state`
- `mergedAt`
- `closedAt`
- `headRefName`
- `headRefOid`

### `gh pr checks`

- `bucket` (`pass`, `fail`, `pending`, `skipping`)
- `state`
- `name`
- `workflow`
- `link`

### Actions runs API (`workflow_runs[]`)

- `id`
- `name`
- `status`
- `conclusion`
- `html_url`
- `head_sha`
.codex/skills/babysit-pr/references/heuristics.md (normal file, 58 lines)
@@ -0,0 +1,58 @@

# CI / Review Heuristics

## CI classification checklist

Treat as **branch-related** when logs clearly indicate a regression caused by the PR branch:

- Compile/typecheck/lint failures in files or modules touched by the branch
- Deterministic unit/integration test failures in changed areas
- Snapshot output changes caused by UI/text changes in the branch
- Static analysis violations introduced by the latest push
- Build script/config changes in the PR causing a deterministic failure

Treat as **likely flaky or unrelated** when evidence points to transient or external issues:

- DNS/network/registry timeout errors while fetching dependencies
- Runner image provisioning or startup failures
- GitHub Actions infrastructure/service outages
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns

If uncertain, inspect failed logs once before choosing rerun.

## Decision tree (fix vs rerun vs stop)

1. If PR is merged/closed: stop.
2. If there are failed checks:
   - Diagnose first.
   - If branch-related: fix locally, commit, push.
   - If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
   - If checks are still pending: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
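The decision tree above can be read as a small function; this sketch encodes steps 1 through 3 directly (names are illustrative, and the default limit of 3 matches the watcher's `--max-flaky-retries`):

```python
def decide(pr_closed: bool, failed: int, pending: int, branch_related: bool,
           retries_used: int, max_retries: int = 3) -> str:
    """Map the current PR/CI state to fix, rerun, wait, or stop."""
    if pr_closed:
        return "stop"
    if failed:
        if branch_related:
            return "fix-commit-push"
        if pending:
            return "wait"  # checks for this SHA are not terminal yet
        if retries_used >= max_retries:
            return "stop-report-persistent-failure"
        return "rerun-failed-jobs"
    # Review comments are processed independently of this CI decision (step 4).
    return "wait"
```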
## Review comment agreement criteria

Address the comment when:

- The comment is technically correct.
- The change is actionable in the current branch.
- The requested change does not conflict with the user’s intent or recent guidance.
- The change can be made safely without unrelated refactors.

Do not auto-fix when:

- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.

## Stop-and-ask conditions

Stop and ask the user instead of continuing automatically when:

- The local worktree has unrelated uncommitted changes.
- `gh` auth/permissions fail.
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
.codex/skills/babysit-pr/scripts/gh_pr_watch.py (executable file, 805 lines)
@@ -0,0 +1,805 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Watch GitHub PR CI and review activity for Codex PR babysitting workflows."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
|
||||
FAILED_RUN_CONCLUSIONS = {
|
||||
"failure",
|
||||
"timed_out",
|
||||
"cancelled",
|
||||
"action_required",
|
||||
"startup_failure",
|
||||
"stale",
|
||||
}
|
||||
PENDING_CHECK_STATES = {
|
||||
"QUEUED",
|
||||
"IN_PROGRESS",
|
||||
"PENDING",
|
||||
"WAITING",
|
||||
"REQUESTED",
|
||||
}
|
||||
REVIEW_BOT_LOGIN_KEYWORDS = {
|
||||
"codex",
|
||||
}
|
||||
TRUSTED_AUTHOR_ASSOCIATIONS = {
|
||||
"OWNER",
|
||||
"MEMBER",
|
||||
"COLLABORATOR",
|
||||
}
|
||||
MERGE_BLOCKING_REVIEW_DECISIONS = {
|
||||
"REVIEW_REQUIRED",
|
||||
"CHANGES_REQUESTED",
|
||||
}
|
||||
MERGE_CONFLICT_OR_BLOCKING_STATES = {
|
||||
"BLOCKED",
|
||||
"DIRTY",
|
||||
"DRAFT",
|
||||
"UNKNOWN",
|
||||
}
|
||||
GREEN_STATE_MAX_POLL_SECONDS = 60 * 60
|
||||
|
||||
|
||||
class GhCommandError(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(
|
||||
description=(
|
||||
"Normalize PR/CI/review state for Codex PR babysitting and optionally "
|
||||
"trigger flaky reruns."
|
||||
)
|
||||
)
|
||||
parser.add_argument("--pr", default="auto", help="auto, PR number, or PR URL")
|
||||
parser.add_argument("--repo", help="Optional OWNER/REPO override")
|
||||
parser.add_argument("--poll-seconds", type=int, default=30, help="Watch poll interval")
|
||||
parser.add_argument(
|
||||
"--max-flaky-retries",
|
||||
type=int,
|
||||
default=3,
|
||||
help="Max rerun cycles per head SHA before stop recommendation",
|
||||
)
|
||||
parser.add_argument("--state-file", help="Path to state JSON file")
|
||||
parser.add_argument("--once", action="store_true", help="Emit one snapshot and exit")
|
||||
parser.add_argument("--watch", action="store_true", help="Continuously emit JSONL snapshots")
|
||||
parser.add_argument(
|
||||
"--retry-failed-now",
|
||||
action="store_true",
|
||||
help="Rerun failed jobs for current failed workflow runs when policy allows",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--json",
|
||||
action="store_true",
|
||||
help="Emit machine-readable output (default behavior for --once and --retry-failed-now)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.poll_seconds <= 0:
|
||||
parser.error("--poll-seconds must be > 0")
|
||||
if args.max_flaky_retries < 0:
|
||||
parser.error("--max-flaky-retries must be >= 0")
|
||||
if args.watch and args.retry_failed_now:
|
||||
parser.error("--watch cannot be combined with --retry-failed-now")
|
||||
if not args.once and not args.watch and not args.retry_failed_now:
|
||||
args.once = True
|
||||
return args
|
||||
|
||||
|
||||
def _format_gh_error(cmd, err):
|
||||
stdout = (err.stdout or "").strip()
|
||||
stderr = (err.stderr or "").strip()
|
||||
parts = [f"GitHub CLI command failed: {' '.join(cmd)}"]
|
||||
if stdout:
|
||||
parts.append(f"stdout: {stdout}")
|
||||
if stderr:
|
||||
parts.append(f"stderr: {stderr}")
|
||||
return "\n".join(parts)
|
||||
|
||||
|
||||
def gh_text(args, repo=None):
|
||||
cmd = ["gh"]
|
||||
# `gh api` does not accept `-R/--repo` on all gh versions. The watcher's
|
||||
# API calls use explicit endpoints (e.g. repos/{owner}/{repo}/...), so the
|
||||
# repo flag is unnecessary there.
|
||||
if repo and (not args or args[0] != "api"):
|
||||
cmd.extend(["-R", repo])
|
||||
cmd.extend(args)
|
||||
try:
|
||||
proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
|
||||
except FileNotFoundError as err:
|
||||
raise GhCommandError("`gh` command not found") from err
|
||||
except subprocess.CalledProcessError as err:
|
||||
raise GhCommandError(_format_gh_error(cmd, err)) from err
|
||||
return proc.stdout
|
||||
|
||||
|
||||
def gh_json(args, repo=None):
|
||||
raw = gh_text(args, repo=repo).strip()
|
||||
if not raw:
|
||||
return None
|
||||
try:
|
||||
return json.loads(raw)
|
||||
except json.JSONDecodeError as err:
|
||||
raise GhCommandError(f"Failed to parse JSON from gh output for {' '.join(args)}") from err
|
||||
|
||||
|
||||
def parse_pr_spec(pr_spec):
|
||||
if pr_spec == "auto":
|
||||
return {"mode": "auto", "value": None}
|
||||
if re.fullmatch(r"\d+", pr_spec):
|
||||
return {"mode": "number", "value": pr_spec}
|
||||
parsed = urlparse(pr_spec)
|
||||
if parsed.scheme and parsed.netloc and "/pull/" in parsed.path:
|
||||
return {"mode": "url", "value": pr_spec}
|
||||
raise ValueError("--pr must be 'auto', a PR number, or a PR URL")
|
||||
|
||||
|
||||
def pr_view_fields():
|
||||
return (
|
||||
"number,url,state,mergedAt,closedAt,headRefName,headRefOid,"
|
||||
"headRepository,headRepositoryOwner,mergeable,mergeStateStatus,reviewDecision"
|
||||
)
|
||||
|
||||
|
||||
def checks_fields():
|
||||
return "name,state,bucket,link,workflow,event,startedAt,completedAt"
|
||||
|
||||
|
||||
def resolve_pr(pr_spec, repo_override=None):
|
||||
parsed = parse_pr_spec(pr_spec)
|
||||
cmd = ["pr", "view"]
|
||||
if parsed["value"] is not None:
|
||||
cmd.append(parsed["value"])
|
||||
cmd.extend(["--json", pr_view_fields()])
|
||||
data = gh_json(cmd, repo=repo_override)
|
||||
if not isinstance(data, dict):
|
||||
raise GhCommandError("Unexpected PR payload from `gh pr view`")
|
||||
|
||||
pr_url = str(data.get("url") or "")
|
||||
repo = (
|
||||
repo_override
|
||||
or extract_repo_from_pr_url(pr_url)
|
||||
or extract_repo_from_pr_view(data)
|
||||
)
|
||||
if not repo:
|
||||
raise GhCommandError("Unable to determine OWNER/REPO for the PR")
|
||||
|
||||
state = str(data.get("state") or "")
|
||||
merged = bool(data.get("mergedAt"))
|
||||
closed = bool(data.get("closedAt")) or state.upper() == "CLOSED"
|
||||
|
||||
return {
|
||||
"number": int(data["number"]),
|
||||
"url": pr_url,
|
||||
"repo": repo,
|
||||
"head_sha": str(data.get("headRefOid") or ""),
|
||||
"head_branch": str(data.get("headRefName") or ""),
|
||||
"state": state,
|
||||
"merged": merged,
|
||||
"closed": closed,
|
||||
"mergeable": str(data.get("mergeable") or ""),
|
||||
"merge_state_status": str(data.get("mergeStateStatus") or ""),
|
||||
"review_decision": str(data.get("reviewDecision") or ""),
|
||||
}
|
||||
|
||||
|
||||
def extract_repo_from_pr_view(data):
|
||||
head_repo = data.get("headRepository")
|
||||
head_owner = data.get("headRepositoryOwner")
|
||||
owner = None
|
||||
name = None
|
||||
if isinstance(head_owner, dict):
|
||||
owner = head_owner.get("login") or head_owner.get("name")
|
||||
elif isinstance(head_owner, str):
|
||||
owner = head_owner
|
||||
if isinstance(head_repo, dict):
|
||||
name = head_repo.get("name")
|
||||
repo_owner = head_repo.get("owner")
|
||||
if not owner and isinstance(repo_owner, dict):
|
||||
owner = repo_owner.get("login") or repo_owner.get("name")
|
||||
elif isinstance(head_repo, str):
|
||||
name = head_repo
|
||||
if owner and name:
|
||||
return f"{owner}/{name}"
|
||||
return None
|
||||
def extract_repo_from_pr_url(pr_url):
|
||||
parsed = urlparse(pr_url)
|
||||
parts = [p for p in parsed.path.split("/") if p]
|
||||
if len(parts) >= 4 and parts[2] == "pull":
|
||||
return f"{parts[0]}/{parts[1]}"
|
||||
return None
|
||||
|
||||
|
||||
def load_state(path):
|
||||
if path.exists():
|
||||
try:
|
||||
data = json.loads(path.read_text())
|
||||
except json.JSONDecodeError as err:
|
||||
raise RuntimeError(f"State file is not valid JSON: {path}") from err
|
||||
if not isinstance(data, dict):
|
||||
raise RuntimeError(f"State file must contain an object: {path}")
|
||||
return data, False
|
||||
return {
|
||||
"pr": {},
|
||||
"started_at": None,
|
||||
"last_seen_head_sha": None,
|
||||
"retries_by_sha": {},
|
||||
"seen_issue_comment_ids": [],
|
||||
"seen_review_comment_ids": [],
|
||||
"seen_review_ids": [],
|
||||
"last_snapshot_at": None,
|
||||
}, True
|
||||
|
||||
|
||||
def save_state(path, state):
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
payload = json.dumps(state, indent=2, sort_keys=True) + "\n"
|
||||
fd, tmp_name = tempfile.mkstemp(prefix=f"{path.name}.", suffix=".tmp", dir=path.parent)
|
||||
tmp_path = Path(tmp_name)
|
||||
try:
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as tmp_file:
|
||||
tmp_file.write(payload)
|
||||
os.replace(tmp_path, path)
|
||||
except Exception:
|
||||
try:
|
||||
tmp_path.unlink(missing_ok=True)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
|
||||
def default_state_file_for(pr):
|
||||
repo_slug = pr["repo"].replace("/", "-")
|
||||
return Path(f"/tmp/codex-babysit-pr-{repo_slug}-pr{pr['number']}.json")
|
||||
|
||||
|
||||
def get_pr_checks(pr_spec, repo):
|
||||
parsed = parse_pr_spec(pr_spec)
|
||||
cmd = ["pr", "checks"]
|
||||
if parsed["value"] is not None:
|
||||
cmd.append(parsed["value"])
|
||||
cmd.extend(["--json", checks_fields()])
|
||||
data = gh_json(cmd, repo=repo)
|
||||
if data is None:
|
||||
return []
|
||||
if not isinstance(data, list):
|
||||
raise GhCommandError("Unexpected payload from `gh pr checks`")
|
||||
return data
|
||||
|
||||
|
||||
def is_pending_check(check):
|
||||
bucket = str(check.get("bucket") or "").lower()
|
||||
state = str(check.get("state") or "").upper()
|
||||
return bucket == "pending" or state in PENDING_CHECK_STATES
|
||||
|
||||
|
||||
def summarize_checks(checks):
|
||||
pending_count = 0
|
||||
failed_count = 0
|
||||
passed_count = 0
|
||||
for check in checks:
|
||||
bucket = str(check.get("bucket") or "").lower()
|
||||
if is_pending_check(check):
|
||||
pending_count += 1
|
||||
if bucket == "fail":
|
||||
failed_count += 1
|
||||
if bucket == "pass":
|
||||
passed_count += 1
|
||||
return {
|
||||
"pending_count": pending_count,
|
||||
"failed_count": failed_count,
|
||||
"passed_count": passed_count,
|
||||
"all_terminal": pending_count == 0,
|
||||
}
|
||||
|
||||
|
||||
def get_workflow_runs_for_sha(repo, head_sha):
|
||||
endpoint = f"repos/{repo}/actions/runs"
|
||||
data = gh_json(
|
||||
["api", endpoint, "-X", "GET", "-f", f"head_sha={head_sha}", "-f", "per_page=100"],
|
||||
repo=repo,
|
||||
)
|
||||
if not isinstance(data, dict):
|
||||
raise GhCommandError("Unexpected payload from actions runs API")
|
||||
runs = data.get("workflow_runs") or []
|
||||
if not isinstance(runs, list):
|
||||
raise GhCommandError("Expected `workflow_runs` to be a list")
|
||||
return runs
|
||||
|
||||
|
||||
def failed_runs_from_workflow_runs(runs, head_sha):
|
||||
failed_runs = []
|
||||
for run in runs:
|
||||
if not isinstance(run, dict):
|
||||
continue
|
||||
if str(run.get("head_sha") or "") != head_sha:
|
||||
continue
|
||||
conclusion = str(run.get("conclusion") or "")
|
||||
if conclusion not in FAILED_RUN_CONCLUSIONS:
|
||||
continue
|
||||
failed_runs.append(
|
||||
{
|
||||
"run_id": run.get("id"),
|
||||
"workflow_name": run.get("name") or run.get("display_title") or "",
|
||||
"status": str(run.get("status") or ""),
|
||||
"conclusion": conclusion,
|
||||
"html_url": str(run.get("html_url") or ""),
|
||||
}
|
||||
)
|
||||
failed_runs.sort(key=lambda item: (str(item.get("workflow_name") or ""), str(item.get("run_id") or "")))
|
||||
return failed_runs
|
||||
|
||||
|
||||
def get_authenticated_login():
|
||||
data = gh_json(["api", "user"])
|
||||
if not isinstance(data, dict) or not data.get("login"):
|
||||
raise GhCommandError("Unable to determine authenticated GitHub login from `gh api user`")
|
||||
return str(data["login"])
|
||||
|
||||
|
||||
def comment_endpoints(repo, pr_number):
|
||||
return {
|
||||
"issue_comment": f"repos/{repo}/issues/{pr_number}/comments",
|
||||
"review_comment": f"repos/{repo}/pulls/{pr_number}/comments",
|
||||
"review": f"repos/{repo}/pulls/{pr_number}/reviews",
|
||||
}
|
||||
|
||||
|
||||
def gh_api_list_paginated(endpoint, repo=None, per_page=100):
|
||||
items = []
|
||||
page = 1
|
||||
while True:
|
||||
sep = "&" if "?" in endpoint else "?"
|
||||
page_endpoint = f"{endpoint}{sep}per_page={per_page}&page={page}"
|
||||
payload = gh_json(["api", page_endpoint], repo=repo)
|
||||
if payload is None:
|
||||
break
|
||||
if not isinstance(payload, list):
|
||||
raise GhCommandError(f"Unexpected paginated payload from gh api {endpoint}")
|
||||
items.extend(payload)
|
||||
if len(payload) < per_page:
|
||||
break
|
||||
page += 1
|
||||
return items
|
||||
|
||||
|
||||
def normalize_issue_comments(items):
|
||||
out = []
|
||||
for item in items:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
out.append(
|
||||
{
|
||||
"kind": "issue_comment",
|
||||
"id": str(item.get("id") or ""),
|
||||
"author": extract_login(item.get("user")),
|
||||
"author_association": str(item.get("author_association") or ""),
|
||||
"created_at": str(item.get("created_at") or ""),
|
||||
"body": str(item.get("body") or ""),
|
||||
"path": None,
|
||||
"line": None,
|
||||
"url": str(item.get("html_url") or ""),
|
||||
}
|
||||
)
|
||||
return out
|
||||
|
||||
|
||||
def normalize_review_comments(items):
|
||||
out = []
|
||||
for item in items:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
line = item.get("line")
|
||||
if line is None:
|
||||
line = item.get("original_line")
|
||||
out.append(
|
||||
{
|
||||
"kind": "review_comment",
|
||||
"id": str(item.get("id") or ""),
|
||||
"author": extract_login(item.get("user")),
|
||||
"author_association": str(item.get("author_association") or ""),
|
||||
"created_at": str(item.get("created_at") or ""),
|
||||
"body": str(item.get("body") or ""),
|
||||
"path": item.get("path"),
|
||||
"line": line,
|
||||
"url": str(item.get("html_url") or ""),
|
||||
}
|
||||
)
|
||||
return out
|
||||
|
||||
|
||||
def normalize_reviews(items):
|
||||
out = []
|
||||
for item in items:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
out.append(
|
||||
{
|
||||
"kind": "review",
|
||||
"id": str(item.get("id") or ""),
|
||||
"author": extract_login(item.get("user")),
|
||||
"author_association": str(item.get("author_association") or ""),
|
||||
"created_at": str(item.get("submitted_at") or item.get("created_at") or ""),
|
||||
"body": str(item.get("body") or ""),
|
||||
"path": None,
|
||||
"line": None,
|
||||
"url": str(item.get("html_url") or ""),
|
||||
}
|
||||
)
|
||||
return out
|
||||
|
||||
|
||||
def extract_login(user_obj):
|
||||
if isinstance(user_obj, dict):
|
||||
return str(user_obj.get("login") or "")
|
||||
return ""
|
||||
|
||||
|
||||
def is_bot_login(login):
|
||||
return bool(login) and login.endswith("[bot]")
|
||||
|
||||
|
||||
def is_actionable_review_bot_login(login):
|
||||
if not is_bot_login(login):
|
||||
return False
|
||||
lower_login = login.lower()
|
||||
return any(keyword in lower_login for keyword in REVIEW_BOT_LOGIN_KEYWORDS)
|
||||
|
||||
|
||||
def is_trusted_human_review_author(item, authenticated_login):
|
||||
author = str(item.get("author") or "")
|
||||
if not author:
|
||||
return False
|
||||
if authenticated_login and author == authenticated_login:
|
||||
return True
|
||||
association = str(item.get("author_association") or "").upper()
|
||||
return association in TRUSTED_AUTHOR_ASSOCIATIONS
|
||||
|
||||
|
||||
def fetch_new_review_items(pr, state, fresh_state, authenticated_login=None):
|
||||
repo = pr["repo"]
|
||||
pr_number = pr["number"]
|
||||
endpoints = comment_endpoints(repo, pr_number)
|
||||
|
||||
issue_payload = gh_api_list_paginated(endpoints["issue_comment"], repo=repo)
|
||||
review_comment_payload = gh_api_list_paginated(endpoints["review_comment"], repo=repo)
|
||||
review_payload = gh_api_list_paginated(endpoints["review"], repo=repo)
|
||||
|
||||
issue_items = normalize_issue_comments(issue_payload)
|
||||
review_comment_items = normalize_review_comments(review_comment_payload)
|
||||
review_items = normalize_reviews(review_payload)
|
||||
all_items = issue_items + review_comment_items + review_items
|
||||
|
||||
seen_issue = {str(x) for x in state.get("seen_issue_comment_ids") or []}
|
||||
seen_review_comment = {str(x) for x in state.get("seen_review_comment_ids") or []}
|
||||
seen_review = {str(x) for x in state.get("seen_review_ids") or []}
|
||||
|
||||
# On a brand-new state file, surface existing review activity instead of
|
||||
# silently treating it as seen. This avoids missing already-pending review
|
||||
# feedback when monitoring starts after comments were posted.
|
||||
|
||||
new_items = []
|
||||
for item in all_items:
|
||||
item_id = item.get("id")
|
||||
if not item_id:
|
||||
continue
|
||||
author = item.get("author") or ""
|
||||
if not author:
|
||||
continue
|
||||
if is_bot_login(author):
|
||||
if not is_actionable_review_bot_login(author):
|
||||
continue
|
||||
elif not is_trusted_human_review_author(item, authenticated_login):
|
||||
continue
|
||||
|
||||
kind = item["kind"]
|
||||
if kind == "issue_comment" and item_id in seen_issue:
|
||||
continue
|
||||
if kind == "review_comment" and item_id in seen_review_comment:
|
||||
continue
|
||||
if kind == "review" and item_id in seen_review:
|
||||
continue
|
||||
|
||||
new_items.append(item)
|
||||
if kind == "issue_comment":
|
||||
seen_issue.add(item_id)
|
||||
elif kind == "review_comment":
|
||||
seen_review_comment.add(item_id)
|
||||
elif kind == "review":
|
||||
seen_review.add(item_id)
|
||||
|
||||
new_items.sort(key=lambda item: (item.get("created_at") or "", item.get("kind") or "", item.get("id") or ""))
|
||||
state["seen_issue_comment_ids"] = sorted(seen_issue)
|
||||
state["seen_review_comment_ids"] = sorted(seen_review_comment)
|
||||
state["seen_review_ids"] = sorted(seen_review)
|
||||
return new_items
|
||||
|
||||
|
||||
def current_retry_count(state, head_sha):
|
||||
retries = state.get("retries_by_sha") or {}
|
||||
value = retries.get(head_sha, 0)
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return 0
|
||||
|
||||
|
||||
def set_retry_count(state, head_sha, count):
|
||||
retries = state.get("retries_by_sha")
|
||||
if not isinstance(retries, dict):
|
||||
retries = {}
|
||||
retries[head_sha] = int(count)
|
||||
state["retries_by_sha"] = retries
|
||||
|
||||
|
||||
def unique_actions(actions):
|
||||
out = []
|
||||
seen = set()
|
||||
for action in actions:
|
||||
if action not in seen:
|
||||
out.append(action)
|
||||
seen.add(action)
|
||||
return out
|
||||
|
||||
|
||||
def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
|
||||
if pr["closed"] or pr["merged"]:
|
||||
return False
|
||||
if not checks_summary["all_terminal"]:
|
||||
return False
|
||||
if checks_summary["failed_count"] > 0 or checks_summary["pending_count"] > 0:
|
||||
return False
|
||||
if new_review_items:
|
||||
return False
|
||||
if str(pr.get("mergeable") or "") != "MERGEABLE":
|
||||
return False
|
||||
if str(pr.get("merge_state_status") or "") in MERGE_CONFLICT_OR_BLOCKING_STATES:
|
||||
return False
|
||||
if str(pr.get("review_decision") or "") in MERGE_BLOCKING_REVIEW_DECISIONS:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
|
||||
actions = []
|
||||
if pr["closed"] or pr["merged"]:
|
||||
if new_review_items:
|
||||
actions.append("process_review_comment")
|
||||
actions.append("stop_pr_closed")
|
||||
return unique_actions(actions)
|
||||
|
||||
if is_pr_ready_to_merge(pr, checks_summary, new_review_items):
|
||||
actions.append("stop_ready_to_merge")
|
||||
return unique_actions(actions)
|
||||
|
||||
if new_review_items:
|
||||
actions.append("process_review_comment")
|
||||
|
||||
has_failed_pr_checks = checks_summary["failed_count"] > 0
|
||||
if has_failed_pr_checks:
|
||||
if checks_summary["all_terminal"] and retries_used >= max_retries:
|
||||
actions.append("stop_exhausted_retries")
|
||||
else:
|
||||
actions.append("diagnose_ci_failure")
|
||||
if checks_summary["all_terminal"] and failed_runs and retries_used < max_retries:
|
||||
actions.append("retry_failed_checks")
|
||||
|
||||
if not actions:
|
||||
actions.append("idle")
|
||||
return unique_actions(actions)
|
||||
|
||||
|
||||
def collect_snapshot(args):
|
||||
pr = resolve_pr(args.pr, repo_override=args.repo)
|
||||
state_path = Path(args.state_file) if args.state_file else default_state_file_for(pr)
|
||||
state, fresh_state = load_state(state_path)
|
||||
|
||||
if not state.get("started_at"):
|
||||
state["started_at"] = int(time.time())
|
||||
|
||||
# `gh pr checks -R <repo>` requires an explicit PR/branch/url argument.
|
||||
# After resolving `--pr auto`, reuse the concrete PR number.
|
||||
checks = get_pr_checks(str(pr["number"]), repo=pr["repo"])
|
||||
checks_summary = summarize_checks(checks)
|
||||
workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
|
||||
failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
|
||||
authenticated_login = get_authenticated_login()
|
||||
new_review_items = fetch_new_review_items(
|
||||
pr,
|
||||
state,
|
||||
fresh_state=fresh_state,
|
||||
authenticated_login=authenticated_login,
|
||||
)
|
||||
|
||||
retries_used = current_retry_count(state, pr["head_sha"])
|
||||
actions = recommend_actions(
|
||||
pr,
|
||||
checks_summary,
|
||||
failed_runs,
|
||||
new_review_items,
|
||||
retries_used,
|
||||
args.max_flaky_retries,
|
||||
)
|
||||
|
||||
state["pr"] = {"repo": pr["repo"], "number": pr["number"]}
|
||||
state["last_seen_head_sha"] = pr["head_sha"]
|
||||
state["last_snapshot_at"] = int(time.time())
|
||||
save_state(state_path, state)
|
||||
|
||||
snapshot = {
|
||||
"pr": pr,
|
||||
"checks": checks_summary,
|
||||
"failed_runs": failed_runs,
|
||||
"new_review_items": new_review_items,
|
||||
"actions": actions,
|
||||
"retry_state": {
|
||||
"current_sha_retries_used": retries_used,
|
||||
"max_flaky_retries": args.max_flaky_retries,
|
||||
},
|
||||
}
|
||||
return snapshot, state_path
|
||||
|
||||
|
||||
def retry_failed_now(args):
|
||||
snapshot, state_path = collect_snapshot(args)
|
||||
pr = snapshot["pr"]
|
||||
checks_summary = snapshot["checks"]
|
||||
failed_runs = snapshot["failed_runs"]
|
||||
retries_used = snapshot["retry_state"]["current_sha_retries_used"]
|
||||
max_retries = snapshot["retry_state"]["max_flaky_retries"]
|
||||
|
||||
result = {
|
||||
"snapshot": snapshot,
|
||||
"state_file": str(state_path),
|
||||
"rerun_attempted": False,
|
||||
"rerun_count": 0,
|
||||
"rerun_run_ids": [],
|
||||
"reason": None,
|
||||
}
|
||||
|
||||
if pr["closed"] or pr["merged"]:
|
||||
result["reason"] = "pr_closed"
|
||||
return result
|
||||
if checks_summary["failed_count"] <= 0:
|
||||
result["reason"] = "no_failed_pr_checks"
|
||||
return result
|
||||
if not failed_runs:
|
||||
result["reason"] = "no_failed_runs"
|
||||
return result
|
||||
if not checks_summary["all_terminal"]:
|
||||
result["reason"] = "checks_still_pending"
|
||||
return result
|
||||
if retries_used >= max_retries:
|
||||
result["reason"] = "retry_budget_exhausted"
|
||||
return result
|
||||
|
||||
for run in failed_runs:
|
||||
run_id = run.get("run_id")
|
||||
if run_id in (None, ""):
|
||||
continue
|
||||
gh_text(["run", "rerun", str(run_id), "--failed"], repo=pr["repo"])
|
||||
result["rerun_run_ids"].append(run_id)
|
||||
|
||||
if result["rerun_run_ids"]:
|
||||
state, _ = load_state(state_path)
|
||||
new_count = current_retry_count(state, pr["head_sha"]) + 1
|
||||
set_retry_count(state, pr["head_sha"], new_count)
|
||||
state["last_snapshot_at"] = int(time.time())
|
||||
save_state(state_path, state)
|
||||
result["rerun_attempted"] = True
|
||||
result["rerun_count"] = len(result["rerun_run_ids"])
|
||||
result["reason"] = "rerun_triggered"
|
||||
else:
|
||||
result["reason"] = "failed_runs_missing_ids"
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def print_json(obj):
|
||||
sys.stdout.write(json.dumps(obj, sort_keys=True) + "\n")
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def print_event(event, payload):
|
||||
print_json({"event": event, "payload": payload})
|
||||
|
||||
|
||||
def is_ci_green(snapshot):
|
||||
checks = snapshot.get("checks") or {}
|
||||
return (
|
||||
bool(checks.get("all_terminal"))
|
||||
and int(checks.get("failed_count") or 0) == 0
|
||||
and int(checks.get("pending_count") or 0) == 0
|
||||
)
|
||||
|
||||
|
||||
def snapshot_change_key(snapshot):
|
||||
pr = snapshot.get("pr") or {}
|
||||
checks = snapshot.get("checks") or {}
|
||||
review_items = snapshot.get("new_review_items") or []
|
||||
return (
|
||||
str(pr.get("head_sha") or ""),
|
||||
str(pr.get("state") or ""),
|
||||
str(pr.get("mergeable") or ""),
|
||||
str(pr.get("merge_state_status") or ""),
|
||||
str(pr.get("review_decision") or ""),
|
||||
int(checks.get("passed_count") or 0),
|
||||
int(checks.get("failed_count") or 0),
|
||||
int(checks.get("pending_count") or 0),
|
||||
tuple(
|
||||
(str(item.get("kind") or ""), str(item.get("id") or ""))
|
||||
for item in review_items
|
||||
if isinstance(item, dict)
|
||||
),
|
||||
tuple(snapshot.get("actions") or []),
|
||||
)
|
||||
|
||||
|
||||
def run_watch(args):
|
||||
poll_seconds = args.poll_seconds
|
||||
last_change_key = None
|
||||
while True:
|
||||
snapshot, state_path = collect_snapshot(args)
|
||||
print_event(
|
||||
"snapshot",
|
||||
{
|
||||
"snapshot": snapshot,
|
||||
"state_file": str(state_path),
|
||||
"next_poll_seconds": poll_seconds,
|
||||
},
|
||||
)
|
||||
actions = set(snapshot.get("actions") or [])
|
||||
if (
|
||||
"stop_pr_closed" in actions
|
||||
or "stop_exhausted_retries" in actions
|
||||
or "stop_ready_to_merge" in actions
|
||||
):
|
||||
print_event("stop", {"actions": snapshot.get("actions"), "pr": snapshot.get("pr")})
|
||||
return 0
|
||||
|
||||
current_change_key = snapshot_change_key(snapshot)
|
||||
changed = current_change_key != last_change_key
|
||||
green = is_ci_green(snapshot)
|
||||
|
||||
if not green:
|
||||
poll_seconds = args.poll_seconds
|
||||
elif changed or last_change_key is None:
|
||||
poll_seconds = args.poll_seconds
|
||||
else:
|
||||
poll_seconds = min(poll_seconds * 2, GREEN_STATE_MAX_POLL_SECONDS)
|
||||
|
||||
last_change_key = current_change_key
|
||||
time.sleep(poll_seconds)
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
try:
|
||||
if args.retry_failed_now:
|
||||
print_json(retry_failed_now(args))
|
||||
return 0
|
||||
if args.watch:
|
||||
return run_watch(args)
|
||||
snapshot, state_path = collect_snapshot(args)
|
||||
snapshot["state_file"] = str(state_path)
|
||||
print_json(snapshot)
|
||||
return 0
|
||||
except (GhCommandError, RuntimeError, ValueError) as err:
|
||||
sys.stderr.write(f"gh_pr_watch.py error: {err}\n")
|
||||
return 1
|
||||
except KeyboardInterrupt:
|
||||
sys.stderr.write("gh_pr_watch.py interrupted\n")
|
||||
return 130
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
10
.github/scripts/enable-unprivileged-userns.sh
vendored
10
.github/scripts/enable-unprivileged-userns.sh
vendored
@@ -1,10 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Required for bubblewrap to work on Linux CI runners.
|
||||
sudo sysctl -w kernel.unprivileged_userns_clone=1
|
||||
|
||||
# Ubuntu 24.04+ can additionally gate unprivileged user namespaces behind AppArmor.
|
||||
if sudo sysctl -a 2>/dev/null | grep -q '^kernel.apparmor_restrict_unprivileged_userns'; then
|
||||
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
|
||||
fi
|
||||
73
.github/workflows/bazel.yml
vendored
73
.github/workflows/bazel.yml
vendored
@@ -47,6 +47,11 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Node.js for js_repl tests
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: codex-rs/node-version.txt
|
||||
|
||||
# Some integration tests rely on DotSlash being installed.
|
||||
# See https://github.com/openai/codex/pull/7617.
|
||||
- name: Install DotSlash
|
||||
@@ -107,6 +112,45 @@ jobs:
|
||||
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -o pipefail
|
||||
|
||||
bazel_console_log="$(mktemp)"
|
||||
|
||||
print_failed_bazel_test_logs() {
|
||||
local console_log="$1"
|
||||
local testlogs_dir
|
||||
|
||||
testlogs_dir="$(bazel $BAZEL_STARTUP_ARGS info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
|
||||
|
||||
local failed_targets=()
|
||||
while IFS= read -r target; do
|
||||
failed_targets+=("$target")
|
||||
done < <(
|
||||
grep -E '^FAIL: //' "$console_log" \
|
||||
| sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
|
||||
| sort -u
|
||||
)
|
||||
|
||||
if [[ ${#failed_targets[@]} -eq 0 ]]; then
|
||||
echo "No failed Bazel test targets were found in console output."
|
||||
return
|
||||
fi
|
||||
|
||||
for target in "${failed_targets[@]}"; do
|
||||
local rel_path="${target#//}"
|
||||
rel_path="${rel_path/:/\/}"
|
||||
local test_log="${testlogs_dir}/${rel_path}/test.log"
|
||||
|
||||
echo "::group::Bazel test log tail for ${target}"
|
||||
if [[ -f "$test_log" ]]; then
|
||||
tail -n 200 "$test_log"
|
||||
else
|
||||
echo "Missing test log: $test_log"
|
||||
fi
|
||||
echo "::endgroup::"
|
||||
done
|
||||
}
|
||||
|
||||
bazel_args=(
|
||||
test
|
||||
//...
|
||||
@@ -117,12 +161,28 @@ jobs:
|
||||
--build_metadata=VISIBILITY=PUBLIC
|
||||
)
|
||||
|
||||
if [[ "${RUNNER_OS:-}" != "Windows" ]]; then
|
||||
# Bazel test sandboxes on macOS may resolve an older Homebrew `node`
|
||||
# before the `actions/setup-node` runtime on PATH.
|
||||
node_bin="$(which node)"
|
||||
bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}")
|
||||
fi
|
||||
|
||||
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
|
||||
echo "BuildBuddy API key is available; using remote Bazel configuration."
|
||||
# Work around Bazel 9 remote repo contents cache / overlay materialization failures
|
||||
# seen in CI (for example "is not a symlink" or permission errors while
|
||||
# materializing external repos such as rules_perl). We still use BuildBuddy for
|
||||
# remote execution/cache; this only disables the startup-level repo contents cache.
|
||||
set +e
|
||||
bazel $BAZEL_STARTUP_ARGS \
|
||||
--noexperimental_remote_repo_contents_cache \
|
||||
--bazelrc=.github/workflows/ci.bazelrc \
|
||||
"${bazel_args[@]}" \
|
||||
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY"
|
||||
"--remote_header=x-buildbuddy-api-key=$BUILDBUDDY_API_KEY" \
|
||||
2>&1 | tee "$bazel_console_log"
|
||||
bazel_status=${PIPESTATUS[0]}
|
||||
set -e
|
||||
else
|
||||
echo "BuildBuddy API key is not available; using local Bazel configuration."
|
||||
# Keep fork/community PRs on Bazel but disable remote services that are
|
||||
@@ -141,9 +201,18 @@ jobs:
|
||||
# clear remote cache/execution endpoints configured in .bazelrc.
|
||||
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_cache
|
||||
# https://bazel.build/reference/command-line-reference#common_options-flag--remote_executor
|
||||
set +e
|
||||
bazel $BAZEL_STARTUP_ARGS \
|
||||
--noexperimental_remote_repo_contents_cache \
|
||||
"${bazel_args[@]}" \
|
||||
--remote_cache= \
|
||||
--remote_executor=
|
||||
--remote_executor= \
|
||||
2>&1 | tee "$bazel_console_log"
|
||||
bazel_status=${PIPESTATUS[0]}
|
||||
set -e
|
||||
fi
|
||||
|
||||
if [[ ${bazel_status:-0} -ne 0 ]]; then
|
||||
print_failed_bazel_test_logs "$bazel_console_log"
|
||||
exit "$bazel_status"
|
||||
fi
|
||||
|
||||
55
.github/workflows/rust-ci.yml
vendored
55
.github/workflows/rust-ci.yml
vendored
@@ -451,7 +451,7 @@ jobs:
|
||||
key: apt-${{ matrix.runner }}-${{ matrix.target }}-v1
|
||||
|
||||
tests:
|
||||
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }} (shard ${{ matrix.shard_index }}/${{ matrix.shard_count }})
|
||||
name: Tests — ${{ matrix.runner }} - ${{ matrix.target }}
|
||||
runs-on: ${{ matrix.runs_on || matrix.runner }}
|
||||
timeout-minutes: 30
|
||||
needs: changed
|
||||
@@ -468,44 +468,29 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# To increase sharding (for example 2 -> 4), update both shard_count and
|
||||
# the shard_index list.
|
||||
shard_count: [2]
|
||||
shard_index: [1, 2]
|
||||
target_key:
|
||||
- linux-x64-gnu
|
||||
- macos-aarch64
|
||||
- linux-arm64-gnu
|
||||
- windows-x64
|
||||
- windows-arm64
|
||||
include:
|
||||
- target_key: linux-x64-gnu
|
||||
runner: ubuntu-24.04
|
||||
- runner: macos-15-xlarge
|
||||
target: aarch64-apple-darwin
|
||||
profile: dev
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-gnu
|
||||
profile: dev
|
||||
runs_on:
|
||||
group: codex-runners
|
||||
labels: codex-linux-x64
|
||||
- target_key: macos-aarch64
|
||||
runner: macos-15-xlarge
|
||||
target: aarch64-apple-darwin
|
||||
profile: dev
|
||||
- target_key: linux-arm64-gnu
|
||||
runner: ubuntu-24.04-arm
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-gnu
|
||||
profile: dev
|
||||
runs_on:
|
||||
group: codex-runners
|
||||
labels: codex-linux-arm64
|
||||
- target_key: windows-x64
|
||||
runner: windows-x64
|
||||
- runner: windows-x64
|
||||
target: x86_64-pc-windows-msvc
|
||||
profile: dev
|
||||
runs_on:
|
||||
group: codex-runners
|
||||
labels: codex-windows-x64
|
||||
- target_key: windows-arm64
|
||||
runner: windows-arm64
|
||||
- runner: windows-arm64
|
||||
target: aarch64-pc-windows-msvc
|
||||
profile: dev
|
||||
runs_on:
|
||||
@@ -514,6 +499,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- name: Set up Node.js for js_repl tests
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version-file: codex-rs/node-version.txt
|
||||
- name: Install Linux build dependencies
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
shell: bash
|
||||
@@ -594,20 +583,22 @@ jobs:
|
||||
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
|
||||
with:
|
||||
tool: nextest
|
||||
version: 0.9.111
|
||||
version: 0.9.103
|
||||
|
||||
- name: Enable unprivileged user namespaces (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: bash "${GITHUB_WORKSPACE}/.github/scripts/enable-unprivileged-userns.sh"
|
||||
run: |
|
||||
# Required for bubblewrap to work on Linux CI runners.
|
||||
sudo sysctl -w kernel.unprivileged_userns_clone=1
|
||||
# Ubuntu 24.04+ can additionally gate unprivileged user namespaces
|
||||
# behind AppArmor.
|
||||
if sudo sysctl -a 2>/dev/null | grep -q '^kernel.apparmor_restrict_unprivileged_userns'; then
|
||||
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
|
||||
fi
|
||||
|
||||
- name: tests
|
||||
id: test
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cmd=(cargo nextest run --all-features --no-fail-fast --target "${{ matrix.target }}" --cargo-profile ci-test --timings)
|
||||
cmd+=(--partition "hash:${{ matrix.shard_index }}/${{ matrix.shard_count }}")
|
||||
"${cmd[@]}"
|
||||
run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
NEXTEST_STATUS_LEVEL: leak
|
||||
@@ -616,7 +607,7 @@ jobs:
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: cargo-timings-rust-ci-nextest-${{ matrix.target }}-${{ matrix.profile }}-shard-${{ matrix.shard_index }}-of-${{ matrix.shard_count }}
|
||||
name: cargo-timings-rust-ci-nextest-${{ matrix.target }}-${{ matrix.profile }}
|
||||
path: codex-rs/target/**/cargo-timings/cargo-timing.html
|
||||
if-no-files-found: warn
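One side of this hunk runs the suite through nextest's hash partitioning, which deterministically hashes every test into one of `shard_count` buckets so parallel CI jobs cover disjoint, stable subsets; the matrix feeds `shard_index`/`shard_count` into that flag. A minimal sketch of the flag with illustrative values rather than the workflow's matrix expressions:

```shell
# Run shard 1 of 2: nextest hashes each test name into a bucket, so the two
# shards together run the whole suite exactly once.
cargo nextest run --all-features --no-fail-fast \
  --partition "hash:1/2"
```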
.github/workflows/rust-release.yml (vendored, 11 changed lines)
@@ -178,6 +178,12 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Avoid problematic aws-lc jitter entropy code path on musl builders.
|
||||
echo "AWS_LC_SYS_NO_JITTER_ENTROPY=1" >> "$GITHUB_ENV"
|
||||
target_no_jitter="AWS_LC_SYS_NO_JITTER_ENTROPY_${{ matrix.target }}"
|
||||
target_no_jitter="${target_no_jitter//-/_}"
|
||||
echo "${target_no_jitter}=1" >> "$GITHUB_ENV"
|
||||
|
||||
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
|
||||
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
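The step above sets the jitter-entropy opt-out twice: once globally and once under a target-suffixed name built from the target triple with dashes folded to underscores. A small sketch of that name mangling, using an illustrative triple:

```shell
# Build "AWS_LC_SYS_NO_JITTER_ENTROPY_<triple with - replaced by _>" and export
# both forms into the job environment, as the workflow step does.
target="x86_64-unknown-linux-musl"            # illustrative triple
echo "AWS_LC_SYS_NO_JITTER_ENTROPY=1" >> "$GITHUB_ENV"
var="AWS_LC_SYS_NO_JITTER_ENTROPY_${target}"
var="${var//-/_}"                             # dashes are not valid in shell variable names
echo "${var}=1" >> "$GITHUB_ENV"
```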
@@ -488,6 +494,11 @@ jobs:
|
||||
--package codex-responses-api-proxy \
|
||||
--package codex-sdk
|
||||
|
||||
- name: Stage installer scripts
|
||||
run: |
|
||||
cp scripts/install/install.sh dist/install.sh
|
||||
cp scripts/install/install.ps1 dist/install.ps1
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
.github/workflows/shell-tool-mcp.yml (vendored, 126 changed lines)
@@ -67,128 +67,6 @@ jobs:
|
||||
echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT"
|
||||
echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
rust-binaries:
|
||||
name: Build Rust - ${{ matrix.target }}
|
||||
needs: metadata
|
||||
runs-on: ${{ matrix.runner }}
|
||||
timeout-minutes: 30
|
||||
env:
|
||||
CARGO_PROFILE_RELEASE_LTO: ${{ contains(needs.metadata.outputs.version, '-alpha') && 'thin' || 'fat' }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: codex-rs
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- runner: macos-15-xlarge
|
||||
target: aarch64-apple-darwin
|
||||
- runner: macos-15-xlarge
|
||||
target: x86_64-apple-darwin
|
||||
- runner: ubuntu-24.04
|
||||
target: x86_64-unknown-linux-musl
|
||||
install_musl: true
|
||||
- runner: ubuntu-24.04-arm
|
||||
target: aarch64-unknown-linux-musl
|
||||
install_musl: true
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Install UBSan runtime (musl)
|
||||
if: ${{ matrix.install_musl }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if command -v apt-get >/dev/null 2>&1; then
|
||||
sudo apt-get update -y
|
||||
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libubsan1
|
||||
fi
|
||||
|
||||
- uses: dtolnay/rust-toolchain@1.93.0
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
|
||||
- if: ${{ matrix.install_musl }}
|
||||
name: Install Zig
|
||||
uses: mlugg/setup-zig@v2
|
||||
with:
|
||||
version: 0.14.0
|
||||
|
||||
- if: ${{ matrix.install_musl }}
|
||||
name: Install musl build dependencies
|
||||
env:
|
||||
TARGET: ${{ matrix.target }}
|
||||
run: bash "${GITHUB_WORKSPACE}/.github/scripts/install-musl-build-tools.sh"
|
||||
|
||||
- if: ${{ matrix.install_musl }}
|
||||
name: Configure rustc UBSan wrapper (musl host)
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
ubsan=""
|
||||
if command -v ldconfig >/dev/null 2>&1; then
|
||||
ubsan="$(ldconfig -p | grep -m1 'libubsan\.so\.1' | sed -E 's/.*=> (.*)$/\1/')"
|
||||
fi
|
||||
wrapper_root="${RUNNER_TEMP:-/tmp}"
|
||||
wrapper="${wrapper_root}/rustc-ubsan-wrapper"
|
||||
cat > "${wrapper}" <<EOF
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
if [[ -n "${ubsan}" ]]; then
|
||||
export LD_PRELOAD="${ubsan}\${LD_PRELOAD:+:\${LD_PRELOAD}}"
|
||||
fi
|
||||
exec "\$1" "\${@:2}"
|
||||
EOF
|
||||
chmod +x "${wrapper}"
|
||||
echo "RUSTC_WRAPPER=${wrapper}" >> "$GITHUB_ENV"
|
||||
echo "RUSTC_WORKSPACE_WRAPPER=" >> "$GITHUB_ENV"
|
||||
|
||||
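The heredoc above escapes `\$1` and `\${@:2}` so they expand when the wrapper runs rather than when it is written; cargo then invokes `RUSTC_WRAPPER` as `wrapper <real-rustc> <args...>`. With a libubsan path substituted in (the path below is illustrative), the generated file looks roughly like this:

```shell
#!/usr/bin/env bash
set -euo pipefail
# Preload the UBSan runtime that ldconfig reported, then forward to the real
# rustc with all of its original arguments.
export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libubsan.so.1${LD_PRELOAD:+:${LD_PRELOAD}}"
exec "$1" "${@:2}"
```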
- if: ${{ matrix.install_musl }}
|
||||
name: Clear sanitizer flags (musl)
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Clear global Rust flags so host/proc-macro builds don't pull in UBSan.
|
||||
echo "RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_ENCODED_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "RUSTDOCFLAGS=" >> "$GITHUB_ENV"
|
||||
# Override any runner-level Cargo config rustflags as well.
|
||||
echo "CARGO_BUILD_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS=" >> "$GITHUB_ENV"
|
||||
|
||||
sanitize_flags() {
|
||||
local input="$1"
|
||||
input="${input//-fsanitize=undefined/}"
|
||||
input="${input//-fno-sanitize-recover=undefined/}"
|
||||
input="${input//-fno-sanitize-trap=undefined/}"
|
||||
echo "$input"
|
||||
}
|
||||
|
||||
cflags="$(sanitize_flags "${CFLAGS-}")"
|
||||
cxxflags="$(sanitize_flags "${CXXFLAGS-}")"
|
||||
echo "CFLAGS=${cflags}" >> "$GITHUB_ENV"
|
||||
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"
|
||||
|
||||
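`sanitize_flags` above relies on bash's `${var//pattern/}` substitution to strip the UBSan-related switches out of whatever `CFLAGS`/`CXXFLAGS` the runner already exported. A small illustration with a made-up flag set:

```shell
# Strip UBSan flags from a sample CFLAGS value; each substitution deletes every
# occurrence of the pattern, leaving the remaining flags (plus extra spaces).
CFLAGS="-O2 -fsanitize=undefined -fno-sanitize-recover=undefined"
cleaned="${CFLAGS//-fsanitize=undefined/}"
cleaned="${cleaned//-fno-sanitize-recover=undefined/}"
echo "CFLAGS=${cleaned}"   # -> CFLAGS=-O2 (plus leftover whitespace)
```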
- name: Build exec server binaries
|
||||
run: cargo build --release --target ${{ matrix.target }} --bin codex-exec-mcp-server --bin codex-execve-wrapper
|
||||
|
||||
- name: Stage exec server binaries
|
||||
run: |
|
||||
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}"
|
||||
mkdir -p "$dest"
|
||||
cp "target/${{ matrix.target }}/release/codex-exec-mcp-server" "$dest/"
|
||||
cp "target/${{ matrix.target }}/release/codex-execve-wrapper" "$dest/"
|
||||
|
||||
- uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: shell-tool-mcp-rust-${{ matrix.target }}
|
||||
path: artifacts/**
|
||||
if-no-files-found: error
|
||||
|
||||
bash-linux:
|
||||
name: Build Bash (Linux) - ${{ matrix.variant }} - ${{ matrix.target }}
|
||||
needs: metadata
|
||||
@@ -537,7 +415,6 @@ jobs:
|
||||
name: Package npm module
|
||||
needs:
|
||||
- metadata
|
||||
- rust-binaries
|
||||
- bash-linux
|
||||
- bash-darwin
|
||||
- zsh-linux
|
||||
@@ -579,7 +456,6 @@ jobs:
|
||||
mkdir -p "$staging" "$staging/vendor"
|
||||
cp shell-tool-mcp/README.md "$staging/"
|
||||
cp shell-tool-mcp/package.json "$staging/"
|
||||
cp -R shell-tool-mcp/bin "$staging/"
|
||||
|
||||
found_vendor="false"
|
||||
shopt -s nullglob
|
||||
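The `shopt -s nullglob` just above makes unmatched globs expand to nothing instead of their literal pattern, which is what lets a flag like `found_vendor` stay `false` when no vendor directories were staged. A small illustration under that assumption; the loop body here is not from the workflow:

```shell
shopt -s nullglob
found_vendor="false"
for dir in "$staging"/vendor/*/; do   # zero iterations if nothing matched
  found_vendor="true"
done
echo "found_vendor=${found_vendor}"
```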
@@ -613,8 +489,6 @@ jobs:
|
||||
set -euo pipefail
|
||||
staging="${{ steps.staging.outputs.dir }}"
|
||||
chmod +x \
|
||||
"$staging"/vendor/*/codex-exec-mcp-server \
|
||||
"$staging"/vendor/*/codex-execve-wrapper \
|
||||
"$staging"/vendor/*/bash/*/bash \
|
||||
"$staging"/vendor/*/zsh/*/zsh
@@ -1,4 +1,11 @@
|
||||
load("@apple_support//xcode:xcode_config.bzl", "xcode_config")
|
||||
load("@rules_cc//cc:defs.bzl", "cc_shared_library")
|
||||
|
||||
cc_shared_library(
|
||||
name = "clang",
|
||||
deps = ["@llvm-project//clang:libclang"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
xcode_config(name = "disable_xcode")
MODULE.bazel (57 changed lines)
@@ -1,5 +1,7 @@
|
||||
module(name = "codex")
|
||||
|
||||
bazel_dep(name = "platforms", version = "1.0.0")
|
||||
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.5.3")
|
||||
bazel_dep(name = "toolchains_llvm_bootstrapped", version = "0.5.6")
|
||||
single_version_override(
|
||||
module_name = "toolchains_llvm_bootstrapped",
|
||||
patch_strip = 1,
|
||||
@@ -8,6 +10,8 @@ single_version_override(
|
||||
],
|
||||
)
|
||||
|
||||
register_toolchains("@toolchains_llvm_bootstrapped//toolchain:all")
|
||||
|
||||
osx = use_extension("@toolchains_llvm_bootstrapped//extensions:osx.bzl", "osx")
|
||||
osx.framework(name = "ApplicationServices")
|
||||
osx.framework(name = "AppKit")
|
||||
@@ -16,8 +20,12 @@ osx.framework(name = "CoreFoundation")
|
||||
osx.framework(name = "CoreGraphics")
|
||||
osx.framework(name = "CoreServices")
|
||||
osx.framework(name = "CoreText")
|
||||
osx.framework(name = "AudioToolbox")
|
||||
osx.framework(name = "CFNetwork")
|
||||
osx.framework(name = "FontServices")
|
||||
osx.framework(name = "AudioUnit")
|
||||
osx.framework(name = "CoreAudio")
|
||||
osx.framework(name = "CoreAudioTypes")
|
||||
osx.framework(name = "Foundation")
|
||||
osx.framework(name = "ImageIO")
|
||||
osx.framework(name = "IOKit")
|
||||
@@ -25,10 +33,7 @@ osx.framework(name = "Kernel")
|
||||
osx.framework(name = "OSLog")
|
||||
osx.framework(name = "Security")
|
||||
osx.framework(name = "SystemConfiguration")
|
||||
|
||||
register_toolchains(
|
||||
"@toolchains_llvm_bootstrapped//toolchain:all",
|
||||
)
|
||||
use_repo(osx, "macosx15.4.sdk")
|
||||
|
||||
# Needed to disable xcode...
|
||||
bazel_dep(name = "apple_support", version = "2.1.0")
|
||||
@@ -39,9 +44,9 @@ bazel_dep(name = "rules_rs", version = "0.0.23")
|
||||
# Special toolchains branch
|
||||
archive_override(
|
||||
module_name = "rules_rs",
|
||||
integrity = "sha256-YbDRjZos4UmfIPY98znK1BgBWRQ1/ui3CtL6RqxE30I=",
|
||||
strip_prefix = "rules_rs-6cf3d940fdc48baf3ebd6c37daf8e0be8fc73ecb",
|
||||
url = "https://github.com/dzbarsky/rules_rs/archive/6cf3d940fdc48baf3ebd6c37daf8e0be8fc73ecb.tar.gz",
|
||||
integrity = "sha256-O34UF4H7b1Qacu3vlu2Od4ILGVApzg5j1zl952SFL3w=",
|
||||
strip_prefix = "rules_rs-097123c2aa72672e371e69e7035869f5a45c7b2b",
|
||||
url = "https://github.com/dzbarsky/rules_rs/archive/097123c2aa72672e371e69e7035869f5a45c7b2b.tar.gz",
|
||||
)
|
||||
|
||||
rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
|
||||
@@ -134,6 +139,9 @@ crate.annotation(
|
||||
"OPENSSL_NO_VENDOR": "1",
|
||||
"OPENSSL_STATIC": "1",
|
||||
},
|
||||
crate_features = [
|
||||
"dep:openssl-src",
|
||||
],
|
||||
crate = "openssl-sys",
|
||||
data = ["@openssl//:gen_dir"],
|
||||
)
|
||||
@@ -145,6 +153,28 @@ crate.annotation(
|
||||
workspace_cargo_toml = "rust/runfiles/Cargo.toml",
|
||||
)
|
||||
|
||||
llvm = use_extension("@toolchains_llvm_bootstrapped//extensions:llvm.bzl", "llvm")
|
||||
use_repo(llvm, "llvm-project")
|
||||
|
||||
crate.annotation(
|
||||
# Provide the hermetic SDK path so the build script doesn't try to invoke an unhermetic `xcrun --show-sdk-path`.
|
||||
build_script_data = [
|
||||
"@macosx15.4.sdk//sysroot",
|
||||
],
|
||||
build_script_env = {
|
||||
"BINDGEN_EXTRA_CLANG_ARGS": "-isystem $(location @toolchains_llvm_bootstrapped//:builtin_headers)",
|
||||
"COREAUDIO_SDK_PATH": "$(location @macosx15.4.sdk//sysroot)",
|
||||
"LIBCLANG_PATH": "$(location @codex//:clang)",
|
||||
},
|
||||
build_script_tools = [
|
||||
"@codex//:clang",
|
||||
"@toolchains_llvm_bootstrapped//:builtin_headers",
|
||||
],
|
||||
crate = "coreaudio-sys",
|
||||
)
|
||||
|
||||
inject_repo(crate, "codex", "toolchains_llvm_bootstrapped", "macosx15.4.sdk")
|
||||
|
||||
# Fix readme inclusions
|
||||
crate.annotation(
|
||||
crate = "windows-link",
|
||||
@@ -175,6 +205,17 @@ crate.annotation(
|
||||
gen_build_script = "off",
|
||||
deps = [":windows_import_lib"],
|
||||
)
|
||||
|
||||
bazel_dep(name = "alsa_lib", version = "1.2.9.bcr.4")
|
||||
|
||||
crate.annotation(
|
||||
crate = "alsa-sys",
|
||||
gen_build_script = "off",
|
||||
deps = ["@alsa_lib"],
|
||||
)
|
||||
|
||||
inject_repo(crate, "alsa_lib")
|
||||
|
||||
use_repo(crate, "crates")
|
||||
|
||||
rbe_platform_repository = use_repo_rule("//:rbe.bzl", "rbe_platform_repository")
MODULE.bazel.lock (generated, 61 changed lines): file diff suppressed because one or more lines are too long
README.md (16 changed lines)
@@ -1,4 +1,4 @@
|
||||
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
|
||||
<p align="center"><code>curl -fsSL https://chatgpt.com/codex/install.sh | sh</code><br />or <code>npm i -g @openai/codex</code><br />or <code>brew install --cask codex</code></p>
|
||||
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
|
||||
<p align="center">
|
||||
<img src="https://github.com/openai/codex/blob/main/.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />
|
||||
@@ -14,7 +14,19 @@ If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="http
|
||||
|
||||
### Installing and running Codex CLI
|
||||
|
||||
Install globally with your preferred package manager:
|
||||
Install the latest Codex release directly:
|
||||
|
||||
```shell
|
||||
# Install on macOS, Linux, or WSL
|
||||
curl -fsSL https://chatgpt.com/codex/install.sh | sh
|
||||
```
|
||||
|
||||
```powershell
|
||||
# Install on Windows
|
||||
powershell -c "irm https://chatgpt.com/codex/install.ps1|iex"
|
||||
```
|
||||
|
||||
You can also install with your preferred package manager:
|
||||
|
||||
```shell
|
||||
# Install using npm
codex-rs/Cargo.lock (generated, 473 changed lines)
@@ -291,6 +291,28 @@ version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
||||
|
||||
[[package]]
|
||||
name = "alsa"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed7572b7ba83a31e20d1b48970ee402d2e3e0537dcfe0a3ff4d6eb7508617d43"
|
||||
dependencies = [
|
||||
"alsa-sys",
|
||||
"bitflags 2.10.0",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alsa-sys"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db8fee663d06c4e303404ef5f40488a53e062f89ba8bfed81f42325aafad1527"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "android_system_properties"
|
||||
version = "0.1.5"
|
||||
@@ -852,6 +874,33 @@ version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
|
||||
|
||||
[[package]]
|
||||
name = "bincode"
|
||||
version = "1.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bindgen"
|
||||
version = "0.72.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"cexpr",
|
||||
"clang-sys",
|
||||
"itertools 0.13.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"regex",
|
||||
"rustc-hash 2.1.1",
|
||||
"shlex",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-set"
|
||||
version = "0.5.3"
|
||||
@@ -1089,6 +1138,15 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
|
||||
|
||||
[[package]]
|
||||
name = "cexpr"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
|
||||
dependencies = [
|
||||
"nom 7.1.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.4"
|
||||
@@ -1173,6 +1231,17 @@ dependencies = [
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clang-sys"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
|
||||
dependencies = [
|
||||
"glob",
|
||||
"libc",
|
||||
"libloading",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.58"
|
||||
@@ -1310,6 +1379,7 @@ dependencies = [
|
||||
"codex-protocol",
|
||||
"codex-rmcp-client",
|
||||
"codex-shell-command",
|
||||
"codex-state",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-cli",
|
||||
@@ -1398,8 +1468,9 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-apply-patch",
|
||||
"codex-core",
|
||||
"codex-linux-sandbox",
|
||||
"codex-shell-escalation",
|
||||
"codex-utils-home-dir",
|
||||
"dotenvy",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -1644,16 +1715,21 @@ dependencies = [
|
||||
"codex-rmcp-client",
|
||||
"codex-secrets",
|
||||
"codex-shell-command",
|
||||
"codex-shell-escalation",
|
||||
"codex-skills",
|
||||
"codex-state",
|
||||
"codex-test-macros",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-cargo-bin",
|
||||
"codex-utils-home-dir",
|
||||
"codex-utils-pty",
|
||||
"codex-utils-readiness",
|
||||
"codex-utils-stream-parser",
|
||||
"codex-utils-string",
|
||||
"codex-windows-sandbox",
|
||||
"core-foundation 0.9.4",
|
||||
"core_test_support",
|
||||
"csv",
|
||||
"ctor 0.6.3",
|
||||
"dirs",
|
||||
"dunce",
|
||||
@@ -1663,9 +1739,7 @@ dependencies = [
|
||||
"futures",
|
||||
"http 1.4.0",
|
||||
"image",
|
||||
"include_dir",
|
||||
"indexmap 2.13.0",
|
||||
"indoc",
|
||||
"insta",
|
||||
"keyring",
|
||||
"landlock",
|
||||
@@ -1735,6 +1809,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"assert_cmd",
|
||||
"clap",
|
||||
"codex-apply-patch",
|
||||
"codex-arg0",
|
||||
"codex-cloud-requirements",
|
||||
"codex-core",
|
||||
@@ -1765,35 +1840,6 @@ dependencies = [
|
||||
"wiremock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-exec-server"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"clap",
|
||||
"codex-core",
|
||||
"codex-execpolicy",
|
||||
"codex-protocol",
|
||||
"codex-shell-command",
|
||||
"codex-utils-cargo-bin",
|
||||
"exec_server_test_support",
|
||||
"libc",
|
||||
"maplit",
|
||||
"path-absolutize",
|
||||
"pretty_assertions",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"shlex",
|
||||
"socket2 0.6.2",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-execpolicy"
|
||||
version = "0.0.0"
|
||||
@@ -1920,9 +1966,11 @@ dependencies = [
|
||||
"pkg-config",
|
||||
"pretty_assertions",
|
||||
"seccompiler",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1995,8 +2043,10 @@ version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"chrono",
|
||||
"clap",
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-home-dir",
|
||||
"codex-utils-rustls-provider",
|
||||
"globset",
|
||||
"pretty_assertions",
|
||||
@@ -2046,6 +2096,7 @@ dependencies = [
|
||||
"codex-utils-absolute-path",
|
||||
"codex-utils-string",
|
||||
"eventsource-stream",
|
||||
"gethostname",
|
||||
"http 1.4.0",
|
||||
"opentelemetry",
|
||||
"opentelemetry-appender-tracing",
|
||||
@@ -2188,6 +2239,35 @@ dependencies = [
|
||||
"which",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-shell-escalation"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"clap",
|
||||
"codex-utils-absolute-path",
|
||||
"libc",
|
||||
"pretty_assertions",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"socket2 0.6.2",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-skills"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"codex-utils-absolute-path",
|
||||
"include_dir",
|
||||
"thiserror 2.0.18",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-state"
|
||||
version = "0.0.0"
|
||||
@@ -2222,6 +2302,15 @@ dependencies = [
|
||||
"uds_windows",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-test-macros"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-tui"
|
||||
version = "0.0.0"
|
||||
@@ -2259,11 +2348,13 @@ dependencies = [
|
||||
"codex-utils-sleep-inhibitor",
|
||||
"codex-windows-sandbox",
|
||||
"color-eyre",
|
||||
"cpal",
|
||||
"crossterm",
|
||||
"derive_more 2.1.1",
|
||||
"diffy",
|
||||
"dirs",
|
||||
"dunce",
|
||||
"hound",
|
||||
"image",
|
||||
"insta",
|
||||
"itertools 0.14.0",
|
||||
@@ -2285,6 +2376,7 @@ dependencies = [
|
||||
"strum 0.27.2",
|
||||
"strum_macros 0.27.2",
|
||||
"supports-color 3.0.2",
|
||||
"syntect",
|
||||
"tempfile",
|
||||
"textwrap 0.16.2",
|
||||
"thiserror 2.0.18",
|
||||
@@ -2295,8 +2387,7 @@ dependencies = [
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-subscriber",
|
||||
"tree-sitter-bash",
|
||||
"tree-sitter-highlight",
|
||||
"two-face",
|
||||
"unicode-segmentation",
|
||||
"unicode-width 0.2.1",
|
||||
"url",
|
||||
@@ -2454,7 +2545,16 @@ name = "codex-utils-sleep-inhibitor"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"core-foundation 0.9.4",
|
||||
"libc",
|
||||
"tracing",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codex-utils-stream-parser"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"pretty_assertions",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2462,6 +2562,7 @@ name = "codex-utils-string"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"pretty_assertions",
|
||||
"regex-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2694,6 +2795,49 @@ dependencies = [
|
||||
"zstd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "coreaudio-rs"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "321077172d79c662f64f5071a03120748d5bb652f5231570141be24cfcd2bace"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"core-foundation-sys",
|
||||
"coreaudio-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "coreaudio-sys"
|
||||
version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ceec7a6067e62d6f931a2baf6f3a751f4a892595bcec1461a3c94ef9949864b6"
|
||||
dependencies = [
|
||||
"bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cpal"
|
||||
version = "0.15.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "873dab07c8f743075e57f524c583985fbaf745602acbe916a01539364369a779"
|
||||
dependencies = [
|
||||
"alsa",
|
||||
"core-foundation-sys",
|
||||
"coreaudio-rs",
|
||||
"dasp_sample",
|
||||
"jni",
|
||||
"js-sys",
|
||||
"libc",
|
||||
"mach2",
|
||||
"ndk",
|
||||
"ndk-context",
|
||||
"oboe",
|
||||
"wasm-bindgen",
|
||||
"wasm-bindgen-futures",
|
||||
"web-sys",
|
||||
"windows 0.54.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.2.17"
|
||||
@@ -2994,6 +3138,12 @@ dependencies = [
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dasp_sample"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c87e182de0887fd5361989c677c4e8f5000cd9491d6d563161a8f3a5519fc7f"
|
||||
|
||||
[[package]]
|
||||
name = "data-encoding"
|
||||
version = "2.10.0"
|
||||
@@ -3540,19 +3690,6 @@ dependencies = [
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "exec_server_test_support"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"codex-core",
|
||||
"codex-protocol",
|
||||
"codex-utils-cargo-bin",
|
||||
"rmcp",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "eyre"
|
||||
version = "0.6.12"
|
||||
@@ -4032,6 +4169,12 @@ version = "0.32.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
|
||||
|
||||
[[package]]
|
||||
name = "glob"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
|
||||
|
||||
[[package]]
|
||||
name = "globset"
|
||||
version = "0.4.18"
|
||||
@@ -4248,6 +4391,12 @@ dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hound"
|
||||
version = "3.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62adaabb884c94955b19907d60019f4e145d091c75345379e70d1ee696f7854f"
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "0.2.12"
|
||||
@@ -5093,9 +5242,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.180"
|
||||
version = "0.2.182"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
|
||||
checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112"
|
||||
|
||||
[[package]]
|
||||
name = "libdbus-sys"
|
||||
@@ -5106,6 +5255,16 @@ dependencies = [
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libloading"
|
||||
version = "0.8.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libm"
|
||||
version = "0.2.16"
|
||||
@@ -5145,6 +5304,12 @@ dependencies = [
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linked-hash-map"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
|
||||
|
||||
[[package]]
|
||||
name = "linux-keyutils"
|
||||
version = "0.2.4"
|
||||
@@ -5291,6 +5456,15 @@ dependencies = [
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mach2"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "maplit"
|
||||
version = "1.0.2"
|
||||
@@ -5474,12 +5648,35 @@ dependencies = [
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ndk"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2076a31b7010b17a38c01907c45b945e8f11495ee4dd588309718901b1f7a5b7"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"jni-sys",
|
||||
"log",
|
||||
"ndk-sys",
|
||||
"num_enum",
|
||||
"thiserror 1.0.69",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ndk-context"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
|
||||
|
||||
[[package]]
|
||||
name = "ndk-sys"
|
||||
version = "0.5.0+25.2.9519653"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8c196769dd60fd4f363e11d948139556a344e79d451aeb2fa2fd040738ef7691"
|
||||
dependencies = [
|
||||
"jni-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "new_debug_unreachable"
|
||||
version = "1.0.6"
|
||||
@@ -5667,6 +5864,17 @@ version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||
|
||||
[[package]]
|
||||
name = "num-derive"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.46"
|
||||
@@ -5718,6 +5926,28 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_enum"
|
||||
version = "0.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c"
|
||||
dependencies = [
|
||||
"num_enum_derive",
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_enum_derive"
|
||||
version = "0.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7"
|
||||
dependencies = [
|
||||
"proc-macro-crate",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_threads"
|
||||
version = "0.1.7"
|
||||
@@ -5927,6 +6157,29 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oboe"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8b61bebd49e5d43f5f8cc7ee2891c16e0f41ec7954d36bcb6c14c5e0de867fb"
|
||||
dependencies = [
|
||||
"jni",
|
||||
"ndk",
|
||||
"ndk-context",
|
||||
"num-derive",
|
||||
"num-traits",
|
||||
"oboe-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oboe-sys"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c8bb09a4a2b1d668170cfe0a7d5bc103f8999fb316c98099b6a9939c9f2e79d"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oid-registry"
|
||||
version = "0.8.1"
|
||||
@@ -5952,6 +6205,28 @@ version = "1.70.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
|
||||
|
||||
[[package]]
|
||||
name = "onig"
|
||||
version = "6.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "336b9c63443aceef14bea841b899035ae3abe89b7c486aaf4c5bd8aafedac3f0"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"onig_sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "onig_sys"
|
||||
version = "69.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7f86c6eef3d6df15f23bcfb6af487cbd2fed4e5581d58d5bf1f5f8b7f6727dc"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opaque-debug"
|
||||
version = "0.3.1"
|
||||
@@ -6161,9 +6436,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "owo-colors"
|
||||
version = "4.2.3"
|
||||
version = "4.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52"
|
||||
checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d"
|
||||
dependencies = [
|
||||
"supports-color 2.1.0",
|
||||
"supports-color 3.0.2",
|
||||
@@ -6369,6 +6644,19 @@ version = "0.3.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
|
||||
|
||||
[[package]]
|
||||
name = "plist"
|
||||
version = "1.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "740ebea15c5d1428f910cd1a5f52cebf8d25006245ed8ade92702f4943d91e07"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"indexmap 2.13.0",
|
||||
"quick-xml",
|
||||
"serde",
|
||||
"time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "png"
|
||||
version = "0.18.0"
|
||||
@@ -7320,6 +7608,7 @@ dependencies = [
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"native-tls",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
@@ -8874,6 +9163,27 @@ dependencies = [
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syntect"
|
||||
version = "5.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "656b45c05d95a5704399aeef6bd0ddec7b2b3531b7c9e900abbf7c4d2190c925"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"flate2",
|
||||
"fnv",
|
||||
"once_cell",
|
||||
"onig",
|
||||
"plist",
|
||||
"regex-syntax 0.8.8",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"thiserror 2.0.18",
|
||||
"walkdir",
|
||||
"yaml-rust",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sys-locale"
|
||||
version = "0.3.2"
|
||||
@@ -9610,18 +9920,6 @@ dependencies = [
|
||||
"tree-sitter-language",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tree-sitter-highlight"
|
||||
version = "0.25.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "adc5f880ad8d8f94e88cb81c3557024cf1a8b75e3b504c50481ed4f5a6006ff3"
|
||||
dependencies = [
|
||||
"regex",
|
||||
"streaming-iterator",
|
||||
"thiserror 2.0.18",
|
||||
"tree-sitter",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tree-sitter-language"
|
||||
version = "0.1.7"
|
||||
@@ -9689,6 +9987,17 @@ dependencies = [
|
||||
"utf-8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "two-face"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b285c51f8a6ade109ed4566d33ac4fb289fb5d6cf87ed70908a5eaf65e948e34"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"syntect",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "type-map"
|
||||
version = "0.5.1"
|
||||
@@ -10302,6 +10611,16 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.54.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49"
|
||||
dependencies = [
|
||||
"windows-core 0.54.0",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.58.0"
|
||||
@@ -10333,6 +10652,16 @@ dependencies = [
|
||||
"windows-core 0.62.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-core"
|
||||
version = "0.54.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65"
|
||||
dependencies = [
|
||||
"windows-result 0.1.2",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-core"
|
||||
version = "0.58.0"
|
||||
@@ -10441,6 +10770,15 @@ dependencies = [
|
||||
"windows-strings 0.5.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8"
|
||||
dependencies = [
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.2.0"
|
||||
@@ -10953,6 +11291,15 @@ dependencies = [
|
||||
"lzma-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "yaml-rust"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
|
||||
dependencies = [
|
||||
"linked-hash-map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "yansi"
|
||||
version = "1.0.1"
|
||||
|
||||
@@ -17,11 +17,12 @@ members = [
|
||||
"cli",
|
||||
"config",
|
||||
"shell-command",
|
||||
"shell-escalation",
|
||||
"skills",
|
||||
"core",
|
||||
"hooks",
|
||||
"secrets",
|
||||
"exec",
|
||||
"exec-server",
|
||||
"execpolicy",
|
||||
"execpolicy-legacy",
|
||||
"keyring-store",
|
||||
@@ -57,10 +58,12 @@ members = [
|
||||
"utils/approval-presets",
|
||||
"utils/oss",
|
||||
"utils/fuzzy-match",
|
||||
"utils/stream-parser",
|
||||
"codex-client",
|
||||
"codex-api",
|
||||
"state",
|
||||
"codex-experimental-api-macros",
|
||||
"test-macros",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
@@ -112,7 +115,10 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
|
||||
codex-rmcp-client = { path = "rmcp-client" }
|
||||
codex-secrets = { path = "secrets" }
|
||||
codex-shell-command = { path = "shell-command" }
|
||||
codex-shell-escalation = { path = "shell-escalation" }
|
||||
codex-skills = { path = "skills" }
|
||||
codex-state = { path = "state" }
|
||||
codex-test-macros = { path = "test-macros" }
|
||||
codex-stdio-to-uds = { path = "stdio-to-uds" }
|
||||
codex-tui = { path = "tui" }
|
||||
codex-utils-absolute-path = { path = "utils/absolute-path" }
|
||||
@@ -132,9 +138,9 @@ codex-utils-rustls-provider = { path = "utils/rustls-provider" }
|
||||
codex-utils-sandbox-summary = { path = "utils/sandbox-summary" }
|
||||
codex-utils-sleep-inhibitor = { path = "utils/sleep-inhibitor" }
|
||||
codex-utils-string = { path = "utils/string" }
|
||||
codex-utils-stream-parser = { path = "utils/stream-parser" }
|
||||
codex-windows-sandbox = { path = "windows-sandbox-rs" }
|
||||
core_test_support = { path = "core/tests/common" }
|
||||
exec_server_test_support = { path = "exec-server/tests/common" }
|
||||
mcp_test_support = { path = "mcp-server/tests/common" }
|
||||
|
||||
# External
|
||||
@@ -158,6 +164,7 @@ clap = "4"
|
||||
clap_complete = "4"
|
||||
color-eyre = "0.6.3"
|
||||
crossbeam-channel = "0.5.15"
|
||||
csv = "1.3.1"
|
||||
crossterm = "0.28.1"
|
||||
ctor = "0.6.3"
|
||||
derive_more = "2"
|
||||
@@ -171,6 +178,7 @@ env_logger = "0.11.9"
|
||||
eventsource-stream = "0.2.3"
|
||||
futures = { version = "0.3", default-features = false }
|
||||
globset = "0.4"
|
||||
gethostname = "1.1.0"
|
||||
http = "1.3.1"
|
||||
icu_decimal = "2.1"
|
||||
icu_locale_core = "2.1"
|
||||
@@ -179,14 +187,13 @@ ignore = "0.4.23"
|
||||
image = { version = "^0.25.9", default-features = false }
|
||||
include_dir = "0.7.4"
|
||||
indexmap = "2.12.0"
|
||||
indoc = "2.0"
|
||||
insta = "1.46.3"
|
||||
inventory = "0.3.19"
|
||||
itertools = "0.14.0"
|
||||
keyring = { version = "3.6", default-features = false }
|
||||
landlock = "0.4.4"
|
||||
lazy_static = "1"
|
||||
libc = "0.2.177"
|
||||
libc = "0.2.182"
|
||||
log = "0.4"
|
||||
lru = "0.16.3"
|
||||
maplit = "1.0.2"
|
||||
@@ -202,7 +209,7 @@ opentelemetry-otlp = "0.31.0"
|
||||
opentelemetry-semantic-conventions = "0.31.0"
|
||||
opentelemetry_sdk = "0.31.0"
|
||||
os_info = "3.12.0"
|
||||
owo-colors = "4.2.0"
|
||||
owo-colors = "4.3.0"
|
||||
path-absolutize = "3.1.1"
|
||||
pathdiff = "0.2"
|
||||
portable-pty = "0.9.0"
|
||||
@@ -274,7 +281,7 @@ tracing-subscriber = "0.3.22"
|
||||
tracing-test = "0.2.5"
|
||||
tree-sitter = "0.25.10"
|
||||
tree-sitter-bash = "0.25"
|
||||
tree-sitter-highlight = "0.25.10"
|
||||
syntect = "5"
|
||||
ts-rs = "11"
|
||||
tungstenite = { version = "0.27.0", features = ["deflate", "proxy"] }
|
||||
uds_windows = "1.1.0"
|
||||
|
||||
@@ -4,14 +4,21 @@ We provide Codex CLI as a standalone, native executable to ensure a zero-depende
|
||||
|
||||
## Installing Codex
|
||||
|
||||
Today, the easiest way to install Codex is via `npm`:
|
||||
Install the latest Codex release directly:
|
||||
|
||||
```shell
|
||||
npm i -g @openai/codex
|
||||
# macOS, Linux, or WSL
|
||||
curl -fsSL https://chatgpt.com/codex/install.sh | sh
|
||||
codex
|
||||
```
|
||||
|
||||
You can also install via Homebrew (`brew install --cask codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
|
||||
```powershell
|
||||
# Windows
|
||||
powershell -c "irm https://chatgpt.com/codex/install.ps1|iex"
|
||||
codex
|
||||
```
|
||||
|
||||
You can also install via npm (`npm i -g @openai/codex`), Homebrew (`brew install --cask codex`), or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases).
|
||||
|
||||
## Documentation quickstart
|
||||
|
||||
|
||||
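After either install path above, a quick sanity check is to run the binary the installer put on `PATH`; the flags below are the usual clap-generated ones rather than anything specific to this diff:

```shell
# Confirm the CLI is installed and discoverable.
codex --version
codex --help
```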
@@ -1,6 +1,28 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ReviewDecision": {
|
||||
"description": "User's decision in response to an ExecApprovalRequest.",
|
||||
"oneOf": [
|
||||
@@ -43,6 +65,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"additionalProperties": false,
|
||||
"description": "User chose to persist a network policy rule (allow/deny) for future requests to the same host.",
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"title": "NetworkPolicyAmendmentReviewDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "User has denied this command and the agent should not execute it, but it should continue the session and try something else.",
|
||||
"enum": [
|
||||
|
||||
@@ -376,6 +376,70 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigDetectParams": {
|
||||
"properties": {
|
||||
"cwds": {
|
||||
"description": "Zero or more working directories to include for repo-scoped detection.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"includeHome": {
|
||||
"description": "If true, include detection under the user's home (~/.claude, ~/.codex, etc.).",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigImportParams": {
|
||||
"properties": {
|
||||
"migrationItems": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"migrationItems"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItem": {
|
||||
"properties": {
|
||||
"cwd": {
|
||||
"description": "Null or empty means home-scoped migration; non-empty means repo-scoped migration.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItemType"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"description",
|
||||
"itemType"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItemType": {
|
||||
"enum": [
|
||||
"AGENTS_MD",
|
||||
"CONFIG",
|
||||
"SKILLS",
|
||||
"MCP_SERVER_CONFIG"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"FeedbackUploadParams": {
|
||||
"properties": {
|
||||
"classification": {
|
||||
@@ -1920,6 +1984,13 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"searchTerm": {
|
||||
"description": "Optional substring filter for the extracted thread title.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortKey": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -2190,6 +2261,12 @@
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"serviceName": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3390,6 +3467,54 @@
|
||||
"title": "Config/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"externalAgentConfig/detect"
|
||||
],
|
||||
"title": "ExternalAgentConfig/detectRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigDetectParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "ExternalAgentConfig/detectRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"externalAgentConfig/import"
|
||||
],
|
||||
"title": "ExternalAgentConfig/importRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigImportParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "ExternalAgentConfig/importRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
|
||||
@@ -1,6 +1,97 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"AdditionalFileSystemPermissions": {
|
||||
"properties": {
|
||||
"read": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"write": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalMacOsPermissions": {
|
||||
"properties": {
|
||||
"accessibility": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"automations": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsAutomationValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"calendar": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"preferences": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPreferencesValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalPermissionProfile": {
|
||||
"properties": {
|
||||
"fileSystem": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalFileSystemPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"macos": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalMacOsPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"network": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"CommandAction": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -111,6 +202,29 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"MacOsAutomationValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
]
|
||||
},
|
||||
"MacOsPreferencesValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"NetworkApprovalContext": {
|
||||
"properties": {
|
||||
"host": {
|
||||
@@ -134,6 +248,28 @@
|
||||
"socks5Udp"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
@@ -180,7 +316,7 @@
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional context for managed-network approval prompts."
|
||||
"description": "Optional context for a managed-network approval prompt."
|
||||
},
|
||||
"proposedExecpolicyAmendment": {
|
||||
"description": "Optional proposed execpolicy amendment to allow similar commands without prompting.",
|
||||
@@ -192,6 +328,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposedNetworkPolicyAmendments": {
|
||||
"description": "Optional proposed network policy amendments (allow/deny host) for future requests.",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional explanatory reason (e.g. request for network access).",
|
||||
"type": [
|
||||
|
||||
@@ -42,6 +42,28 @@
|
||||
"title": "AcceptWithExecpolicyAmendmentCommandExecutionApprovalDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"additionalProperties": false,
|
||||
"description": "User chose a persistent network policy rule (allow/deny) for this host.",
|
||||
"properties": {
|
||||
"applyNetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"applyNetworkPolicyAmendment"
|
||||
],
|
||||
"title": "ApplyNetworkPolicyAmendmentCommandExecutionApprovalDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "User denied the command. The agent will continue the turn.",
|
||||
"enum": [
|
||||
@@ -57,6 +79,28 @@
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
|
||||
@@ -1613,6 +1613,17 @@
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"additional_permissions": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PermissionProfile"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional additional filesystem permissions requested for this command."
|
||||
},
|
||||
"approval_id": {
|
||||
"description": "Identifier for this specific approval callback.\n\nWhen absent, the approval is for the command item itself (`call_id`). This is present for subcommand approvals (via execve intercept).",
|
||||
"type": [
|
||||
@@ -1662,6 +1673,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposed_network_policy_amendments": {
|
||||
"description": "Proposed network policy amendments (for example allow/deny this host in future).",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional human-readable reason for the approval (e.g. retry without sandbox).",
|
||||
"type": [
|
||||
@@ -1755,6 +1776,30 @@
|
||||
"title": "DynamicToolCallRequestEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"item_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"skill_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"skill_request_approval"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsgType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"item_id",
|
||||
"skill_name",
|
||||
"type"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -3268,6 +3313,29 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"FileSystemPermissions": {
|
||||
"properties": {
|
||||
"read": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"write": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"FunctionCallOutputBody": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -3461,6 +3529,66 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"MacOsAutomationValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
]
|
||||
},
|
||||
"MacOsPermissions": {
|
||||
"properties": {
|
||||
"accessibility": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"automations": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsAutomationValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"calendar": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"preferences": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPreferencesValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"MacOsPreferencesValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"McpAuthStatus": {
|
||||
"enum": [
|
||||
"unsupported",
|
||||
@@ -3637,6 +3765,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ParsedCommand": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -3756,6 +3906,37 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"PermissionProfile": {
|
||||
"properties": {
|
||||
"file_system": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/FileSystemPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"macos": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"network": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"PlanItemArg": {
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
@@ -6858,6 +7039,17 @@
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"additional_permissions": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PermissionProfile"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional additional filesystem permissions requested for this command."
|
||||
},
|
||||
"approval_id": {
|
||||
"description": "Identifier for this specific approval callback.\n\nWhen absent, the approval is for the command item itself (`call_id`). This is present for subcommand approvals (via execve intercept).",
|
||||
"type": [
|
||||
@@ -6907,6 +7099,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposed_network_policy_amendments": {
|
||||
"description": "Proposed network policy amendments (for example allow/deny this host in future).",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional human-readable reason for the approval (e.g. retry without sandbox).",
|
||||
"type": [
|
||||
@@ -7000,6 +7202,30 @@
|
||||
"title": "DynamicToolCallRequestEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"item_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"skill_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"skill_request_approval"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsgType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"item_id",
|
||||
"skill_name",
|
||||
"type"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
|
||||
@@ -1,6 +1,28 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ReviewDecision": {
|
||||
"description": "User's decision in response to an ExecApprovalRequest.",
|
||||
"oneOf": [
|
||||
@@ -43,6 +65,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"additionalProperties": false,
|
||||
"description": "User chose to persist a network policy rule (allow/deny) for future requests to the same host.",
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"title": "NetworkPolicyAmendmentReviewDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "User has denied this command and the agent should not execute it, but it should continue the session and try something else.",
|
||||
"enum": [
|
||||
|
||||
@@ -1,6 +1,97 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"AdditionalFileSystemPermissions": {
|
||||
"properties": {
|
||||
"read": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"write": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalMacOsPermissions": {
|
||||
"properties": {
|
||||
"accessibility": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"automations": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsAutomationValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"calendar": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"preferences": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPreferencesValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalPermissionProfile": {
|
||||
"properties": {
|
||||
"fileSystem": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalFileSystemPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"macos": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalMacOsPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"network": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ApplyPatchApprovalParams": {
|
||||
"properties": {
|
||||
"callId": {
|
||||
@@ -222,7 +313,7 @@
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional context for managed-network approval prompts."
|
||||
"description": "Optional context for a managed-network approval prompt."
|
||||
},
|
||||
"proposedExecpolicyAmendment": {
|
||||
"description": "Optional proposed execpolicy amendment to allow similar commands without prompting.",
|
||||
@@ -234,6 +325,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposedNetworkPolicyAmendments": {
|
||||
"description": "Optional proposed network policy amendments (allow/deny host) for future requests.",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional explanatory reason (e.g. request for network access).",
|
||||
"type": [
|
||||
@@ -430,6 +531,29 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"MacOsAutomationValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
]
|
||||
},
|
||||
"MacOsPreferencesValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"NetworkApprovalContext": {
|
||||
"properties": {
|
||||
"host": {
|
||||
@@ -454,6 +578,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ParsedCommand": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -576,6 +722,21 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"SkillRequestApprovalParams": {
|
||||
"properties": {
|
||||
"itemId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"itemId",
|
||||
"skillName"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ThreadId": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -737,6 +898,30 @@
|
||||
"title": "Item/tool/requestUserInputRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"skill/requestApproval"
|
||||
],
|
||||
"title": "Skill/requestApprovalRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/SkillRequestApprovalParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Skill/requestApprovalRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Execute a dynamic tool call on the client.",
|
||||
"properties": {
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"itemId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"itemId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "SkillRequestApprovalParams",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"SkillApprovalDecision": {
|
||||
"enum": [
|
||||
"approve",
|
||||
"decline"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"decision": {
|
||||
"$ref": "#/definitions/SkillApprovalDecision"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"decision"
|
||||
],
|
||||
"title": "SkillRequestApprovalResponse",
|
||||
"type": "object"
|
||||
}
|
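Note: the two new schema files above form the payload pair for the `skill/requestApproval` server request: the server sends the skill's item id and name, and the client replies with an approve/decline decision. Below is a minimal TypeScript sketch of a client-side handler under that reading; the type aliases mirror the generated bindings appearing later in this diff, while the handler name, the allow-list, and the skill name are purely illustrative.

```typescript
// Local mirrors of the generated bindings (SkillRequestApprovalParams,
// SkillApprovalDecision, SkillRequestApprovalResponse) shown in this diff.
type SkillRequestApprovalParams = { itemId: string; skillName: string };
type SkillApprovalDecision = "approve" | "decline";
type SkillRequestApprovalResponse = { decision: SkillApprovalDecision };

// Hypothetical allow-list; a real client would typically prompt the user instead.
const autoApprovedSkills = new Set(["example-skill"]);

// Illustrative handler for an incoming `skill/requestApproval` request.
function handleSkillRequestApproval(
  params: SkillRequestApprovalParams,
): SkillRequestApprovalResponse {
  const decision: SkillApprovalDecision = autoApprovedSkills.has(params.skillName)
    ? "approve"
    : "decline";
  return { decision };
}

// Example round trip.
const response = handleSkillRequestApproval({ itemId: "item_1", skillName: "example-skill" });
console.log(response.decision); // "approve"
```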
||||
@@ -1,6 +1,97 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"AdditionalFileSystemPermissions": {
|
||||
"properties": {
|
||||
"read": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"write": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalMacOsPermissions": {
|
||||
"properties": {
|
||||
"accessibility": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"automations": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsAutomationValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"calendar": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"preferences": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPreferencesValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AdditionalPermissionProfile": {
|
||||
"properties": {
|
||||
"fileSystem": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalFileSystemPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"macos": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/AdditionalMacOsPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"network": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"AgentMessageContent": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -1078,6 +1169,54 @@
|
||||
"title": "Config/readRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"externalAgentConfig/detect"
|
||||
],
|
||||
"title": "ExternalAgentConfig/detectRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ExternalAgentConfigDetectParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "ExternalAgentConfig/detectRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"externalAgentConfig/import"
|
||||
],
|
||||
"title": "ExternalAgentConfig/importRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/v2/ExternalAgentConfigImportParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "ExternalAgentConfig/importRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -1310,6 +1449,28 @@
|
||||
"title": "AcceptWithExecpolicyAmendmentCommandExecutionApprovalDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"additionalProperties": false,
|
||||
"description": "User chose a persistent network policy rule (allow/deny) for this host.",
|
||||
"properties": {
|
||||
"applyNetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"applyNetworkPolicyAmendment"
|
||||
],
|
||||
"title": "ApplyNetworkPolicyAmendmentCommandExecutionApprovalDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "User denied the command. The agent will continue the turn.",
|
||||
"enum": [
|
||||
@@ -1372,7 +1533,7 @@
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional context for managed-network approval prompts."
|
||||
"description": "Optional context for a managed-network approval prompt."
|
||||
},
|
||||
"proposedExecpolicyAmendment": {
|
||||
"description": "Optional proposed execpolicy amendment to allow similar commands without prompting.",
|
||||
@@ -1384,6 +1545,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposedNetworkPolicyAmendments": {
|
||||
"description": "Optional proposed network policy amendments (allow/deny host) for future requests.",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional explanatory reason (e.g. request for network access).",
|
||||
"type": [
|
||||
@@ -2674,6 +2845,17 @@
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"additional_permissions": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/PermissionProfile"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"description": "Optional additional filesystem permissions requested for this command."
|
||||
},
|
||||
"approval_id": {
|
||||
"description": "Identifier for this specific approval callback.\n\nWhen absent, the approval is for the command item itself (`call_id`). This is present for subcommand approvals (via execve intercept).",
|
||||
"type": [
|
||||
@@ -2723,6 +2905,16 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"proposed_network_policy_amendments": {
|
||||
"description": "Proposed network policy amendments (for example allow/deny this host in future).",
|
||||
"items": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"reason": {
|
||||
"description": "Optional human-readable reason for the approval (e.g. retry without sandbox).",
|
||||
"type": [
|
||||
@@ -2816,6 +3008,30 @@
|
||||
"title": "DynamicToolCallRequestEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"item_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"skill_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"enum": [
|
||||
"skill_request_approval"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsgType",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"item_id",
|
||||
"skill_name",
|
||||
"type"
|
||||
],
|
||||
"title": "SkillRequestApprovalEventMsg",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
@@ -4472,6 +4688,29 @@
|
||||
"title": "FileChangeRequestApprovalResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"FileSystemPermissions": {
|
||||
"properties": {
|
||||
"read": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"write": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"FuzzyFileSearchParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
@@ -4772,6 +5011,66 @@
|
||||
"title": "JSONRPCResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"MacOsAutomationValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
]
|
||||
},
|
||||
"MacOsPermissions": {
|
||||
"properties": {
|
||||
"accessibility": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"automations": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsAutomationValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"calendar": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"preferences": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPreferencesValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"MacOsPreferencesValue": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"McpInvocation": {
|
||||
"properties": {
|
||||
"arguments": {
|
||||
@@ -4898,6 +5197,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"NetworkPolicyAmendment": {
|
||||
"properties": {
|
||||
"action": {
|
||||
"$ref": "#/definitions/NetworkPolicyRuleAction"
|
||||
},
|
||||
"host": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"host"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"NetworkPolicyRuleAction": {
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"ParsedCommand": {
|
||||
"oneOf": [
|
||||
{
|
||||
@@ -5009,6 +5330,37 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"PermissionProfile": {
|
||||
"properties": {
|
||||
"file_system": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/FileSystemPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"macos": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/MacOsPermissions"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"network": {
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"PlanItemArg": {
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
@@ -5310,6 +5662,28 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"additionalProperties": false,
|
||||
"description": "User chose to persist a network policy rule (allow/deny) for future requests to the same host.",
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"properties": {
|
||||
"network_policy_amendment": {
|
||||
"$ref": "#/definitions/NetworkPolicyAmendment"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"network_policy_amendment"
|
||||
],
|
||||
"title": "NetworkPolicyAmendmentReviewDecision",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "User has denied this command and the agent should not execute it, but it should continue the session and try something else.",
|
||||
"enum": [
|
||||
@@ -6194,6 +6568,30 @@
|
||||
"title": "Item/tool/requestUserInputRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"id": {
|
||||
"$ref": "#/definitions/RequestId"
|
||||
},
|
||||
"method": {
|
||||
"enum": [
|
||||
"skill/requestApproval"
|
||||
],
|
||||
"title": "Skill/requestApprovalRequestMethod",
|
||||
"type": "string"
|
||||
},
|
||||
"params": {
|
||||
"$ref": "#/definitions/SkillRequestApprovalParams"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"method",
|
||||
"params"
|
||||
],
|
||||
"title": "Skill/requestApprovalRequest",
|
||||
"type": "object"
|
||||
},
|
||||
{
|
||||
"description": "Execute a dynamic tool call on the client.",
|
||||
"properties": {
|
||||
@@ -6315,6 +6713,43 @@
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"SkillApprovalDecision": {
|
||||
"enum": [
|
||||
"approve",
|
||||
"decline"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"SkillRequestApprovalParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"itemId": {
|
||||
"type": "string"
|
||||
},
|
||||
"skillName": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"itemId",
|
||||
"skillName"
|
||||
],
|
||||
"title": "SkillRequestApprovalParams",
|
||||
"type": "object"
|
||||
},
|
||||
"SkillRequestApprovalResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"decision": {
|
||||
"$ref": "#/definitions/SkillApprovalDecision"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"decision"
|
||||
],
|
||||
"title": "SkillRequestApprovalResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"StepStatus": {
|
||||
"enum": [
|
||||
"pending",
|
||||
@@ -8738,6 +9173,95 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"ExternalAgentConfigDetectParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cwds": {
|
||||
"description": "Zero or more working directories to include for repo-scoped detection.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"includeHome": {
|
||||
"description": "If true, include detection under the user's home (~/.claude, ~/.codex, etc.).",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"title": "ExternalAgentConfigDetectParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigDetectResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"items": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/ExternalAgentConfigMigrationItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"items"
|
||||
],
|
||||
"title": "ExternalAgentConfigDetectResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigImportParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"migrationItems": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/v2/ExternalAgentConfigMigrationItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"migrationItems"
|
||||
],
|
||||
"title": "ExternalAgentConfigImportParams",
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigImportResponse": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "ExternalAgentConfigImportResponse",
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItem": {
|
||||
"properties": {
|
||||
"cwd": {
|
||||
"description": "Null or empty means home-scoped migration; non-empty means repo-scoped migration.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"$ref": "#/definitions/v2/ExternalAgentConfigMigrationItemType"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"description",
|
||||
"itemType"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItemType": {
|
||||
"enum": [
|
||||
"AGENTS_MD",
|
||||
"CONFIG",
|
||||
"SKILLS",
|
||||
"MCP_SERVER_CONFIG"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"FeedbackUploadParams": {
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
@@ -12509,6 +13033,13 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"searchTerm": {
|
||||
"description": "Optional substring filter for the extracted thread title.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortKey": {
|
||||
"anyOf": [
|
||||
{
|
||||
@@ -12936,6 +13467,12 @@
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"serviceName": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "ThreadStartParams",
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"properties": {
|
||||
"cwds": {
|
||||
"description": "Zero or more working directories to include for repo-scoped detection.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"includeHome": {
|
||||
"description": "If true, include detection under the user's home (~/.claude, ~/.codex, etc.).",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"title": "ExternalAgentConfigDetectParams",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"ExternalAgentConfigMigrationItem": {
|
||||
"properties": {
|
||||
"cwd": {
|
||||
"description": "Null or empty means home-scoped migration; non-empty means repo-scoped migration.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItemType"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"description",
|
||||
"itemType"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItemType": {
|
||||
"enum": [
|
||||
"AGENTS_MD",
|
||||
"CONFIG",
|
||||
"SKILLS",
|
||||
"MCP_SERVER_CONFIG"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"items": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"items"
|
||||
],
|
||||
"title": "ExternalAgentConfigDetectResponse",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"definitions": {
|
||||
"ExternalAgentConfigMigrationItem": {
|
||||
"properties": {
|
||||
"cwd": {
|
||||
"description": "Null or empty means home-scoped migration; non-empty means repo-scoped migration.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItemType"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"description",
|
||||
"itemType"
|
||||
],
|
||||
"type": "object"
|
||||
},
|
||||
"ExternalAgentConfigMigrationItemType": {
|
||||
"enum": [
|
||||
"AGENTS_MD",
|
||||
"CONFIG",
|
||||
"SKILLS",
|
||||
"MCP_SERVER_CONFIG"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"migrationItems": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/ExternalAgentConfigMigrationItem"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"migrationItems"
|
||||
],
|
||||
"title": "ExternalAgentConfigImportParams",
|
||||
"type": "object"
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "ExternalAgentConfigImportResponse",
|
||||
"type": "object"
|
||||
}
|
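Note: taken together, the four schemas above describe a detect-then-import flow for external agent configuration: `externalAgentConfig/detect` returns candidate migration items, and the client hands a (possibly filtered) subset back via `externalAgentConfig/import`. A hedged TypeScript sketch of that flow follows; the type aliases mirror the generated v2 bindings later in this diff, and the sample paths and filtering rule are illustrative only.

```typescript
// Local mirrors of the generated v2 bindings shown later in this diff.
type ExternalAgentConfigMigrationItemType =
  "AGENTS_MD" | "CONFIG" | "SKILLS" | "MCP_SERVER_CONFIG";

type ExternalAgentConfigMigrationItem = {
  itemType: ExternalAgentConfigMigrationItemType;
  description: string;
  // Null or empty means home-scoped migration; non-empty means repo-scoped migration.
  cwd: string | null;
};

type ExternalAgentConfigDetectParams = { includeHome?: boolean; cwds?: string[] | null };
type ExternalAgentConfigDetectResponse = { items: ExternalAgentConfigMigrationItem[] };
type ExternalAgentConfigImportParams = { migrationItems: ExternalAgentConfigMigrationItem[] };

// Illustrative detect request covering the user's home plus one repo checkout.
const detectParams: ExternalAgentConfigDetectParams = {
  includeHome: true,
  cwds: ["/home/user/projects/my-repo"], // hypothetical path
};

// Suppose the server answered with two candidates.
const detected: ExternalAgentConfigDetectResponse = {
  items: [
    { itemType: "AGENTS_MD", description: "AGENTS.md in my-repo", cwd: "/home/user/projects/my-repo" },
    { itemType: "SKILLS", description: "Skills under the home directory", cwd: null },
  ],
};

// Import only the repo-scoped items (an arbitrary, illustrative policy).
const importParams: ExternalAgentConfigImportParams = {
  migrationItems: detected.items.filter((item) => item.cwd !== null),
};
```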
||||
@@ -65,6 +65,13 @@
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"searchTerm": {
|
||||
"description": "Optional substring filter for the extracted thread title.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"sortKey": {
|
||||
"anyOf": [
|
||||
{
|
||||
|
||||
@@ -150,6 +150,12 @@
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"serviceName": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"title": "ThreadStartParams",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -56,6 +56,7 @@ import type { RemoteSkillDownloadedEvent } from "./RemoteSkillDownloadedEvent";
|
||||
import type { RequestUserInputEvent } from "./RequestUserInputEvent";
|
||||
import type { ReviewRequest } from "./ReviewRequest";
|
||||
import type { SessionConfiguredEvent } from "./SessionConfiguredEvent";
|
||||
import type { SkillRequestApprovalEvent } from "./SkillRequestApprovalEvent";
|
||||
import type { StreamErrorEvent } from "./StreamErrorEvent";
|
||||
import type { TerminalInteractionEvent } from "./TerminalInteractionEvent";
|
||||
import type { ThreadNameUpdatedEvent } from "./ThreadNameUpdatedEvent";
|
||||
@@ -78,4 +79,4 @@ import type { WebSearchEndEvent } from "./WebSearchEndEvent";
|
||||
* Response event from the agent
|
||||
* NOTE: Make sure none of these values have optional types, as it will mess up the extension code-gen.
|
||||
*/
|
||||
export type EventMsg = { "type": "error" } & ErrorEvent | { "type": "warning" } & WarningEvent | { "type": "realtime_conversation_started" } & RealtimeConversationStartedEvent | { "type": "realtime_conversation_realtime" } & RealtimeConversationRealtimeEvent | { "type": "realtime_conversation_closed" } & RealtimeConversationClosedEvent | { "type": "model_reroute" } & ModelRerouteEvent | { "type": "context_compacted" } & ContextCompactedEvent | { "type": "thread_rolled_back" } & ThreadRolledBackEvent | { "type": "task_started" } & TurnStartedEvent | { "type": "task_complete" } & TurnCompleteEvent | { "type": "token_count" } & TokenCountEvent | { "type": "agent_message" } & AgentMessageEvent | { "type": "user_message" } & UserMessageEvent | { "type": "agent_message_delta" } & AgentMessageDeltaEvent | { "type": "agent_reasoning" } & AgentReasoningEvent | { "type": "agent_reasoning_delta" } & AgentReasoningDeltaEvent | { "type": "agent_reasoning_raw_content" } & AgentReasoningRawContentEvent | { "type": "agent_reasoning_raw_content_delta" } & AgentReasoningRawContentDeltaEvent | { "type": "agent_reasoning_section_break" } & AgentReasoningSectionBreakEvent | { "type": "session_configured" } & SessionConfiguredEvent | { "type": "thread_name_updated" } & ThreadNameUpdatedEvent | { "type": "mcp_startup_update" } & McpStartupUpdateEvent | { "type": "mcp_startup_complete" } & McpStartupCompleteEvent | { "type": "mcp_tool_call_begin" } & McpToolCallBeginEvent | { "type": "mcp_tool_call_end" } & McpToolCallEndEvent | { "type": "web_search_begin" } & WebSearchBeginEvent | { "type": "web_search_end" } & WebSearchEndEvent | { "type": "exec_command_begin" } & ExecCommandBeginEvent | { "type": "exec_command_output_delta" } & ExecCommandOutputDeltaEvent | { "type": "terminal_interaction" } & TerminalInteractionEvent | { "type": "exec_command_end" } & ExecCommandEndEvent | { "type": "view_image_tool_call" } & ViewImageToolCallEvent | { "type": "exec_approval_request" } & ExecApprovalRequestEvent | { "type": "request_user_input" } & RequestUserInputEvent | { "type": "dynamic_tool_call_request" } & DynamicToolCallRequest | { "type": "elicitation_request" } & ElicitationRequestEvent | { "type": "apply_patch_approval_request" } & ApplyPatchApprovalRequestEvent | { "type": "deprecation_notice" } & DeprecationNoticeEvent | { "type": "background_event" } & BackgroundEventEvent | { "type": "undo_started" } & UndoStartedEvent | { "type": "undo_completed" } & UndoCompletedEvent | { "type": "stream_error" } & StreamErrorEvent | { "type": "patch_apply_begin" } & PatchApplyBeginEvent | { "type": "patch_apply_end" } & PatchApplyEndEvent | { "type": "turn_diff" } & TurnDiffEvent | { "type": "get_history_entry_response" } & GetHistoryEntryResponseEvent | { "type": "mcp_list_tools_response" } & McpListToolsResponseEvent | { "type": "list_custom_prompts_response" } & ListCustomPromptsResponseEvent | { "type": "list_skills_response" } & ListSkillsResponseEvent | { "type": "list_remote_skills_response" } & ListRemoteSkillsResponseEvent | { "type": "remote_skill_downloaded" } & RemoteSkillDownloadedEvent | { "type": "skills_update_available" } | { "type": "plan_update" } & UpdatePlanArgs | { "type": "turn_aborted" } & TurnAbortedEvent | { "type": "shutdown_complete" } | { "type": "entered_review_mode" } & ReviewRequest | { "type": "exited_review_mode" } & ExitedReviewModeEvent | { "type": "raw_response_item" } & RawResponseItemEvent | { "type": "item_started" } & ItemStartedEvent | { "type": "item_completed" } & 
ItemCompletedEvent | { "type": "agent_message_content_delta" } & AgentMessageContentDeltaEvent | { "type": "plan_delta" } & PlanDeltaEvent | { "type": "reasoning_content_delta" } & ReasoningContentDeltaEvent | { "type": "reasoning_raw_content_delta" } & ReasoningRawContentDeltaEvent | { "type": "collab_agent_spawn_begin" } & CollabAgentSpawnBeginEvent | { "type": "collab_agent_spawn_end" } & CollabAgentSpawnEndEvent | { "type": "collab_agent_interaction_begin" } & CollabAgentInteractionBeginEvent | { "type": "collab_agent_interaction_end" } & CollabAgentInteractionEndEvent | { "type": "collab_waiting_begin" } & CollabWaitingBeginEvent | { "type": "collab_waiting_end" } & CollabWaitingEndEvent | { "type": "collab_close_begin" } & CollabCloseBeginEvent | { "type": "collab_close_end" } & CollabCloseEndEvent | { "type": "collab_resume_begin" } & CollabResumeBeginEvent | { "type": "collab_resume_end" } & CollabResumeEndEvent;
|
||||
export type EventMsg = { "type": "error" } & ErrorEvent | { "type": "warning" } & WarningEvent | { "type": "realtime_conversation_started" } & RealtimeConversationStartedEvent | { "type": "realtime_conversation_realtime" } & RealtimeConversationRealtimeEvent | { "type": "realtime_conversation_closed" } & RealtimeConversationClosedEvent | { "type": "model_reroute" } & ModelRerouteEvent | { "type": "context_compacted" } & ContextCompactedEvent | { "type": "thread_rolled_back" } & ThreadRolledBackEvent | { "type": "task_started" } & TurnStartedEvent | { "type": "task_complete" } & TurnCompleteEvent | { "type": "token_count" } & TokenCountEvent | { "type": "agent_message" } & AgentMessageEvent | { "type": "user_message" } & UserMessageEvent | { "type": "agent_message_delta" } & AgentMessageDeltaEvent | { "type": "agent_reasoning" } & AgentReasoningEvent | { "type": "agent_reasoning_delta" } & AgentReasoningDeltaEvent | { "type": "agent_reasoning_raw_content" } & AgentReasoningRawContentEvent | { "type": "agent_reasoning_raw_content_delta" } & AgentReasoningRawContentDeltaEvent | { "type": "agent_reasoning_section_break" } & AgentReasoningSectionBreakEvent | { "type": "session_configured" } & SessionConfiguredEvent | { "type": "thread_name_updated" } & ThreadNameUpdatedEvent | { "type": "mcp_startup_update" } & McpStartupUpdateEvent | { "type": "mcp_startup_complete" } & McpStartupCompleteEvent | { "type": "mcp_tool_call_begin" } & McpToolCallBeginEvent | { "type": "mcp_tool_call_end" } & McpToolCallEndEvent | { "type": "web_search_begin" } & WebSearchBeginEvent | { "type": "web_search_end" } & WebSearchEndEvent | { "type": "exec_command_begin" } & ExecCommandBeginEvent | { "type": "exec_command_output_delta" } & ExecCommandOutputDeltaEvent | { "type": "terminal_interaction" } & TerminalInteractionEvent | { "type": "exec_command_end" } & ExecCommandEndEvent | { "type": "view_image_tool_call" } & ViewImageToolCallEvent | { "type": "exec_approval_request" } & ExecApprovalRequestEvent | { "type": "request_user_input" } & RequestUserInputEvent | { "type": "dynamic_tool_call_request" } & DynamicToolCallRequest | { "type": "skill_request_approval" } & SkillRequestApprovalEvent | { "type": "elicitation_request" } & ElicitationRequestEvent | { "type": "apply_patch_approval_request" } & ApplyPatchApprovalRequestEvent | { "type": "deprecation_notice" } & DeprecationNoticeEvent | { "type": "background_event" } & BackgroundEventEvent | { "type": "undo_started" } & UndoStartedEvent | { "type": "undo_completed" } & UndoCompletedEvent | { "type": "stream_error" } & StreamErrorEvent | { "type": "patch_apply_begin" } & PatchApplyBeginEvent | { "type": "patch_apply_end" } & PatchApplyEndEvent | { "type": "turn_diff" } & TurnDiffEvent | { "type": "get_history_entry_response" } & GetHistoryEntryResponseEvent | { "type": "mcp_list_tools_response" } & McpListToolsResponseEvent | { "type": "list_custom_prompts_response" } & ListCustomPromptsResponseEvent | { "type": "list_skills_response" } & ListSkillsResponseEvent | { "type": "list_remote_skills_response" } & ListRemoteSkillsResponseEvent | { "type": "remote_skill_downloaded" } & RemoteSkillDownloadedEvent | { "type": "skills_update_available" } | { "type": "plan_update" } & UpdatePlanArgs | { "type": "turn_aborted" } & TurnAbortedEvent | { "type": "shutdown_complete" } | { "type": "entered_review_mode" } & ReviewRequest | { "type": "exited_review_mode" } & ExitedReviewModeEvent | { "type": "raw_response_item" } & RawResponseItemEvent | { "type": "item_started" } & 
ItemStartedEvent | { "type": "item_completed" } & ItemCompletedEvent | { "type": "agent_message_content_delta" } & AgentMessageContentDeltaEvent | { "type": "plan_delta" } & PlanDeltaEvent | { "type": "reasoning_content_delta" } & ReasoningContentDeltaEvent | { "type": "reasoning_raw_content_delta" } & ReasoningRawContentDeltaEvent | { "type": "collab_agent_spawn_begin" } & CollabAgentSpawnBeginEvent | { "type": "collab_agent_spawn_end" } & CollabAgentSpawnEndEvent | { "type": "collab_agent_interaction_begin" } & CollabAgentInteractionBeginEvent | { "type": "collab_agent_interaction_end" } & CollabAgentInteractionEndEvent | { "type": "collab_waiting_begin" } & CollabWaitingBeginEvent | { "type": "collab_waiting_end" } & CollabWaitingEndEvent | { "type": "collab_close_begin" } & CollabCloseBeginEvent | { "type": "collab_close_end" } & CollabCloseEndEvent | { "type": "collab_resume_begin" } & CollabResumeBeginEvent | { "type": "collab_resume_end" } & CollabResumeEndEvent;
|
||||
|
||||
@@ -3,7 +3,9 @@
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
import type { NetworkApprovalContext } from "./NetworkApprovalContext";
|
||||
import type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
import type { ParsedCommand } from "./ParsedCommand";
|
||||
import type { PermissionProfile } from "./PermissionProfile";
|
||||
|
||||
export type ExecApprovalRequestEvent = {
|
||||
/**
|
||||
@@ -41,4 +43,12 @@ network_approval_context?: NetworkApprovalContext,
|
||||
/**
|
||||
* Proposed execpolicy amendment that can be applied to allow future runs.
|
||||
*/
|
||||
proposed_execpolicy_amendment?: ExecPolicyAmendment, parsed_cmd: Array<ParsedCommand>, };
|
||||
proposed_execpolicy_amendment?: ExecPolicyAmendment,
|
||||
/**
|
||||
* Proposed network policy amendments (for example allow/deny this host in future).
|
||||
*/
|
||||
proposed_network_policy_amendments?: Array<NetworkPolicyAmendment>,
|
||||
/**
|
||||
* Optional additional filesystem permissions requested for this command.
|
||||
*/
|
||||
additional_permissions?: PermissionProfile, parsed_cmd: Array<ParsedCommand>, };
|
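Note: the hunk above extends `ExecApprovalRequestEvent` with two optional fields, proposed network policy amendments and an additional-permissions profile. The sketch below is a hedged illustration of how a client could summarize just those fields for an approval prompt; the local types mirror `NetworkPolicyAmendment`, `FileSystemPermissions`, and a subset of `PermissionProfile` from this diff, the rest of the event (call id, command, parsed_cmd, ...) is omitted, and the wording is illustrative.

```typescript
// Local mirrors of the generated bindings introduced in this diff.
type NetworkPolicyRuleAction = "allow" | "deny";
type NetworkPolicyAmendment = { host: string; action: NetworkPolicyRuleAction };
type FileSystemPermissions = { read: string[] | null; write: string[] | null };

// Only the new optional fields of ExecApprovalRequestEvent are modeled here;
// the `macos` part of PermissionProfile is also left out for brevity.
type ExecApprovalExtras = {
  proposed_network_policy_amendments?: NetworkPolicyAmendment[];
  additional_permissions?: { network: boolean | null; file_system: FileSystemPermissions | null };
};

// Illustrative helper: turn the new fields into human-readable prompt lines.
function describeApprovalExtras(extras: ExecApprovalExtras): string[] {
  const lines: string[] = [];
  for (const amendment of extras.proposed_network_policy_amendments ?? []) {
    lines.push(`${amendment.action} future requests to ${amendment.host}`);
  }
  const writes = extras.additional_permissions?.file_system?.write ?? [];
  if (writes.length > 0) {
    lines.push(`grant write access to ${writes.join(", ")}`);
  }
  if (extras.additional_permissions?.network) {
    lines.push("grant network access");
  }
  return lines;
}

// Example: a command that wants to reach one host and write to one directory.
console.log(
  describeApprovalExtras({
    proposed_network_policy_amendments: [{ host: "registry.npmjs.org", action: "allow" }],
    additional_permissions: { network: true, file_system: { read: null, write: ["/tmp/build"] } },
  }),
);
```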
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type FileSystemPermissions = { read: Array<string> | null, write: Array<string> | null, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type MacOsAutomationValue = boolean | Array<string>;
|
||||
@@ -0,0 +1,7 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { MacOsAutomationValue } from "./MacOsAutomationValue";
|
||||
import type { MacOsPreferencesValue } from "./MacOsPreferencesValue";
|
||||
|
||||
export type MacOsPermissions = { preferences: MacOsPreferencesValue | null, automations: MacOsAutomationValue | null, accessibility: boolean | null, calendar: boolean | null, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type MacOsPreferencesValue = boolean | string;
|
||||
@@ -0,0 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { NetworkPolicyRuleAction } from "./NetworkPolicyRuleAction";
|
||||
|
||||
export type NetworkPolicyAmendment = { host: string, action: NetworkPolicyRuleAction, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type NetworkPolicyRuleAction = "allow" | "deny";
|
||||
@@ -0,0 +1,7 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { FileSystemPermissions } from "./FileSystemPermissions";
|
||||
import type { MacOsPermissions } from "./MacOsPermissions";
|
||||
|
||||
export type PermissionProfile = { network: boolean | null, file_system: FileSystemPermissions | null, macos: MacOsPermissions | null, };
|
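Note: the generated files above spell out the permission-profile shape used when extra capabilities are requested. A hedged example literal follows; the type aliases mirror the bindings directly above, while the paths, bundle identifiers, and chosen values are illustrative.

```typescript
// Local mirrors of the generated bindings shown directly above.
type MacOsPreferencesValue = boolean | string;
type MacOsAutomationValue = boolean | string[];
type MacOsPermissions = {
  preferences: MacOsPreferencesValue | null;
  automations: MacOsAutomationValue | null;
  accessibility: boolean | null;
  calendar: boolean | null;
};
type FileSystemPermissions = { read: string[] | null; write: string[] | null };
type PermissionProfile = {
  network: boolean | null;
  file_system: FileSystemPermissions | null;
  macos: MacOsPermissions | null;
};

// Illustrative profile: read-only access to one directory, no network access,
// macOS automation limited to two hypothetical app bundles, calendar allowed.
const exampleProfile: PermissionProfile = {
  network: false,
  file_system: { read: ["/Users/dev/project"], write: null },
  macos: {
    preferences: false,
    automations: ["com.apple.Finder", "com.apple.Terminal"],
    accessibility: null,
    calendar: true,
  },
};
```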
||||
@@ -2,8 +2,9 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
import type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
|
||||
/**
|
||||
* User's decision in response to an ExecApprovalRequest.
|
||||
*/
|
||||
export type ReviewDecision = "approved" | { "approved_execpolicy_amendment": { proposed_execpolicy_amendment: ExecPolicyAmendment, } } | "approved_for_session" | "denied" | "abort";
|
||||
export type ReviewDecision = "approved" | { "approved_execpolicy_amendment": { proposed_execpolicy_amendment: ExecPolicyAmendment, } } | "approved_for_session" | { "network_policy_amendment": { network_policy_amendment: NetworkPolicyAmendment, } } | "denied" | "abort";
|
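Note: the updated `ReviewDecision` adds a variant carrying a persistent network policy rule. A hedged sketch of constructing that variant follows; the doubly nested `network_policy_amendment` key matches both the generated type above and the JSON schema earlier in this diff, the execpolicy-amendment variant is left out for brevity, and the host name is illustrative.

```typescript
// Local mirrors of the relevant pieces of the generated bindings.
type NetworkPolicyRuleAction = "allow" | "deny";
type NetworkPolicyAmendment = { host: string; action: NetworkPolicyRuleAction };
type ReviewDecision =
  | "approved"
  | "approved_for_session"
  | { network_policy_amendment: { network_policy_amendment: NetworkPolicyAmendment } }
  | "denied"
  | "abort"; // the approved_execpolicy_amendment variant is omitted in this sketch

// Illustrative decision: remember that requests to this host should be allowed.
const decision: ReviewDecision = {
  network_policy_amendment: {
    network_policy_amendment: { host: "api.example.com", action: "allow" },
  },
};
```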
||||
|
||||
@@ -8,9 +8,10 @@ import type { ChatgptAuthTokensRefreshParams } from "./v2/ChatgptAuthTokensRefre
|
||||
import type { CommandExecutionRequestApprovalParams } from "./v2/CommandExecutionRequestApprovalParams";
|
||||
import type { DynamicToolCallParams } from "./v2/DynamicToolCallParams";
|
||||
import type { FileChangeRequestApprovalParams } from "./v2/FileChangeRequestApprovalParams";
|
||||
import type { SkillRequestApprovalParams } from "./v2/SkillRequestApprovalParams";
|
||||
import type { ToolRequestUserInputParams } from "./v2/ToolRequestUserInputParams";
|
||||
|
||||
/**
|
||||
* Request initiated from the server and sent to the client.
|
||||
*/
|
||||
export type ServerRequest = { "method": "item/commandExecution/requestApproval", id: RequestId, params: CommandExecutionRequestApprovalParams, } | { "method": "item/fileChange/requestApproval", id: RequestId, params: FileChangeRequestApprovalParams, } | { "method": "item/tool/requestUserInput", id: RequestId, params: ToolRequestUserInputParams, } | { "method": "item/tool/call", id: RequestId, params: DynamicToolCallParams, } | { "method": "account/chatgptAuthTokens/refresh", id: RequestId, params: ChatgptAuthTokensRefreshParams, } | { "method": "applyPatchApproval", id: RequestId, params: ApplyPatchApprovalParams, } | { "method": "execCommandApproval", id: RequestId, params: ExecCommandApprovalParams, };
|
||||
export type ServerRequest = { "method": "item/commandExecution/requestApproval", id: RequestId, params: CommandExecutionRequestApprovalParams, } | { "method": "item/fileChange/requestApproval", id: RequestId, params: FileChangeRequestApprovalParams, } | { "method": "item/tool/requestUserInput", id: RequestId, params: ToolRequestUserInputParams, } | { "method": "skill/requestApproval", id: RequestId, params: SkillRequestApprovalParams, } | { "method": "item/tool/call", id: RequestId, params: DynamicToolCallParams, } | { "method": "account/chatgptAuthTokens/refresh", id: RequestId, params: ChatgptAuthTokensRefreshParams, } | { "method": "applyPatchApproval", id: RequestId, params: ApplyPatchApprovalParams, } | { "method": "execCommandApproval", id: RequestId, params: ExecCommandApprovalParams, };
|
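Note: with `skill/requestApproval` added to `ServerRequest`, a client that dispatches on `method` needs one more branch. A minimal sketch follows; only two request variants are modeled, `RequestId` is simplified to a number, and the handler bodies are placeholders rather than real client behavior.

```typescript
// Simplified local stand-ins; the real bindings define RequestId and full param types.
type RequestId = number;
type SkillRequestApprovalParams = { itemId: string; skillName: string };

// Only the subset of ServerRequest variants needed for this sketch.
type ServerRequestSubset =
  | { method: "skill/requestApproval"; id: RequestId; params: SkillRequestApprovalParams }
  | { method: "item/tool/requestUserInput"; id: RequestId; params: unknown };

function dispatch(request: ServerRequestSubset): void {
  switch (request.method) {
    case "skill/requestApproval":
      // Placeholder: a real client would prompt the user and answer with a
      // SkillRequestApprovalResponse ({ decision: "approve" | "decline" }).
      console.log(`skill ${request.params.skillName} wants approval (item ${request.params.itemId})`);
      break;
    case "item/tool/requestUserInput":
      console.log("user input requested");
      break;
  }
}
```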
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type SkillRequestApprovalEvent = { item_id: string, skill_name: string, };
|
||||
@@ -71,6 +71,7 @@ export type { ExecOutputStream } from "./ExecOutputStream";
|
||||
export type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
export type { ExitedReviewModeEvent } from "./ExitedReviewModeEvent";
|
||||
export type { FileChange } from "./FileChange";
|
||||
export type { FileSystemPermissions } from "./FileSystemPermissions";
|
||||
export type { ForcedLoginMethod } from "./ForcedLoginMethod";
|
||||
export type { ForkConversationParams } from "./ForkConversationParams";
|
||||
export type { ForkConversationResponse } from "./ForkConversationResponse";
|
||||
@@ -116,6 +117,9 @@ export type { LoginApiKeyResponse } from "./LoginApiKeyResponse";
|
||||
export type { LoginChatGptCompleteNotification } from "./LoginChatGptCompleteNotification";
|
||||
export type { LoginChatGptResponse } from "./LoginChatGptResponse";
|
||||
export type { LogoutChatGptResponse } from "./LogoutChatGptResponse";
|
||||
export type { MacOsAutomationValue } from "./MacOsAutomationValue";
|
||||
export type { MacOsPermissions } from "./MacOsPermissions";
|
||||
export type { MacOsPreferencesValue } from "./MacOsPreferencesValue";
|
||||
export type { McpAuthStatus } from "./McpAuthStatus";
|
||||
export type { McpInvocation } from "./McpInvocation";
|
||||
export type { McpListToolsResponseEvent } from "./McpListToolsResponseEvent";
|
||||
@@ -132,12 +136,15 @@ export type { ModelRerouteReason } from "./ModelRerouteReason";
|
||||
export type { NetworkAccess } from "./NetworkAccess";
|
||||
export type { NetworkApprovalContext } from "./NetworkApprovalContext";
|
||||
export type { NetworkApprovalProtocol } from "./NetworkApprovalProtocol";
|
||||
export type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
export type { NetworkPolicyRuleAction } from "./NetworkPolicyRuleAction";
|
||||
export type { NewConversationParams } from "./NewConversationParams";
|
||||
export type { NewConversationResponse } from "./NewConversationResponse";
|
||||
export type { ParsedCommand } from "./ParsedCommand";
|
||||
export type { PatchApplyBeginEvent } from "./PatchApplyBeginEvent";
|
||||
export type { PatchApplyEndEvent } from "./PatchApplyEndEvent";
|
||||
export type { PatchApplyStatus } from "./PatchApplyStatus";
|
||||
export type { PermissionProfile } from "./PermissionProfile";
|
||||
export type { Personality } from "./Personality";
|
||||
export type { PlanDeltaEvent } from "./PlanDeltaEvent";
|
||||
export type { PlanItem } from "./PlanItem";
|
||||
@@ -201,6 +208,7 @@ export type { SkillDependencies } from "./SkillDependencies";
|
||||
export type { SkillErrorInfo } from "./SkillErrorInfo";
|
||||
export type { SkillInterface } from "./SkillInterface";
|
||||
export type { SkillMetadata } from "./SkillMetadata";
|
||||
export type { SkillRequestApprovalEvent } from "./SkillRequestApprovalEvent";
|
||||
export type { SkillScope } from "./SkillScope";
|
||||
export type { SkillToolDependency } from "./SkillToolDependency";
|
||||
export type { SkillsListEntry } from "./SkillsListEntry";
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type AdditionalFileSystemPermissions = { read: Array<string> | null, write: Array<string> | null, };
|
||||
@@ -0,0 +1,7 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { MacOsAutomationValue } from "../MacOsAutomationValue";
|
||||
import type { MacOsPreferencesValue } from "../MacOsPreferencesValue";
|
||||
|
||||
export type AdditionalMacOsPermissions = { preferences: MacOsPreferencesValue | null, automations: MacOsAutomationValue | null, accessibility: boolean | null, calendar: boolean | null, };
|
||||
@@ -0,0 +1,7 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { AdditionalFileSystemPermissions } from "./AdditionalFileSystemPermissions";
|
||||
import type { AdditionalMacOsPermissions } from "./AdditionalMacOsPermissions";
|
||||
|
||||
export type AdditionalPermissionProfile = { network: boolean | null, fileSystem: AdditionalFileSystemPermissions | null, macos: AdditionalMacOsPermissions | null, };
|
||||
@@ -2,5 +2,6 @@
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
import type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
|
||||
export type CommandExecutionApprovalDecision = "accept" | "acceptForSession" | { "acceptWithExecpolicyAmendment": { execpolicy_amendment: ExecPolicyAmendment, } } | "decline" | "cancel";
|
||||
export type CommandExecutionApprovalDecision = "accept" | "acceptForSession" | { "acceptWithExecpolicyAmendment": { execpolicy_amendment: ExecPolicyAmendment, } } | { "applyNetworkPolicyAmendment": { network_policy_amendment: NetworkPolicyAmendment, } } | "decline" | "cancel";
|
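Note: the v2 `CommandExecutionApprovalDecision` gains a matching variant. A small hedged example follows; the camelCase outer key wrapping a snake_case payload field mirrors the generated type above, the execpolicy variant is omitted for brevity, and the host is illustrative.

```typescript
// Local mirrors of the generated v2 bindings.
type NetworkPolicyRuleAction = "allow" | "deny";
type NetworkPolicyAmendment = { host: string; action: NetworkPolicyRuleAction };
type CommandExecutionApprovalDecision =
  | "accept"
  | "acceptForSession"
  | { applyNetworkPolicyAmendment: { network_policy_amendment: NetworkPolicyAmendment } }
  | "decline"
  | "cancel"; // acceptWithExecpolicyAmendment omitted in this sketch

// Illustrative decision: deny this host for future requests as well.
const decision: CommandExecutionApprovalDecision = {
  applyNetworkPolicyAmendment: {
    network_policy_amendment: { host: "tracking.example.net", action: "deny" },
  },
};
```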
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { AdditionalPermissionProfile } from "./AdditionalPermissionProfile";
|
||||
import type { CommandAction } from "./CommandAction";
|
||||
import type { ExecPolicyAmendment } from "./ExecPolicyAmendment";
|
||||
import type { NetworkApprovalContext } from "./NetworkApprovalContext";
|
||||
import type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
|
||||
export type CommandExecutionRequestApprovalParams = { threadId: string, turnId: string, itemId: string,
|
||||
/**
|
||||
@@ -21,7 +23,7 @@ approvalId?: string | null,
|
||||
*/
|
||||
reason?: string | null,
|
||||
/**
|
||||
* Optional context for managed-network approval prompts.
|
||||
* Optional context for a managed-network approval prompt.
|
||||
*/
|
||||
networkApprovalContext?: NetworkApprovalContext | null,
|
||||
/**
|
||||
@@ -36,7 +38,15 @@ cwd?: string | null,
|
||||
* Best-effort parsed command actions for friendly display.
|
||||
*/
|
||||
commandActions?: Array<CommandAction> | null,
|
||||
/**
|
||||
* Optional additional permissions requested for this command.
|
||||
*/
|
||||
additionalPermissions?: AdditionalPermissionProfile | null,
|
||||
/**
|
||||
* Optional proposed execpolicy amendment to allow similar commands without prompting.
|
||||
*/
|
||||
proposedExecpolicyAmendment?: ExecPolicyAmendment | null, };
|
||||
proposedExecpolicyAmendment?: ExecPolicyAmendment | null,
|
||||
/**
|
||||
* Optional proposed network policy amendments (allow/deny host) for future requests.
|
||||
*/
|
||||
proposedNetworkPolicyAmendments?: Array<NetworkPolicyAmendment> | null, };
|
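Note: on the v2 app-server surface, the same information arrives on `CommandExecutionRequestApprovalParams` in camelCase form (`additionalPermissions`, `proposedNetworkPolicyAmendments`). The sketch below is a hedged illustration of building prompt text from just those two optional fields; the other required params (threadId, turnId, itemId, ...) are ignored, the macOS portion is stubbed out, and the phrasing is illustrative.

```typescript
// Local mirrors of the v2 bindings relevant to the two new optional fields.
type NetworkPolicyAmendment = { host: string; action: "allow" | "deny" };
type AdditionalFileSystemPermissions = { read: string[] | null; write: string[] | null };
type AdditionalPermissionProfile = {
  network: boolean | null;
  fileSystem: AdditionalFileSystemPermissions | null;
  macos: unknown; // AdditionalMacOsPermissions, not needed for this sketch
};

function summarizeRequest(params: {
  additionalPermissions?: AdditionalPermissionProfile | null;
  proposedNetworkPolicyAmendments?: NetworkPolicyAmendment[] | null;
}): string {
  const parts: string[] = [];
  const writes = params.additionalPermissions?.fileSystem?.write ?? [];
  if (writes.length > 0) {
    parts.push(`write access to ${writes.join(", ")}`);
  }
  for (const amendment of params.proposedNetworkPolicyAmendments ?? []) {
    parts.push(`${amendment.action} ${amendment.host} in the future`);
  }
  return parts.length > 0 ? `This command also requests: ${parts.join("; ")}` : "";
}

// Illustrative usage.
console.log(
  summarizeRequest({
    additionalPermissions: { network: null, fileSystem: { read: null, write: ["/srv/cache"] }, macos: null },
    proposedNetworkPolicyAmendments: [{ host: "crates.io", action: "allow" }],
  }),
);
```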
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type ExternalAgentConfigDetectParams = {
|
||||
/**
|
||||
* If true, include detection under the user's home (~/.claude, ~/.codex, etc.).
|
||||
*/
|
||||
includeHome?: boolean,
|
||||
/**
|
||||
* Zero or more working directories to include for repo-scoped detection.
|
||||
*/
|
||||
cwds?: Array<string> | null, };
|
||||
@@ -0,0 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExternalAgentConfigMigrationItem } from "./ExternalAgentConfigMigrationItem";
|
||||
|
||||
export type ExternalAgentConfigDetectResponse = { items: Array<ExternalAgentConfigMigrationItem>, };
|
||||
@@ -0,0 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExternalAgentConfigMigrationItem } from "./ExternalAgentConfigMigrationItem";
|
||||
|
||||
export type ExternalAgentConfigImportParams = { migrationItems: Array<ExternalAgentConfigMigrationItem>, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type ExternalAgentConfigImportResponse = Record<string, never>;
|
||||
@@ -0,0 +1,10 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { ExternalAgentConfigMigrationItemType } from "./ExternalAgentConfigMigrationItemType";
|
||||
|
||||
export type ExternalAgentConfigMigrationItem = { itemType: ExternalAgentConfigMigrationItemType, description: string,
|
||||
/**
|
||||
* Null or empty means home-scoped migration; non-empty means repo-scoped migration.
|
||||
*/
|
||||
cwd: string | null, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type ExternalAgentConfigMigrationItemType = "AGENTS_MD" | "CONFIG" | "SKILLS" | "MCP_SERVER_CONFIG";
|
||||
@@ -0,0 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { NetworkPolicyRuleAction } from "./NetworkPolicyRuleAction";
|
||||
|
||||
export type NetworkPolicyAmendment = { host: string, action: NetworkPolicyRuleAction, };
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type NetworkPolicyRuleAction = "allow" | "deny";
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type SkillApprovalDecision = "approve" | "decline";
|
||||
@@ -0,0 +1,5 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type SkillRequestApprovalParams = { itemId: string, skillName: string, };
|
||||
@@ -0,0 +1,6 @@
|
||||
// GENERATED CODE! DO NOT MODIFY BY HAND!
|
||||
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
import type { SkillApprovalDecision } from "./SkillApprovalDecision";
|
||||
|
||||
export type SkillRequestApprovalResponse = { decision: SkillApprovalDecision, };
|
||||
@@ -36,4 +36,8 @@ archived?: boolean | null,
|
||||
* Optional cwd filter; when set, only threads whose session cwd exactly
|
||||
* matches this path are returned.
|
||||
*/
|
||||
cwd?: string | null, };
|
||||
cwd?: string | null,
|
||||
/**
|
||||
* Optional substring filter for the extracted thread title.
|
||||
*/
|
||||
searchTerm?: string | null, };
|
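Note: this hunk adds an optional `searchTerm` filter alongside the existing `cwd` filter on the thread-listing parameters (the surrounding type name is not visible in the hunk). A hedged example of combining the filters follows; the local type models only the three fields shown, and the path and search string are illustrative.

```typescript
// Only the fields visible in the hunk above are modeled here; the real binding
// for the thread-listing params has more fields and its own name.
type ThreadListFilters = {
  archived?: boolean | null;
  cwd?: string | null;
  searchTerm?: string | null;
};

// Illustrative query: non-archived threads for one checkout whose title mentions "refactor".
const filters: ThreadListFilters = {
  archived: false,
  cwd: "/home/user/projects/my-repo", // hypothetical path
  searchTerm: "refactor",
};
```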
||||
|
||||
@@ -6,7 +6,7 @@ import type { JsonValue } from "../serde_json/JsonValue";
|
||||
import type { AskForApproval } from "./AskForApproval";
|
||||
import type { SandboxMode } from "./SandboxMode";
|
||||
|
||||
export type ThreadStartParams = {model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, ephemeral?: boolean | null, /**
|
||||
export type ThreadStartParams = {model?: string | null, modelProvider?: string | null, cwd?: string | null, approvalPolicy?: AskForApproval | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, serviceName?: string | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, ephemeral?: boolean | null, /**
|
||||
* If true, opt into emitting raw Responses API items on the event stream.
|
||||
* This is for internal use only (e.g. Codex Cloud).
|
||||
*/
|
||||
|
||||
@@ -4,6 +4,9 @@ export type { Account } from "./Account";
|
||||
export type { AccountLoginCompletedNotification } from "./AccountLoginCompletedNotification";
|
||||
export type { AccountRateLimitsUpdatedNotification } from "./AccountRateLimitsUpdatedNotification";
|
||||
export type { AccountUpdatedNotification } from "./AccountUpdatedNotification";
|
||||
export type { AdditionalFileSystemPermissions } from "./AdditionalFileSystemPermissions";
|
||||
export type { AdditionalMacOsPermissions } from "./AdditionalMacOsPermissions";
|
||||
export type { AdditionalPermissionProfile } from "./AdditionalPermissionProfile";
|
||||
export type { AgentMessageDeltaNotification } from "./AgentMessageDeltaNotification";
|
||||
export type { AnalyticsConfig } from "./AnalyticsConfig";
|
||||
export type { AppBranding } from "./AppBranding";
|
||||
@@ -65,6 +68,12 @@ export type { ExperimentalFeature } from "./ExperimentalFeature";
|
||||
export type { ExperimentalFeatureListParams } from "./ExperimentalFeatureListParams";
|
||||
export type { ExperimentalFeatureListResponse } from "./ExperimentalFeatureListResponse";
|
||||
export type { ExperimentalFeatureStage } from "./ExperimentalFeatureStage";
|
||||
export type { ExternalAgentConfigDetectParams } from "./ExternalAgentConfigDetectParams";
|
||||
export type { ExternalAgentConfigDetectResponse } from "./ExternalAgentConfigDetectResponse";
|
||||
export type { ExternalAgentConfigImportParams } from "./ExternalAgentConfigImportParams";
|
||||
export type { ExternalAgentConfigImportResponse } from "./ExternalAgentConfigImportResponse";
|
||||
export type { ExternalAgentConfigMigrationItem } from "./ExternalAgentConfigMigrationItem";
|
||||
export type { ExternalAgentConfigMigrationItemType } from "./ExternalAgentConfigMigrationItemType";
|
||||
export type { FeedbackUploadParams } from "./FeedbackUploadParams";
|
||||
export type { FeedbackUploadResponse } from "./FeedbackUploadResponse";
|
||||
export type { FileChangeApprovalDecision } from "./FileChangeApprovalDecision";
|
||||
@@ -103,6 +112,8 @@ export type { ModelReroutedNotification } from "./ModelReroutedNotification";
|
||||
export type { NetworkAccess } from "./NetworkAccess";
|
||||
export type { NetworkApprovalContext } from "./NetworkApprovalContext";
|
||||
export type { NetworkApprovalProtocol } from "./NetworkApprovalProtocol";
|
||||
export type { NetworkPolicyAmendment } from "./NetworkPolicyAmendment";
|
||||
export type { NetworkPolicyRuleAction } from "./NetworkPolicyRuleAction";
|
||||
export type { NetworkRequirements } from "./NetworkRequirements";
|
||||
export type { OverriddenMetadata } from "./OverriddenMetadata";
|
||||
export type { PatchApplyStatus } from "./PatchApplyStatus";
|
||||
@@ -128,10 +139,13 @@ export type { SandboxMode } from "./SandboxMode";
|
||||
export type { SandboxPolicy } from "./SandboxPolicy";
|
||||
export type { SandboxWorkspaceWrite } from "./SandboxWorkspaceWrite";
|
||||
export type { SessionSource } from "./SessionSource";
|
||||
export type { SkillApprovalDecision } from "./SkillApprovalDecision";
|
||||
export type { SkillDependencies } from "./SkillDependencies";
|
||||
export type { SkillErrorInfo } from "./SkillErrorInfo";
|
||||
export type { SkillInterface } from "./SkillInterface";
|
||||
export type { SkillMetadata } from "./SkillMetadata";
|
||||
export type { SkillRequestApprovalParams } from "./SkillRequestApprovalParams";
|
||||
export type { SkillRequestApprovalResponse } from "./SkillRequestApprovalResponse";
|
||||
export type { SkillScope } from "./SkillScope";
|
||||
export type { SkillToolDependency } from "./SkillToolDependency";
|
||||
export type { SkillsConfigWriteParams } from "./SkillsConfigWriteParams";
|
||||
|
||||
@@ -1947,6 +1947,15 @@ mod tests {
|
||||
let thread_start_ts =
|
||||
fs::read_to_string(output_dir.join("v2").join("ThreadStartParams.ts"))?;
|
||||
assert_eq!(thread_start_ts.contains("mockExperimentalField"), true);
|
||||
let command_execution_request_approval_ts = fs::read_to_string(
|
||||
output_dir
|
||||
.join("v2")
|
||||
.join("CommandExecutionRequestApprovalParams.ts"),
|
||||
)?;
|
||||
assert_eq!(
|
||||
command_execution_request_approval_ts.contains("additionalPermissions"),
|
||||
true
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -2083,6 +2092,12 @@ export type Config = { stableField: Keep, unstableField: string | null } & ({ [k
|
||||
let thread_start_json =
|
||||
fs::read_to_string(output_dir.join("v2").join("ThreadStartParams.json"))?;
|
||||
assert_eq!(thread_start_json.contains("mockExperimentalField"), false);
|
||||
let command_execution_request_approval_json =
|
||||
fs::read_to_string(output_dir.join("CommandExecutionRequestApprovalParams.json"))?;
|
||||
assert_eq!(
|
||||
command_execution_request_approval_json.contains("additionalPermissions"),
|
||||
false
|
||||
);
|
||||
|
||||
let client_request_json = fs::read_to_string(output_dir.join("ClientRequest.json"))?;
|
||||
assert_eq!(
|
||||
@@ -2093,6 +2108,7 @@ export type Config = { stableField: Keep, unstableField: string | null } & ({ [k
|
||||
let bundle_json =
|
||||
fs::read_to_string(output_dir.join("codex_app_server_protocol.schemas.json"))?;
|
||||
assert_eq!(bundle_json.contains("mockExperimentalField"), false);
|
||||
assert_eq!(bundle_json.contains("additionalPermissions"), false);
|
||||
assert_eq!(bundle_json.contains("MockExperimentalMethodParams"), false);
|
||||
assert_eq!(
|
||||
bundle_json.contains("MockExperimentalMethodResponse"),
|
||||
|
||||
@@ -350,6 +350,14 @@ client_request_definitions! {
|
||||
params: v2::ConfigReadParams,
|
||||
response: v2::ConfigReadResponse,
|
||||
},
|
||||
ExternalAgentConfigDetect => "externalAgentConfig/detect" {
|
||||
params: v2::ExternalAgentConfigDetectParams,
|
||||
response: v2::ExternalAgentConfigDetectResponse,
|
||||
},
|
||||
ExternalAgentConfigImport => "externalAgentConfig/import" {
|
||||
params: v2::ExternalAgentConfigImportParams,
|
||||
response: v2::ExternalAgentConfigImportResponse,
|
||||
},
|
||||
ConfigValueWrite => "config/value/write" {
|
||||
params: v2::ConfigValueWriteParams,
|
||||
response: v2::ConfigWriteResponse,
|
||||
@@ -501,6 +509,7 @@ macro_rules! server_request_definitions {
|
||||
) => {
|
||||
/// Request initiated from the server and sent to the client.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
#[serde(tag = "method", rename_all = "camelCase")]
|
||||
pub enum ServerRequest {
|
||||
$(
|
||||
@@ -515,6 +524,7 @@ macro_rules! server_request_definitions {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, JsonSchema)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum ServerRequestPayload {
|
||||
$( $variant($params), )*
|
||||
}
|
||||
@@ -671,6 +681,11 @@ server_request_definitions! {
|
||||
response: v2::ToolRequestUserInputResponse,
|
||||
},
|
||||
|
||||
SkillRequestApproval => "skill/requestApproval" {
|
||||
params: v2::SkillRequestApprovalParams,
|
||||
response: v2::SkillRequestApprovalResponse,
|
||||
},
|
||||
|
||||
/// Execute a dynamic tool call on the client.
|
||||
DynamicToolCall => "item/tool/call" {
|
||||
params: v2::DynamicToolCallParams,
|
||||
@@ -1366,4 +1381,34 @@ mod tests {
|
||||
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(&request);
|
||||
assert_eq!(reason, Some("mock/experimentalMethod"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_execution_request_approval_additional_permissions_is_marked_experimental() {
|
||||
let params = v2::CommandExecutionRequestApprovalParams {
|
||||
thread_id: "thr_123".to_string(),
|
||||
turn_id: "turn_123".to_string(),
|
||||
item_id: "call_123".to_string(),
|
||||
approval_id: None,
|
||||
reason: None,
|
||||
network_approval_context: None,
|
||||
command: Some("cat file".to_string()),
|
||||
cwd: None,
|
||||
command_actions: None,
|
||||
additional_permissions: Some(v2::AdditionalPermissionProfile {
|
||||
network: None,
|
||||
file_system: Some(v2::AdditionalFileSystemPermissions {
|
||||
read: Some(vec![std::path::PathBuf::from("/tmp/allowed")]),
|
||||
write: None,
|
||||
}),
|
||||
macos: None,
|
||||
}),
|
||||
proposed_execpolicy_amendment: None,
|
||||
proposed_network_policy_amendments: None,
|
||||
};
|
||||
let reason = crate::experimental_api::ExperimentalApi::experimental_reason(¶ms);
|
||||
assert_eq!(
|
||||
reason,
|
||||
Some("item/commandExecution/requestApproval.additionalPermissions")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@ use codex_protocol::account::PlanType;
|
||||
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
|
||||
use codex_protocol::approvals::NetworkApprovalContext as CoreNetworkApprovalContext;
|
||||
use codex_protocol::approvals::NetworkApprovalProtocol as CoreNetworkApprovalProtocol;
|
||||
use codex_protocol::approvals::NetworkPolicyAmendment as CoreNetworkPolicyAmendment;
|
||||
use codex_protocol::approvals::NetworkPolicyRuleAction as CoreNetworkPolicyRuleAction;
|
||||
use codex_protocol::config_types::CollaborationMode;
|
||||
use codex_protocol::config_types::CollaborationModeMask;
|
||||
use codex_protocol::config_types::ForcedLoginMethod;
|
||||
@@ -20,7 +22,12 @@ use codex_protocol::items::TurnItem as CoreTurnItem;
|
||||
use codex_protocol::mcp::Resource as McpResource;
|
||||
use codex_protocol::mcp::ResourceTemplate as McpResourceTemplate;
|
||||
use codex_protocol::mcp::Tool as McpTool;
|
||||
use codex_protocol::models::FileSystemPermissions as CoreFileSystemPermissions;
|
||||
use codex_protocol::models::MacOsAutomationValue as CoreMacOsAutomationValue;
|
||||
use codex_protocol::models::MacOsPermissions as CoreMacOsPermissions;
|
||||
use codex_protocol::models::MacOsPreferencesValue as CoreMacOsPreferencesValue;
|
||||
use codex_protocol::models::MessagePhase;
|
||||
use codex_protocol::models::PermissionProfile as CorePermissionProfile;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::openai_models::InputModality;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
@@ -633,6 +640,64 @@ pub struct ConfigRequirementsReadResponse {
|
||||
pub requirements: Option<ConfigRequirements>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, JsonSchema, TS)]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub enum ExternalAgentConfigMigrationItemType {
|
||||
#[serde(rename = "AGENTS_MD")]
|
||||
#[ts(rename = "AGENTS_MD")]
|
||||
AgentsMd,
|
||||
#[serde(rename = "CONFIG")]
|
||||
#[ts(rename = "CONFIG")]
|
||||
Config,
|
||||
#[serde(rename = "SKILLS")]
|
||||
#[ts(rename = "SKILLS")]
|
||||
Skills,
|
||||
#[serde(rename = "MCP_SERVER_CONFIG")]
|
||||
#[ts(rename = "MCP_SERVER_CONFIG")]
|
||||
McpServerConfig,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ExternalAgentConfigMigrationItem {
|
||||
pub item_type: ExternalAgentConfigMigrationItemType,
|
||||
pub description: String,
|
||||
/// Null or empty means home-scoped migration; non-empty means repo-scoped migration.
|
||||
pub cwd: Option<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ExternalAgentConfigDetectResponse {
|
||||
pub items: Vec<ExternalAgentConfigMigrationItem>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ExternalAgentConfigDetectParams {
|
||||
/// If true, include detection under the user's home (~/.claude, ~/.codex, etc.).
|
||||
#[serde(default, skip_serializing_if = "std::ops::Not::not")]
|
||||
pub include_home: bool,
|
||||
/// Zero or more working directories to include for repo-scoped detection.
|
||||
#[ts(optional = nullable)]
|
||||
pub cwds: Option<Vec<PathBuf>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ExternalAgentConfigImportParams {
|
||||
pub migration_items: Vec<ExternalAgentConfigMigrationItem>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct ExternalAgentConfigImportResponse {}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -681,6 +746,10 @@ pub enum CommandExecutionApprovalDecision {
|
||||
AcceptWithExecpolicyAmendment {
|
||||
execpolicy_amendment: ExecPolicyAmendment,
|
||||
},
|
||||
/// User chose a persistent network policy rule (allow/deny) for this host.
|
||||
ApplyNetworkPolicyAmendment {
|
||||
network_policy_amendment: NetworkPolicyAmendment,
|
||||
},
|
||||
/// User denied the command. The agent will continue the turn.
|
||||
Decline,
|
||||
/// User denied the command. The turn will also be immediately interrupted.
|
||||
@@ -713,6 +782,63 @@ impl From<CoreNetworkApprovalContext> for NetworkApprovalContext {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct AdditionalFileSystemPermissions {
|
||||
pub read: Option<Vec<PathBuf>>,
|
||||
pub write: Option<Vec<PathBuf>>,
|
||||
}
|
||||
|
||||
impl From<CoreFileSystemPermissions> for AdditionalFileSystemPermissions {
|
||||
fn from(value: CoreFileSystemPermissions) -> Self {
|
||||
Self {
|
||||
read: value.read,
|
||||
write: value.write,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct AdditionalMacOsPermissions {
|
||||
pub preferences: Option<CoreMacOsPreferencesValue>,
|
||||
pub automations: Option<CoreMacOsAutomationValue>,
|
||||
pub accessibility: Option<bool>,
|
||||
pub calendar: Option<bool>,
|
||||
}
|
||||
|
||||
impl From<CoreMacOsPermissions> for AdditionalMacOsPermissions {
|
||||
fn from(value: CoreMacOsPermissions) -> Self {
|
||||
Self {
|
||||
preferences: value.preferences,
|
||||
automations: value.automations,
|
||||
accessibility: value.accessibility,
|
||||
calendar: value.calendar,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct AdditionalPermissionProfile {
|
||||
pub network: Option<bool>,
|
||||
pub file_system: Option<AdditionalFileSystemPermissions>,
|
||||
pub macos: Option<AdditionalMacOsPermissions>,
|
||||
}
|
||||
|
||||
impl From<CorePermissionProfile> for AdditionalPermissionProfile {
|
||||
fn from(value: CorePermissionProfile) -> Self {
|
||||
Self {
|
||||
network: value.network,
|
||||
file_system: value.file_system.map(AdditionalFileSystemPermissions::from),
|
||||
macos: value.macos.map(AdditionalMacOsPermissions::from),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
@@ -910,6 +1036,38 @@ impl From<CoreExecPolicyAmendment> for ExecPolicyAmendment {
|
||||
}
|
||||
}
|
||||
|
||||
v2_enum_from_core!(
|
||||
pub enum NetworkPolicyRuleAction from CoreNetworkPolicyRuleAction {
|
||||
Allow, Deny
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct NetworkPolicyAmendment {
|
||||
pub host: String,
|
||||
pub action: NetworkPolicyRuleAction,
|
||||
}
|
||||
|
||||
impl NetworkPolicyAmendment {
|
||||
pub fn into_core(self) -> CoreNetworkPolicyAmendment {
|
||||
CoreNetworkPolicyAmendment {
|
||||
host: self.host,
|
||||
action: self.action.to_core(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CoreNetworkPolicyAmendment> for NetworkPolicyAmendment {
|
||||
fn from(value: CoreNetworkPolicyAmendment) -> Self {
|
||||
Self {
|
||||
host: value.host,
|
||||
action: NetworkPolicyRuleAction::from(value.action),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(tag = "type", rename_all = "camelCase")]
|
||||
#[ts(tag = "type")]
|
||||
@@ -1561,6 +1719,8 @@ pub struct ThreadStartParams {
|
||||
#[ts(optional = nullable)]
|
||||
pub config: Option<HashMap<String, JsonValue>>,
|
||||
#[ts(optional = nullable)]
|
||||
pub service_name: Option<String>,
|
||||
#[ts(optional = nullable)]
|
||||
pub base_instructions: Option<String>,
|
||||
#[ts(optional = nullable)]
|
||||
pub developer_instructions: Option<String>,
|
||||
@@ -1861,6 +2021,9 @@ pub struct ThreadListParams {
|
||||
/// matches this path are returned.
|
||||
#[ts(optional = nullable)]
|
||||
pub cwd: Option<String>,
|
||||
/// Optional substring filter for the extracted thread title.
|
||||
#[ts(optional = nullable)]
|
||||
pub search_term: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
@@ -3359,7 +3522,7 @@ pub struct ContextCompactedNotification {
|
||||
pub turn_id: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, ExperimentalApi)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct CommandExecutionRequestApprovalParams {
|
||||
@@ -3380,7 +3543,7 @@ pub struct CommandExecutionRequestApprovalParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub reason: Option<String>,
|
||||
/// Optional context for managed-network approval prompts.
|
||||
/// Optional context for a managed-network approval prompt.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub network_approval_context: Option<NetworkApprovalContext>,
|
||||
@@ -3396,10 +3559,28 @@ pub struct CommandExecutionRequestApprovalParams {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub command_actions: Option<Vec<CommandAction>>,
|
||||
/// Optional additional permissions requested for this command.
|
||||
#[experimental("item/commandExecution/requestApproval.additionalPermissions")]
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub additional_permissions: Option<AdditionalPermissionProfile>,
|
||||
/// Optional proposed execpolicy amendment to allow similar commands without prompting.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
|
||||
/// Optional proposed network policy amendments (allow/deny host) for future requests.
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
#[ts(optional = nullable)]
|
||||
pub proposed_network_policy_amendments: Option<Vec<NetworkPolicyAmendment>>,
|
||||
}
|
||||
|
||||
impl CommandExecutionRequestApprovalParams {
|
||||
pub fn strip_experimental_fields(&mut self) {
|
||||
// TODO: Avoid hardcoding individual experimental fields here.
|
||||
// We need a generic outbound compatibility design for stripping or
|
||||
// otherwise handling experimental server->client payloads.
|
||||
self.additional_permissions = None;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
@@ -3431,6 +3612,29 @@ pub struct FileChangeRequestApprovalResponse {
|
||||
pub decision: FileChangeApprovalDecision,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS, ExperimentalApi)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct SkillRequestApprovalParams {
|
||||
pub item_id: String,
|
||||
pub skill_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub enum SkillApprovalDecision {
|
||||
Approve,
|
||||
Decline,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
pub struct SkillRequestApprovalResponse {
|
||||
pub decision: SkillApprovalDecision,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export_to = "v2/")]
|
||||
|
||||
@@ -57,6 +57,9 @@ use codex_app_server_protocol::SendUserMessageParams;
|
||||
use codex_app_server_protocol::SendUserMessageResponse;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use codex_app_server_protocol::SkillApprovalDecision;
|
||||
use codex_app_server_protocol::SkillRequestApprovalParams;
|
||||
use codex_app_server_protocol::SkillRequestApprovalResponse;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadListParams;
|
||||
use codex_app_server_protocol::ThreadListResponse;
|
||||
@@ -168,6 +171,9 @@ enum CliCommand {
|
||||
},
|
||||
/// Send a user message through the app-server V2 thread/turn APIs.
|
||||
SendMessageV2 {
|
||||
/// Opt into experimental app-server methods and fields.
|
||||
#[arg(long)]
|
||||
experimental_api: bool,
|
||||
/// User message to send to Codex.
|
||||
user_message: String,
|
||||
},
|
||||
@@ -257,9 +263,18 @@ pub fn run() -> Result<()> {
|
||||
let endpoint = resolve_endpoint(codex_bin, url)?;
|
||||
send_message(&endpoint, &config_overrides, user_message)
|
||||
}
|
||||
CliCommand::SendMessageV2 { user_message } => {
|
||||
CliCommand::SendMessageV2 {
|
||||
experimental_api,
|
||||
user_message,
|
||||
} => {
|
||||
let endpoint = resolve_endpoint(codex_bin, url)?;
|
||||
send_message_v2_endpoint(&endpoint, &config_overrides, user_message, &dynamic_tools)
|
||||
send_message_v2_endpoint(
|
||||
&endpoint,
|
||||
&config_overrides,
|
||||
user_message,
|
||||
experimental_api,
|
||||
&dynamic_tools,
|
||||
)
|
||||
}
|
||||
CliCommand::ResumeMessageV2 {
|
||||
thread_id,
|
||||
@@ -505,19 +520,31 @@ pub fn send_message_v2(
|
||||
dynamic_tools: &Option<Vec<DynamicToolSpec>>,
|
||||
) -> Result<()> {
|
||||
let endpoint = Endpoint::SpawnCodex(codex_bin.to_path_buf());
|
||||
send_message_v2_endpoint(&endpoint, config_overrides, user_message, dynamic_tools)
|
||||
send_message_v2_endpoint(
|
||||
&endpoint,
|
||||
config_overrides,
|
||||
user_message,
|
||||
true,
|
||||
dynamic_tools,
|
||||
)
|
||||
}
|
||||
|
||||
fn send_message_v2_endpoint(
|
||||
endpoint: &Endpoint,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
experimental_api: bool,
|
||||
dynamic_tools: &Option<Vec<DynamicToolSpec>>,
|
||||
) -> Result<()> {
|
||||
if dynamic_tools.is_some() && !experimental_api {
|
||||
bail!("--dynamic-tools requires --experimental-api for send-message-v2");
|
||||
}
|
||||
|
||||
send_message_v2_with_policies(
|
||||
endpoint,
|
||||
config_overrides,
|
||||
user_message,
|
||||
experimental_api,
|
||||
None,
|
||||
None,
|
||||
dynamic_tools,
|
||||
@@ -687,6 +714,7 @@ fn trigger_cmd_approval(
|
||||
endpoint,
|
||||
config_overrides,
|
||||
message,
|
||||
true,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly {
|
||||
access: ReadOnlyAccess::FullAccess,
|
||||
@@ -708,6 +736,7 @@ fn trigger_patch_approval(
|
||||
endpoint,
|
||||
config_overrides,
|
||||
message,
|
||||
true,
|
||||
Some(AskForApproval::OnRequest),
|
||||
Some(SandboxPolicy::ReadOnly {
|
||||
access: ReadOnlyAccess::FullAccess,
|
||||
@@ -726,6 +755,7 @@ fn no_trigger_cmd_approval(
|
||||
endpoint,
|
||||
config_overrides,
|
||||
prompt.to_string(),
|
||||
true,
|
||||
None,
|
||||
None,
|
||||
dynamic_tools,
|
||||
@@ -736,13 +766,14 @@ fn send_message_v2_with_policies(
|
||||
endpoint: &Endpoint,
|
||||
config_overrides: &[String],
|
||||
user_message: String,
|
||||
experimental_api: bool,
|
||||
approval_policy: Option<AskForApproval>,
|
||||
sandbox_policy: Option<SandboxPolicy>,
|
||||
dynamic_tools: &Option<Vec<DynamicToolSpec>>,
|
||||
) -> Result<()> {
|
||||
let mut client = CodexClient::connect(endpoint, config_overrides)?;
|
||||
|
||||
let initialize = client.initialize()?;
|
||||
let initialize = client.initialize_with_experimental_api(experimental_api)?;
|
||||
println!("< initialize response: {initialize:?}");
|
||||
|
||||
let thread_response = client.thread_start(ThreadStartParams {
|
||||
@@ -885,6 +916,7 @@ fn thread_list(endpoint: &Endpoint, config_overrides: &[String], limit: u32) ->
|
||||
source_kinds: None,
|
||||
archived: None,
|
||||
cwd: None,
|
||||
search_term: None,
|
||||
})?;
|
||||
println!("< thread/list response: {response:?}");
|
||||
|
||||
@@ -1029,6 +1061,13 @@ impl CodexClient {
|
||||
}
|
||||
|
||||
fn initialize(&mut self) -> Result<InitializeResponse> {
|
||||
self.initialize_with_experimental_api(true)
|
||||
}
|
||||
|
||||
fn initialize_with_experimental_api(
|
||||
&mut self,
|
||||
experimental_api: bool,
|
||||
) -> Result<InitializeResponse> {
|
||||
let request_id = self.request_id();
|
||||
let request = ClientRequest::Initialize {
|
||||
request_id: request_id.clone(),
|
||||
@@ -1039,7 +1078,7 @@ impl CodexClient {
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
},
|
||||
capabilities: Some(InitializeCapabilities {
|
||||
experimental_api: true,
|
||||
experimental_api,
|
||||
opt_out_notification_methods: Some(
|
||||
NOTIFICATIONS_TO_OPT_OUT
|
||||
.iter()
|
||||
@@ -1472,6 +1511,9 @@ impl CodexClient {
|
||||
ServerRequest::FileChangeRequestApproval { request_id, params } => {
|
||||
self.approve_file_change_request(request_id, params)?;
|
||||
}
|
||||
ServerRequest::SkillRequestApproval { request_id, params } => {
|
||||
self.approve_skill_request(request_id, params)?;
|
||||
}
|
||||
other => {
|
||||
bail!("received unsupported server request: {other:?}");
|
||||
}
|
||||
@@ -1495,7 +1537,9 @@ impl CodexClient {
|
||||
command,
|
||||
cwd,
|
||||
command_actions,
|
||||
additional_permissions,
|
||||
proposed_execpolicy_amendment,
|
||||
proposed_network_policy_amendments,
|
||||
} = params;
|
||||
|
||||
println!(
|
||||
@@ -1521,9 +1565,15 @@ impl CodexClient {
|
||||
{
|
||||
println!("< command actions: {command_actions:?}");
|
||||
}
|
||||
if let Some(additional_permissions) = additional_permissions.as_ref() {
|
||||
println!("< additional permissions: {additional_permissions:?}");
|
||||
}
|
||||
if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
|
||||
println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
|
||||
}
|
||||
if let Some(network_policy_amendments) = proposed_network_policy_amendments.as_ref() {
|
||||
println!("< proposed network policy amendments: {network_policy_amendments:?}");
|
||||
}
|
||||
|
||||
let decision = match self.command_approval_behavior {
|
||||
CommandApprovalBehavior::AlwaysAccept => CommandExecutionApprovalDecision::Accept,
|
||||
@@ -1543,6 +1593,22 @@ impl CodexClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn approve_skill_request(
|
||||
&mut self,
|
||||
request_id: RequestId,
|
||||
params: SkillRequestApprovalParams,
|
||||
) -> Result<()> {
|
||||
println!(
|
||||
"\n< skill approval requested for item {}, skill {}",
|
||||
params.item_id, params.skill_name
|
||||
);
|
||||
let response = SkillRequestApprovalResponse {
|
||||
decision: SkillApprovalDecision::Approve,
|
||||
};
|
||||
self.send_server_request_response(request_id, &response)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn approve_file_change_request(
|
||||
&mut self,
|
||||
request_id: RequestId,
|
||||
|
||||
@@ -65,6 +65,7 @@ axum = { workspace = true, default-features = false, features = [
|
||||
base64 = { workspace = true }
|
||||
codex-execpolicy = { workspace = true }
|
||||
core_test_support = { workspace = true }
|
||||
codex-state = { workspace = true }
|
||||
codex-utils-cargo-bin = { workspace = true }
|
||||
os_info = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
|
||||
@@ -122,7 +122,7 @@ Example with notification opt-out:
- `thread/start` — create a new thread; emits `thread/started` and auto-subscribes you to turn/item events for that thread.
- `thread/resume` — reopen an existing thread by id so subsequent `turn/start` calls append to it.
- `thread/fork` — fork an existing thread into a new thread id by copying the stored history; emits `thread/started` and auto-subscribes you to turn/item events for the new thread.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders`, `sourceKinds`, `archived`, and `cwd` filters. Each returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
- `thread/list` — page through stored rollouts; supports cursor-based pagination and optional `modelProviders`, `sourceKinds`, `archived`, `cwd`, and `searchTerm` filters. Each returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
- `thread/loaded/list` — list the thread ids currently loaded in memory.
- `thread/read` — read a stored thread by id without resuming it; optionally include turns via `includeTurns`. The returned `thread` includes `status` (`ThreadStatus`), defaulting to `notLoaded` when the thread is not currently loaded.
- `thread/status/changed` — notification emitted when a loaded thread’s status changes (`threadId` + new `status`).
@@ -153,6 +153,8 @@ Example with notification opt-out:
- `feedback/upload` — submit a feedback report (classification + optional reason/logs, conversation_id, and optional `extraLogFiles` attachments array); returns the tracking thread id.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `config/read` — fetch the effective config on disk after resolving config layering.
- `externalAgentConfig/detect` — detect migratable external-agent artifacts with `includeHome` and optional `cwds`; each detected item includes `cwd` (`null` for home).
- `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home).
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), `enforceResidency`, and `network` constraints.
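For illustration only (not part of the diff), a minimal sketch of how the new `externalAgentConfig/detect` and `externalAgentConfig/import` calls might look on the wire, assuming the standard JSON-RPC framing used elsewhere in these docs. Field names follow the `ExternalAgentConfigDetectParams`, `ExternalAgentConfigMigrationItem`, and `ExternalAgentConfigImportParams` types added in this diff; the request ids, description strings, and paths are made up.

```json
// Hypothetical exchange; ids, descriptions, and paths are illustrative only.
{ "jsonrpc": "2.0", "id": 10, "method": "externalAgentConfig/detect",
  "params": { "includeHome": true, "cwds": ["/work/my-repo"] } }

{ "jsonrpc": "2.0", "id": 10, "result": { "items": [
  { "itemType": "AGENTS_MD", "description": "external agent instructions found in /work/my-repo", "cwd": "/work/my-repo" },
  { "itemType": "MCP_SERVER_CONFIG", "description": "MCP server config found under the home directory", "cwd": null }
] } }

{ "jsonrpc": "2.0", "id": 11, "method": "externalAgentConfig/import",
  "params": { "migrationItems": [
    { "itemType": "AGENTS_MD", "description": "external agent instructions found in /work/my-repo", "cwd": "/work/my-repo" }
  ] } }

{ "jsonrpc": "2.0", "id": 11, "result": {} }
```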
@@ -170,6 +172,7 @@ Start a fresh thread when you need a new Codex conversation.
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
"personality": "friendly",
"serviceName": "my_app_server_client", // optional metrics tag (`service_name`)
// Experimental: requires opt-in
"dynamicTools": [
{
@@ -229,6 +232,7 @@ Experimental API: `thread/start`, `thread/resume`, and `thread/fork` accept `per
- `sourceKinds` — restrict results to specific sources; omit or pass `[]` for interactive sessions only (`cli`, `vscode`).
- `archived` — when `true`, list archived threads only. When `false` or `null`, list non-archived threads (default).
- `cwd` — restrict results to threads whose session cwd exactly matches this path.
- `searchTerm` — restrict results to threads whose extracted title contains this substring (case-sensitive).
- Responses include `agentNickname` and `agentRole` for AgentControl-spawned thread sub-agents when available.
Example:
@@ -660,15 +664,15 @@ When an upstream HTTP status is available (for example, from the Responses API o
Certain actions (shell commands or modifying files) may require explicit user approval depending on the user's config. When `turn/start` is used, the app-server drives an approval flow by sending a server-initiated JSON-RPC request to the client. The client must respond to tell Codex whether to proceed. UIs should present these requests inline with the active turn so users can review the proposed command or diff before choosing.
- Requests include `threadId` and `turnId`—use them to scope UI state to the active conversation.
- Respond with a single `{ "decision": "accept" | "decline" }` payload (plus optional `acceptSettings` on command executions). The server resumes or declines the work and ends the item with `item/completed`.
- Respond with a single `{ "decision": ... }` payload. Command approvals support `accept`, `acceptForSession`, `acceptWithExecpolicyAmendment`, `applyNetworkPolicyAmendment`, `decline`, or `cancel`. The server resumes or declines the work and ends the item with `item/completed`.
### Command execution approvals
Order of messages:
1. `item/started` — shows the pending `commandExecution` item with `command`, `cwd`, and other fields so you can render the proposed action.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `approvalId` (for subcommand callbacks), and `reason`. For normal command approvals, it also includes `command`, `cwd`, and `commandActions` for friendly display. For network-only approvals, those command fields may be omitted and `networkApprovalContext` is provided instead.
3. Client response — `{ "decision": "accept", "acceptSettings": { "forSession": false } }` or `{ "decision": "decline" }`.
2. `item/commandExecution/requestApproval` (request) — carries the same `itemId`, `threadId`, `turnId`, optionally `approvalId` (for subcommand callbacks), and `reason`. For normal command approvals, it also includes `command`, `cwd`, and `commandActions` for friendly display. When `initialize.params.capabilities.experimentalApi = true`, it may also include experimental `additionalPermissions` describing requested per-command sandbox access. For network-only approvals, those command fields may be omitted and `networkApprovalContext` is provided instead. Optional persistence hints may also be included via `proposedExecpolicyAmendment` and `proposedNetworkPolicyAmendments`.
3. Client response — for example `{ "decision": "accept" }`, `{ "decision": "acceptForSession" }`, `{ "decision": { "acceptWithExecpolicyAmendment": { "execpolicy_amendment": [...] } } }`, `{ "decision": { "applyNetworkPolicyAmendment": { "network_policy_amendment": { "host": "example.com", "action": "allow" } } } }`, `{ "decision": "decline" }`, or `{ "decision": "cancel" }`.
4. `item/completed` — final `commandExecution` item with `status: "completed" | "failed" | "declined"` and execution output. Render this as the authoritative result.
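For illustration only (not part of the diff), a request/response pair for such an approval might look roughly like the following, assuming the usual JSON-RPC framing. The id, command, and paths are made up; `additionalPermissions` is the experimental field described above (sent only to clients that opted into `experimentalApi`), and the response reuses the `applyNetworkPolicyAmendment` shape shown in step 3.

```json
// Hypothetical server -> client approval request; values are illustrative only.
{ "jsonrpc": "2.0", "id": 7, "method": "item/commandExecution/requestApproval",
  "params": {
    "threadId": "thr_123", "turnId": "turn_123", "itemId": "call_123",
    "command": "cat /tmp/allowed/file", "cwd": "/work/my-repo",
    // Experimental: present only when the client opted into experimentalApi.
    "additionalPermissions": { "network": null, "fileSystem": { "read": ["/tmp/allowed"], "write": null }, "macos": null },
    "proposedNetworkPolicyAmendments": [ { "host": "example.com", "action": "allow" } ]
  } }

// One possible client response: persist an allow rule for the host and continue.
{ "jsonrpc": "2.0", "id": 7,
  "result": { "decision": { "applyNetworkPolicyAmendment": {
    "network_policy_amendment": { "host": "example.com", "action": "allow" } } } } }
```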
### File change approvals
@@ -1072,6 +1076,8 @@ At runtime, clients must send `initialize` with `capabilities.experimentalApi =
3. In `app-server-protocol/src/protocol/common.rs`, keep the method stable and use `inspect_params: true` when only some fields are experimental (like `thread/start`). If the entire method is experimental, annotate the method variant with `#[experimental("method/name")]`.
For server-initiated request payloads, annotate the field the same way so schema generation treats it as experimental, and make sure app-server omits that field when the client did not opt into `experimentalApi`.
4. Regenerate protocol fixtures:
```bash
@@ -11,6 +11,7 @@ use crate::thread_state::TurnSummary;
|
||||
use crate::thread_status::ThreadWatchActiveGuard;
|
||||
use crate::thread_status::ThreadWatchManager;
|
||||
use codex_app_server_protocol::AccountRateLimitsUpdatedNotification;
|
||||
use codex_app_server_protocol::AdditionalPermissionProfile as V2AdditionalPermissionProfile;
|
||||
use codex_app_server_protocol::AgentMessageDeltaNotification;
|
||||
use codex_app_server_protocol::ApplyPatchApprovalParams;
|
||||
use codex_app_server_protocol::ApplyPatchApprovalResponse;
|
||||
@@ -45,6 +46,8 @@ use codex_app_server_protocol::McpToolCallResult;
|
||||
use codex_app_server_protocol::McpToolCallStatus;
|
||||
use codex_app_server_protocol::ModelReroutedNotification;
|
||||
use codex_app_server_protocol::NetworkApprovalContext as V2NetworkApprovalContext;
|
||||
use codex_app_server_protocol::NetworkPolicyAmendment as V2NetworkPolicyAmendment;
|
||||
use codex_app_server_protocol::NetworkPolicyRuleAction as V2NetworkPolicyRuleAction;
|
||||
use codex_app_server_protocol::PatchApplyStatus;
|
||||
use codex_app_server_protocol::PlanDeltaNotification;
|
||||
use codex_app_server_protocol::RawResponseItemCompletedNotification;
|
||||
@@ -53,6 +56,9 @@ use codex_app_server_protocol::ReasoningSummaryTextDeltaNotification;
|
||||
use codex_app_server_protocol::ReasoningTextDeltaNotification;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestPayload;
|
||||
use codex_app_server_protocol::SkillApprovalDecision as V2SkillApprovalDecision;
|
||||
use codex_app_server_protocol::SkillRequestApprovalParams;
|
||||
use codex_app_server_protocol::SkillRequestApprovalResponse;
|
||||
use codex_app_server_protocol::TerminalInteractionNotification;
|
||||
use codex_app_server_protocol::ThreadItem;
|
||||
use codex_app_server_protocol::ThreadNameUpdatedNotification;
|
||||
@@ -97,6 +103,7 @@ use codex_protocol::protocol::TokenCountEvent;
|
||||
use codex_protocol::protocol::TurnDiffEvent;
|
||||
use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer;
|
||||
use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse;
|
||||
use codex_protocol::skill_approval::SkillApprovalResponse as CoreSkillApprovalResponse;
|
||||
use codex_shell_command::parse_command::shlex_join;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
@@ -263,6 +270,8 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
reason,
|
||||
network_approval_context,
|
||||
proposed_execpolicy_amendment,
|
||||
proposed_network_policy_amendments,
|
||||
additional_permissions,
|
||||
parsed_cmd,
|
||||
..
|
||||
} = ev;
|
||||
@@ -325,6 +334,15 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
};
|
||||
let proposed_execpolicy_amendment_v2 =
|
||||
proposed_execpolicy_amendment.map(V2ExecPolicyAmendment::from);
|
||||
let proposed_network_policy_amendments_v2 = proposed_network_policy_amendments
|
||||
.map(|amendments| {
|
||||
amendments
|
||||
.into_iter()
|
||||
.map(V2NetworkPolicyAmendment::from)
|
||||
.collect()
|
||||
});
|
||||
let additional_permissions =
|
||||
additional_permissions.map(V2AdditionalPermissionProfile::from);
|
||||
|
||||
let params = CommandExecutionRequestApprovalParams {
|
||||
thread_id: conversation_id.to_string(),
|
||||
@@ -336,7 +354,9 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
command,
|
||||
cwd,
|
||||
command_actions,
|
||||
additional_permissions,
|
||||
proposed_execpolicy_amendment: proposed_execpolicy_amendment_v2,
|
||||
proposed_network_policy_amendments: proposed_network_policy_amendments_v2,
|
||||
};
|
||||
let rx = outgoing
|
||||
.send_request(ServerRequestPayload::CommandExecutionRequestApproval(
|
||||
@@ -423,6 +443,37 @@ pub(crate) async fn apply_bespoke_event_handling(
|
||||
}
|
||||
}
|
||||
}
|
||||
EventMsg::SkillRequestApproval(request) => {
|
||||
if matches!(api_version, ApiVersion::V2) {
|
||||
let item_id = request.item_id;
|
||||
let skill_name = request.skill_name;
|
||||
let params = SkillRequestApprovalParams {
|
||||
item_id: item_id.clone(),
|
||||
skill_name,
|
||||
};
|
||||
let rx = outgoing
|
||||
.send_request(ServerRequestPayload::SkillRequestApproval(params))
|
||||
.await;
|
||||
tokio::spawn(async move {
|
||||
let approved = match rx.await {
|
||||
Ok(Ok(value)) => {
|
||||
serde_json::from_value::<SkillRequestApprovalResponse>(value)
|
||||
.map(|response| {
|
||||
matches!(response.decision, V2SkillApprovalDecision::Approve)
|
||||
})
|
||||
.unwrap_or(false)
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
let _ = conversation
|
||||
.submit(Op::SkillApproval {
|
||||
id: item_id,
|
||||
response: CoreSkillApprovalResponse { approved },
|
||||
})
|
||||
.await;
|
||||
});
|
||||
}
|
||||
}
|
||||
EventMsg::DynamicToolCallRequest(request) => {
|
||||
if matches!(api_version, ApiVersion::V2) {
|
||||
let call_id = request.call_id;
|
||||
@@ -1875,6 +1926,20 @@ async fn on_command_execution_request_approval_response(
|
||||
},
|
||||
None,
|
||||
),
|
||||
CommandExecutionApprovalDecision::ApplyNetworkPolicyAmendment {
|
||||
network_policy_amendment,
|
||||
} => {
|
||||
let completion_status = match network_policy_amendment.action {
|
||||
V2NetworkPolicyRuleAction::Allow => None,
|
||||
V2NetworkPolicyRuleAction::Deny => Some(CommandExecutionStatus::Declined),
|
||||
};
|
||||
(
|
||||
ReviewDecision::NetworkPolicyAmendment {
|
||||
network_policy_amendment: network_policy_amendment.into_core(),
|
||||
},
|
||||
completion_status,
|
||||
)
|
||||
}
|
||||
CommandExecutionApprovalDecision::Decline => (
|
||||
ReviewDecision::Denied,
|
||||
Some(CommandExecutionStatus::Declined),
|
||||
|
||||
@@ -168,6 +168,7 @@ use codex_app_server_protocol::WindowsSandboxSetupMode;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartParams;
|
||||
use codex_app_server_protocol::WindowsSandboxSetupStartResponse;
|
||||
use codex_app_server_protocol::build_turns_from_rollout_items;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_backend_client::Client as BackendClient;
|
||||
use codex_chatgpt::connectors;
|
||||
use codex_cloud_requirements::cloud_requirements_loader;
|
||||
@@ -189,6 +190,7 @@ use codex_core::auth::login_with_chatgpt_auth_tokens;
|
||||
use codex_core::config::Config;
|
||||
use codex_core::config::ConfigOverrides;
|
||||
use codex_core::config::ConfigService;
|
||||
use codex_core::config::NetworkProxyAuditMetadata;
|
||||
use codex_core::config::edit::ConfigEdit;
|
||||
use codex_core::config::edit::ConfigEditsBuilder;
|
||||
use codex_core::config::types::McpServerTransportConfig;
|
||||
@@ -269,6 +271,7 @@ use std::time::SystemTime;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::watch;
|
||||
use toml::Value as TomlValue;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
@@ -288,6 +291,7 @@ struct ThreadListFilters {
|
||||
source_kinds: Option<Vec<ThreadSourceKind>>,
|
||||
archived: bool,
|
||||
cwd: Option<PathBuf>,
|
||||
search_term: Option<String>,
|
||||
}
|
||||
|
||||
// Duration before a ChatGPT login attempt is abandoned.
|
||||
@@ -337,7 +341,7 @@ pub(crate) struct CodexMessageProcessor {
|
||||
auth_manager: Arc<AuthManager>,
|
||||
thread_manager: Arc<ThreadManager>,
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
arg0_paths: Arg0DispatchPaths,
|
||||
config: Arc<Config>,
|
||||
single_client_mode: bool,
|
||||
cli_overrides: Vec<(String, TomlValue)>,
|
||||
@@ -361,7 +365,7 @@ pub(crate) struct CodexMessageProcessorArgs {
|
||||
pub(crate) auth_manager: Arc<AuthManager>,
|
||||
pub(crate) thread_manager: Arc<ThreadManager>,
|
||||
pub(crate) outgoing: Arc<OutgoingMessageSender>,
|
||||
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
pub(crate) arg0_paths: Arg0DispatchPaths,
|
||||
pub(crate) config: Arc<Config>,
|
||||
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
|
||||
pub(crate) cloud_requirements: Arc<RwLock<CloudRequirementsLoader>>,
|
||||
@@ -398,7 +402,7 @@ impl CodexMessageProcessor {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
outgoing,
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
config,
|
||||
cli_overrides,
|
||||
cloud_requirements,
|
||||
@@ -409,7 +413,7 @@ impl CodexMessageProcessor {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
outgoing: outgoing.clone(),
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
config,
|
||||
single_client_mode,
|
||||
cli_overrides,
|
||||
@@ -425,7 +429,7 @@ impl CodexMessageProcessor {
|
||||
|
||||
async fn load_latest_config(&self) -> Result<Config, JSONRPCErrorError> {
|
||||
let cloud_requirements = self.current_cloud_requirements();
|
||||
codex_core::config::ConfigBuilder::default()
|
||||
let mut config = codex_core::config::ConfigBuilder::default()
|
||||
.cli_overrides(self.cli_overrides.clone())
|
||||
.cloud_requirements(cloud_requirements)
|
||||
.build()
|
||||
@@ -434,7 +438,10 @@ impl CodexMessageProcessor {
|
||||
code: INTERNAL_ERROR_CODE,
|
||||
message: format!("failed to reload config: {err}"),
|
||||
data: None,
|
||||
})
|
||||
})?;
|
||||
config.codex_linux_sandbox_exe = self.arg0_paths.codex_linux_sandbox_exe.clone();
|
||||
config.main_execve_wrapper_exe = self.arg0_paths.main_execve_wrapper_exe.clone();
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
fn current_cloud_requirements(&self) -> CloudRequirementsLoader {
|
||||
@@ -816,6 +823,10 @@ impl CodexMessageProcessor {
|
||||
ClientRequest::ConfigRequirementsRead { .. } => {
|
||||
warn!("ConfigRequirementsRead request reached CodexMessageProcessor unexpectedly");
|
||||
}
|
||||
ClientRequest::ExternalAgentConfigDetect { .. }
|
||||
| ClientRequest::ExternalAgentConfigImport { .. } => {
|
||||
warn!("ExternalAgentConfig request reached CodexMessageProcessor unexpectedly");
|
||||
}
|
||||
ClientRequest::GetAccountRateLimits {
|
||||
request_id,
|
||||
params: _,
|
||||
@@ -1741,6 +1752,7 @@ impl CodexMessageProcessor {
|
||||
None,
|
||||
None,
|
||||
managed_network_requirements_enabled,
|
||||
NetworkProxyAuditMetadata::default(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -1758,8 +1770,10 @@ impl CodexMessageProcessor {
|
||||
None => None,
|
||||
};
|
||||
let windows_sandbox_level = WindowsSandboxLevel::from_config(&self.config);
|
||||
let command = params.command;
|
||||
let exec_params = ExecParams {
|
||||
command: params.command,
|
||||
original_command: command.join(" "),
|
||||
command,
|
||||
cwd,
|
||||
expiration: timeout_ms.into(),
|
||||
env,
|
||||
@@ -1789,7 +1803,7 @@ impl CodexMessageProcessor {
|
||||
None => self.config.permissions.sandbox_policy.get().clone(),
|
||||
};
|
||||
|
||||
let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
|
||||
let codex_linux_sandbox_exe = self.arg0_paths.codex_linux_sandbox_exe.clone();
|
||||
let outgoing = self.outgoing.clone();
|
||||
let request_for_task = request;
|
||||
let sandbox_cwd = self.config.cwd.clone();
|
||||
@@ -1854,7 +1868,8 @@ impl CodexMessageProcessor {
|
||||
approval_policy,
|
||||
sandbox_mode,
|
||||
model_provider,
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
base_instructions,
|
||||
developer_instructions,
|
||||
compact_prompt,
|
||||
@@ -1948,6 +1963,7 @@ impl CodexMessageProcessor {
|
||||
approval_policy,
|
||||
sandbox,
|
||||
config,
|
||||
service_name,
|
||||
base_instructions,
|
||||
developer_instructions,
|
||||
dynamic_tools,
|
||||
@@ -2015,7 +2031,12 @@ impl CodexMessageProcessor {
|
||||
|
||||
match self
|
||||
.thread_manager
|
||||
.start_thread_with_tools(config, core_dynamic_tools, persist_extended_history)
|
||||
.start_thread_with_tools_and_service_name(
|
||||
config,
|
||||
core_dynamic_tools,
|
||||
persist_extended_history,
|
||||
service_name,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(new_conv) => {
|
||||
@@ -2108,7 +2129,8 @@ impl CodexMessageProcessor {
|
||||
approval_policy: approval_policy
|
||||
.map(codex_app_server_protocol::AskForApproval::to_core),
|
||||
sandbox_mode: sandbox.map(SandboxMode::to_core),
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
base_instructions,
|
||||
developer_instructions,
|
||||
personality,
|
||||
@@ -2522,6 +2544,7 @@ impl CodexMessageProcessor {
|
||||
source_kinds,
|
||||
archived,
|
||||
cwd,
|
||||
search_term,
|
||||
} = params;
|
||||
|
||||
let requested_page_size = limit
|
||||
@@ -2542,6 +2565,7 @@ impl CodexMessageProcessor {
|
||||
source_kinds,
|
||||
archived: archived.unwrap_or(false),
|
||||
cwd: cwd.map(PathBuf::from),
|
||||
search_term,
|
||||
},
|
||||
)
|
||||
.await
|
||||
@@ -2794,6 +2818,10 @@ impl CodexMessageProcessor {
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(crate) fn subscribe_running_assistant_turn_count(&self) -> watch::Receiver<usize> {
|
||||
self.thread_watch_manager.subscribe_running_turn_count()
|
||||
}
|
||||
|
||||
/// Best-effort: ensure initialized connections are subscribed to this thread.
|
||||
pub(crate) async fn try_attach_thread_listener(
|
||||
&mut self,
|
||||
@@ -3599,6 +3627,7 @@ impl CodexMessageProcessor {
|
||||
source_kinds: None,
|
||||
archived: false,
|
||||
cwd: None,
|
||||
search_term: None,
|
||||
},
|
||||
)
|
||||
.await
|
||||
@@ -3625,6 +3654,7 @@ impl CodexMessageProcessor {
|
||||
source_kinds,
|
||||
archived,
|
||||
cwd,
|
||||
search_term,
|
||||
} = filters;
|
||||
let mut cursor_obj: Option<RolloutCursor> = match cursor.as_ref() {
|
||||
Some(cursor_str) => {
|
||||
@@ -3667,6 +3697,7 @@ impl CodexMessageProcessor {
|
||||
allowed_sources,
|
||||
model_provider_filter.as_deref(),
|
||||
fallback_provider.as_str(),
|
||||
search_term.as_deref(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
@@ -3683,6 +3714,7 @@ impl CodexMessageProcessor {
|
||||
allowed_sources,
|
||||
model_provider_filter.as_deref(),
|
||||
fallback_provider.as_str(),
|
||||
search_term.as_deref(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| JSONRPCErrorError {
|
||||
@@ -4318,7 +4350,8 @@ impl CodexMessageProcessor {
|
||||
approval_policy,
|
||||
sandbox_mode,
|
||||
model_provider,
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
base_instructions,
|
||||
developer_instructions,
|
||||
compact_prompt,
|
||||
@@ -4329,7 +4362,8 @@ impl CodexMessageProcessor {
|
||||
}
|
||||
None => (
|
||||
ConfigOverrides {
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
..Default::default()
|
||||
},
|
||||
None,
|
||||
@@ -4513,7 +4547,8 @@ impl CodexMessageProcessor {
|
||||
approval_policy,
|
||||
sandbox_mode,
|
||||
model_provider,
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
base_instructions,
|
||||
developer_instructions,
|
||||
compact_prompt,
|
||||
@@ -4525,7 +4560,8 @@ impl CodexMessageProcessor {
|
||||
}
|
||||
None => (
|
||||
ConfigOverrides {
|
||||
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
|
||||
codex_linux_sandbox_exe: self.arg0_paths.codex_linux_sandbox_exe.clone(),
|
||||
main_execve_wrapper_exe: self.arg0_paths.main_execve_wrapper_exe.clone(),
|
||||
..Default::default()
|
||||
},
|
||||
None,
|
||||
@@ -5876,7 +5912,7 @@ impl CodexMessageProcessor {
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = &mut cancel_rx => {
|
||||
// User has unsubscribed, so exit this task.
|
||||
// Listener was superseded or the thread is being torn down.
|
||||
break;
|
||||
}
|
||||
event = conversation.next_event() => {
|
||||
@@ -5897,6 +5933,11 @@ impl CodexMessageProcessor {
|
||||
EventMsg::TurnComplete(_) => "task_complete",
|
||||
_ => &event.msg.to_string(),
|
||||
};
|
||||
let request_event_name = format!("codex/event/{event_formatted}");
|
||||
tracing::trace!(
|
||||
conversation_id = %conversation_id,
|
||||
"app-server event: {request_event_name}"
|
||||
);
|
||||
let mut params = match serde_json::to_value(event.clone()) {
|
||||
Ok(serde_json::Value::Object(map)) => map,
|
||||
Ok(_) => {
|
||||
@@ -5931,7 +5972,7 @@ impl CodexMessageProcessor {
|
||||
.send_notification_to_connections(
|
||||
&subscribed_connection_ids,
|
||||
OutgoingNotification {
|
||||
method: format!("codex/event/{event_formatted}"),
|
||||
method: request_event_name,
|
||||
params: Some(params.into()),
|
||||
},
|
||||
)
|
||||
@@ -6296,6 +6337,14 @@ async fn handle_pending_thread_resume_request(
|
||||
let state = thread_state.lock().await;
|
||||
state.active_turn_snapshot()
|
||||
};
|
||||
tracing::debug!(
|
||||
thread_id = %conversation_id,
|
||||
request_id = ?pending.request_id,
|
||||
active_turn_present = active_turn.is_some(),
|
||||
active_turn_id = ?active_turn.as_ref().map(|turn| turn.id.as_str()),
|
||||
active_turn_status = ?active_turn.as_ref().map(|turn| &turn.status),
|
||||
"composing running thread resume response"
|
||||
);
|
||||
let mut has_in_progress_turn = active_turn
|
||||
.as_ref()
|
||||
.is_some_and(|turn| matches!(turn.status, TurnStatus::InProgress));
|
||||
@@ -6506,7 +6555,7 @@ fn skills_to_info(
|
||||
skills
|
||||
.iter()
|
||||
.map(|skill| {
|
||||
let enabled = !disabled_paths.contains(&skill.path);
|
||||
let enabled = !disabled_paths.contains(&skill.path_to_skills_md);
|
||||
codex_app_server_protocol::SkillMetadata {
|
||||
name: skill.name.clone(),
|
||||
description: skill.description.clone(),
|
||||
@@ -6537,7 +6586,7 @@ fn skills_to_info(
|
||||
.collect(),
|
||||
}
|
||||
}),
|
||||
path: skill.path.clone(),
|
||||
path: skill.path_to_skills_md.clone(),
|
||||
scope: skill.scope.into(),
|
||||
enabled,
|
||||
}
|
||||
@@ -7344,8 +7393,8 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn removing_one_listener_does_not_cancel_other_subscriptions_for_same_thread()
|
||||
-> Result<()> {
|
||||
async fn removing_listeners_retains_thread_listener_when_last_subscriber_leaves() -> Result<()>
|
||||
{
|
||||
let mut manager = ThreadStateManager::new();
|
||||
let thread_id = ThreadId::from_string("ad7f0408-99b8-4f6e-a46f-bd0eec433370")?;
|
||||
let listener_a = Uuid::new_v4();
|
||||
@@ -7372,7 +7421,13 @@ mod tests {
|
||||
.is_err()
|
||||
);
|
||||
assert_eq!(manager.remove_listener(listener_b).await, Some(thread_id));
|
||||
assert_eq!(cancel_rx.await, Ok(()));
|
||||
assert!(
|
||||
tokio::time::timeout(Duration::from_millis(20), &mut cancel_rx)
|
||||
.await
|
||||
.is_err()
|
||||
);
|
||||
let state = manager.thread_state(thread_id);
|
||||
assert!(state.lock().await.subscribed_connection_ids().is_empty());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -7424,28 +7479,79 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn removing_connection_clears_subscription_and_listener_when_last_subscriber()
|
||||
async fn removing_connection_retains_listener_and_active_turn_when_last_subscriber_disconnects()
|
||||
-> Result<()> {
|
||||
let mut manager = ThreadStateManager::new();
|
||||
let thread_id = ThreadId::from_string("ad7f0408-99b8-4f6e-a46f-bd0eec433370")?;
|
||||
let listener = Uuid::new_v4();
|
||||
let connection = ConnectionId(1);
|
||||
let (cancel_tx, cancel_rx) = oneshot::channel();
|
||||
let (cancel_tx, mut cancel_rx) = oneshot::channel();
|
||||
|
||||
manager
|
||||
.set_listener(listener, thread_id, connection, false)
|
||||
.await;
|
||||
{
|
||||
let state = manager.thread_state(thread_id);
|
||||
state.lock().await.cancel_tx = Some(cancel_tx);
|
||||
let mut state = state.lock().await;
|
||||
state.cancel_tx = Some(cancel_tx);
|
||||
state.track_current_turn_event(&EventMsg::TurnStarted(
|
||||
codex_protocol::protocol::TurnStartedEvent {
|
||||
turn_id: "turn-1".to_string(),
|
||||
model_context_window: None,
|
||||
collaboration_mode_kind: Default::default(),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
manager.remove_connection(connection).await;
|
||||
assert_eq!(cancel_rx.await, Ok(()));
|
||||
assert!(
|
||||
tokio::time::timeout(Duration::from_millis(20), &mut cancel_rx)
|
||||
.await
|
||||
.is_err()
|
||||
);
|
||||
assert_eq!(manager.remove_listener(listener).await, None);
|
||||
|
||||
let state = manager.thread_state(thread_id);
|
||||
assert!(state.lock().await.subscribed_connection_ids().is_empty());
|
||||
let state = state.lock().await;
|
||||
assert!(state.subscribed_connection_ids().is_empty());
|
||||
assert!(state.cancel_tx.is_some());
|
||||
let active_turn = state.active_turn_snapshot().expect("active turn snapshot");
|
||||
assert_eq!(active_turn.id, "turn-1");
|
||||
assert_eq!(active_turn.status, TurnStatus::InProgress);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn removing_thread_state_clears_listener_and_active_turn_history() -> Result<()> {
|
||||
let mut manager = ThreadStateManager::new();
|
||||
let thread_id = ThreadId::from_string("ad7f0408-99b8-4f6e-a46f-bd0eec433370")?;
|
||||
let connection = ConnectionId(1);
|
||||
let (cancel_tx, cancel_rx) = oneshot::channel();
|
||||
|
||||
manager
|
||||
.ensure_connection_subscribed(thread_id, connection, false)
|
||||
.await;
|
||||
{
|
||||
let state = manager.thread_state(thread_id);
|
||||
let mut state = state.lock().await;
|
||||
state.cancel_tx = Some(cancel_tx);
|
||||
state.track_current_turn_event(&EventMsg::TurnStarted(
|
||||
codex_protocol::protocol::TurnStartedEvent {
|
||||
turn_id: "turn-1".to_string(),
|
||||
model_context_window: None,
|
||||
collaboration_mode_kind: Default::default(),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
manager.remove_thread_state(thread_id).await;
|
||||
assert_eq!(cancel_rx.await, Ok(()));
|
||||
|
||||
let state = manager.thread_state(thread_id);
|
||||
let state = state.lock().await;
|
||||
assert!(state.subscribed_connection_ids().is_empty());
|
||||
assert!(state.cancel_tx.is_none());
|
||||
assert!(state.active_turn_snapshot().is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
106
codex-rs/app-server/src/external_agent_config_api.rs
Normal file
@@ -0,0 +1,106 @@
use crate::error_code::INTERNAL_ERROR_CODE;
use codex_app_server_protocol::ExternalAgentConfigDetectParams;
use codex_app_server_protocol::ExternalAgentConfigDetectResponse;
use codex_app_server_protocol::ExternalAgentConfigImportParams;
use codex_app_server_protocol::ExternalAgentConfigImportResponse;
use codex_app_server_protocol::ExternalAgentConfigMigrationItem;
use codex_app_server_protocol::ExternalAgentConfigMigrationItemType;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_core::external_agent_config::ExternalAgentConfigDetectOptions;
use codex_core::external_agent_config::ExternalAgentConfigMigrationItem as CoreMigrationItem;
use codex_core::external_agent_config::ExternalAgentConfigMigrationItemType as CoreMigrationItemType;
use codex_core::external_agent_config::ExternalAgentConfigService;
use std::io;
use std::path::PathBuf;

#[derive(Clone)]
pub(crate) struct ExternalAgentConfigApi {
    migration_service: ExternalAgentConfigService,
}

impl ExternalAgentConfigApi {
    pub(crate) fn new(codex_home: PathBuf) -> Self {
        Self {
            migration_service: ExternalAgentConfigService::new(codex_home),
        }
    }

    pub(crate) async fn detect(
        &self,
        params: ExternalAgentConfigDetectParams,
    ) -> Result<ExternalAgentConfigDetectResponse, JSONRPCErrorError> {
        let items = self
            .migration_service
            .detect(ExternalAgentConfigDetectOptions {
                include_home: params.include_home,
                cwds: params.cwds,
            })
            .map_err(map_io_error)?;

        Ok(ExternalAgentConfigDetectResponse {
            items: items
                .into_iter()
                .map(|migration_item| ExternalAgentConfigMigrationItem {
                    item_type: match migration_item.item_type {
                        CoreMigrationItemType::Config => {
                            ExternalAgentConfigMigrationItemType::Config
                        }
                        CoreMigrationItemType::Skills => {
                            ExternalAgentConfigMigrationItemType::Skills
                        }
                        CoreMigrationItemType::AgentsMd => {
                            ExternalAgentConfigMigrationItemType::AgentsMd
                        }
                        CoreMigrationItemType::McpServerConfig => {
                            ExternalAgentConfigMigrationItemType::McpServerConfig
                        }
                    },
                    description: migration_item.description,
                    cwd: migration_item.cwd,
                })
                .collect(),
        })
    }

    pub(crate) async fn import(
        &self,
        params: ExternalAgentConfigImportParams,
    ) -> Result<ExternalAgentConfigImportResponse, JSONRPCErrorError> {
        self.migration_service
            .import(
                params
                    .migration_items
                    .into_iter()
                    .map(|migration_item| CoreMigrationItem {
                        item_type: match migration_item.item_type {
                            ExternalAgentConfigMigrationItemType::Config => {
                                CoreMigrationItemType::Config
                            }
                            ExternalAgentConfigMigrationItemType::Skills => {
                                CoreMigrationItemType::Skills
                            }
                            ExternalAgentConfigMigrationItemType::AgentsMd => {
                                CoreMigrationItemType::AgentsMd
                            }
                            ExternalAgentConfigMigrationItemType::McpServerConfig => {
                                CoreMigrationItemType::McpServerConfig
                            }
                        },
                        description: migration_item.description,
                        cwd: migration_item.cwd,
                    })
                    .collect(),
            )
            .map_err(map_io_error)?;

        Ok(ExternalAgentConfigImportResponse {})
    }
}

fn map_io_error(err: io::Error) -> JSONRPCErrorError {
    JSONRPCErrorError {
        code: INTERNAL_ERROR_CODE,
        message: err.to_string(),
        data: None,
    }
}
@@ -1,5 +1,6 @@
|
||||
#![deny(clippy::print_stdout, clippy::print_stderr)]
|
||||
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_cloud_requirements::cloud_requirements_loader;
|
||||
use codex_core::AuthManager;
|
||||
use codex_core::config::Config;
|
||||
@@ -12,7 +13,6 @@ use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Result as IoResult;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
@@ -57,6 +57,7 @@ mod codex_message_processor;
|
||||
mod config_api;
|
||||
mod dynamic_tools;
|
||||
mod error_code;
|
||||
mod external_agent_config_api;
|
||||
mod filters;
|
||||
mod fuzzy_file_search;
|
||||
mod message_processor;
|
||||
@@ -94,10 +95,77 @@ enum OutboundControlEvent {
|
||||
writer: mpsc::Sender<crate::outgoing_message::OutgoingMessage>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
initialized: Arc<AtomicBool>,
|
||||
experimental_api_enabled: Arc<AtomicBool>,
|
||||
opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
|
||||
},
|
||||
/// Remove state for a closed/disconnected connection.
|
||||
Closed { connection_id: ConnectionId },
|
||||
/// Disconnect all connection-oriented clients during graceful restart.
|
||||
DisconnectAll,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct ShutdownState {
|
||||
requested: bool,
|
||||
forced: bool,
|
||||
last_logged_running_turn_count: Option<usize>,
|
||||
}
|
||||
|
||||
enum ShutdownAction {
|
||||
Noop,
|
||||
Finish,
|
||||
}
|
||||
|
||||
impl ShutdownState {
|
||||
fn requested(&self) -> bool {
|
||||
self.requested
|
||||
}
|
||||
|
||||
fn forced(&self) -> bool {
|
||||
self.forced
|
||||
}
|
||||
|
||||
fn on_ctrl_c(&mut self, connection_count: usize, running_turn_count: usize) {
|
||||
if self.requested {
|
||||
self.forced = true;
|
||||
return;
|
||||
}
|
||||
|
||||
self.requested = true;
|
||||
self.last_logged_running_turn_count = None;
|
||||
info!(
|
||||
"received Ctrl-C; entering graceful restart drain (connections={}, runningAssistantTurns={}, requests still accepted until no assistant turns are running)",
|
||||
connection_count, running_turn_count,
|
||||
);
|
||||
}
|
||||
|
||||
fn update(&mut self, running_turn_count: usize, connection_count: usize) -> ShutdownAction {
|
||||
if !self.requested {
|
||||
return ShutdownAction::Noop;
|
||||
}
|
||||
|
||||
if self.forced || running_turn_count == 0 {
|
||||
if self.forced {
|
||||
info!(
|
||||
"received second Ctrl-C; forcing restart with {running_turn_count} running assistant turn(s) and {connection_count} connection(s)"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"Ctrl-C restart: no assistant turns running; stopping acceptor and disconnecting {connection_count} connection(s)"
|
||||
);
|
||||
}
|
||||
return ShutdownAction::Finish;
|
||||
}
|
||||
|
||||
if self.last_logged_running_turn_count != Some(running_turn_count) {
|
||||
info!(
|
||||
"Ctrl-C restart: waiting for {running_turn_count} running assistant turn(s) to finish"
|
||||
);
|
||||
self.last_logged_running_turn_count = Some(running_turn_count);
|
||||
}
|
||||
|
||||
ShutdownAction::Noop
|
||||
}
|
||||
}
|
||||
|
||||
fn config_warning_from_error(
|
||||
@@ -225,13 +293,13 @@ fn log_format_from_env() -> LogFormat {
|
||||
}
|
||||
|
||||
pub async fn run_main(
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
arg0_paths: Arg0DispatchPaths,
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
loader_overrides: LoaderOverrides,
|
||||
default_analytics_enabled: bool,
|
||||
) -> IoResult<()> {
|
||||
run_main_with_transport(
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
cli_config_overrides,
|
||||
loader_overrides,
|
||||
default_analytics_enabled,
|
||||
@@ -241,7 +309,7 @@ pub async fn run_main(
|
||||
}
|
||||
|
||||
pub async fn run_main_with_transport(
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
arg0_paths: Arg0DispatchPaths,
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
loader_overrides: LoaderOverrides,
|
||||
default_analytics_enabled: bool,
|
||||
@@ -253,19 +321,37 @@ pub async fn run_main_with_transport(
|
||||
let (outbound_control_tx, mut outbound_control_rx) =
|
||||
mpsc::channel::<OutboundControlEvent>(CHANNEL_CAPACITY);
|
||||
|
||||
enum TransportRuntime {
|
||||
Stdio,
|
||||
WebSocket {
|
||||
accept_handle: JoinHandle<()>,
|
||||
shutdown_token: CancellationToken,
|
||||
},
|
||||
}
|
||||
|
||||
let mut stdio_handles = Vec::<JoinHandle<()>>::new();
|
||||
let mut websocket_accept_handle = None;
|
||||
match transport {
|
||||
let transport_runtime = match transport {
|
||||
AppServerTransport::Stdio => {
|
||||
start_stdio_connection(transport_event_tx.clone(), &mut stdio_handles).await?;
|
||||
TransportRuntime::Stdio
|
||||
}
|
||||
AppServerTransport::WebSocket { bind_address } => {
|
||||
websocket_accept_handle =
|
||||
Some(start_websocket_acceptor(bind_address, transport_event_tx.clone()).await?);
|
||||
let shutdown_token = CancellationToken::new();
|
||||
let accept_handle = start_websocket_acceptor(
|
||||
bind_address,
|
||||
transport_event_tx.clone(),
|
||||
shutdown_token.clone(),
|
||||
)
|
||||
.await?;
|
||||
TransportRuntime::WebSocket {
|
||||
accept_handle,
|
||||
shutdown_token,
|
||||
}
|
||||
}
|
||||
}
|
||||
let single_client_mode = matches!(transport, AppServerTransport::Stdio);
|
||||
};
|
||||
let single_client_mode = matches!(&transport_runtime, TransportRuntime::Stdio);
|
||||
let shutdown_when_no_connections = single_client_mode;
|
||||
let graceful_ctrl_c_restart_enabled = !single_client_mode;
|
||||
|
||||
// Parse CLI overrides once and derive the base Config eagerly so later
|
||||
// components do not need to work with raw TOML values.
|
||||
@@ -419,6 +505,7 @@ pub async fn run_main_with_transport(
|
||||
writer,
|
||||
disconnect_sender,
|
||||
initialized,
|
||||
experimental_api_enabled,
|
||||
opted_out_notification_methods,
|
||||
} => {
|
||||
outbound_connections.insert(
|
||||
@@ -426,6 +513,7 @@ pub async fn run_main_with_transport(
|
||||
OutboundConnectionState::new(
|
||||
writer,
|
||||
initialized,
|
||||
experimental_api_enabled,
|
||||
opted_out_notification_methods,
|
||||
disconnect_sender,
|
||||
),
|
||||
@@ -434,6 +522,16 @@ pub async fn run_main_with_transport(
|
||||
OutboundControlEvent::Closed { connection_id } => {
|
||||
outbound_connections.remove(&connection_id);
|
||||
}
|
||||
OutboundControlEvent::DisconnectAll => {
|
||||
info!(
|
||||
"disconnecting {} outbound websocket connection(s) for graceful restart",
|
||||
outbound_connections.len()
|
||||
);
|
||||
for connection_state in outbound_connections.values() {
|
||||
connection_state.request_disconnect();
|
||||
}
|
||||
outbound_connections.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
envelope = outgoing_rx.recv() => {
|
||||
@@ -454,7 +552,7 @@ pub async fn run_main_with_transport(
|
||||
let loader_overrides = loader_overrides_for_config_api;
|
||||
let mut processor = MessageProcessor::new(MessageProcessorArgs {
|
||||
outgoing: outgoing_message_sender,
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
config: Arc::new(config),
|
||||
single_client_mode,
|
||||
cli_overrides,
|
||||
@@ -464,11 +562,46 @@ pub async fn run_main_with_transport(
|
||||
config_warnings,
|
||||
});
|
||||
let mut thread_created_rx = processor.thread_created_receiver();
|
||||
let mut running_turn_count_rx = processor.subscribe_running_assistant_turn_count();
|
||||
let mut connections = HashMap::<ConnectionId, ConnectionState>::new();
|
||||
let websocket_accept_shutdown = match &transport_runtime {
|
||||
TransportRuntime::WebSocket { shutdown_token, .. } => Some(shutdown_token.clone()),
|
||||
TransportRuntime::Stdio => None,
|
||||
};
|
||||
async move {
|
||||
let mut listen_for_threads = true;
|
||||
let mut shutdown_state = ShutdownState::default();
|
||||
loop {
|
||||
let running_turn_count = {
|
||||
let running_turn_count = running_turn_count_rx.borrow();
|
||||
*running_turn_count
|
||||
};
|
||||
if matches!(
|
||||
shutdown_state.update(running_turn_count, connections.len()),
|
||||
ShutdownAction::Finish
|
||||
) {
|
||||
if let Some(shutdown_token) = &websocket_accept_shutdown {
|
||||
shutdown_token.cancel();
|
||||
}
|
||||
let _ = outbound_control_tx
|
||||
.send(OutboundControlEvent::DisconnectAll)
|
||||
.await;
|
||||
break;
|
||||
}
|
||||
|
||||
tokio::select! {
|
||||
ctrl_c_result = tokio::signal::ctrl_c(), if graceful_ctrl_c_restart_enabled && !shutdown_state.forced() => {
|
||||
if let Err(err) = ctrl_c_result {
|
||||
warn!("failed to listen for Ctrl-C during graceful restart drain: {err}");
|
||||
}
|
||||
let running_turn_count = *running_turn_count_rx.borrow();
|
||||
shutdown_state.on_ctrl_c(connections.len(), running_turn_count);
|
||||
}
|
||||
changed = running_turn_count_rx.changed(), if graceful_ctrl_c_restart_enabled && shutdown_state.requested() => {
|
||||
if changed.is_err() {
|
||||
warn!("running-turn watcher closed during graceful restart drain");
|
||||
}
|
||||
}
|
||||
event = transport_event_rx.recv() => {
|
||||
let Some(event) = event else {
|
||||
break;
|
||||
@@ -480,6 +613,8 @@ pub async fn run_main_with_transport(
|
||||
disconnect_sender,
|
||||
} => {
|
||||
let outbound_initialized = Arc::new(AtomicBool::new(false));
|
||||
let outbound_experimental_api_enabled =
|
||||
Arc::new(AtomicBool::new(false));
|
||||
let outbound_opted_out_notification_methods =
|
||||
Arc::new(RwLock::new(HashSet::new()));
|
||||
if outbound_control_tx
|
||||
@@ -488,6 +623,9 @@ pub async fn run_main_with_transport(
|
||||
writer,
|
||||
disconnect_sender,
|
||||
initialized: Arc::clone(&outbound_initialized),
|
||||
experimental_api_enabled: Arc::clone(
|
||||
&outbound_experimental_api_enabled,
|
||||
),
|
||||
opted_out_notification_methods: Arc::clone(
|
||||
&outbound_opted_out_notification_methods,
|
||||
),
|
||||
@@ -501,6 +639,7 @@ pub async fn run_main_with_transport(
|
||||
connection_id,
|
||||
ConnectionState::new(
|
||||
outbound_initialized,
|
||||
outbound_experimental_api_enabled,
|
||||
outbound_opted_out_notification_methods,
|
||||
),
|
||||
);
|
||||
@@ -550,6 +689,12 @@ pub async fn run_main_with_transport(
|
||||
"failed to update outbound opted-out notifications"
|
||||
);
|
||||
}
|
||||
connection_state
|
||||
.outbound_experimental_api_enabled
|
||||
.store(
|
||||
connection_state.session.experimental_api_enabled,
|
||||
std::sync::atomic::Ordering::Release,
|
||||
);
|
||||
if !was_initialized && connection_state.session.initialized {
|
||||
processor.send_initialize_notifications().await;
|
||||
}
|
||||
@@ -619,8 +764,13 @@ pub async fn run_main_with_transport(
|
||||
let _ = processor_handle.await;
|
||||
let _ = outbound_handle.await;
|
||||
|
||||
if let Some(handle) = websocket_accept_handle {
|
||||
handle.abort();
|
||||
if let TransportRuntime::WebSocket {
|
||||
accept_handle,
|
||||
shutdown_token,
|
||||
} = transport_runtime
|
||||
{
|
||||
shutdown_token.cancel();
|
||||
let _ = accept_handle.await;
|
||||
}
|
||||
|
||||
for handle in stdio_handles {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use clap::Parser;
|
||||
use codex_app_server::AppServerTransport;
|
||||
use codex_app_server::run_main_with_transport;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_arg0::arg0_dispatch_or_else;
|
||||
use codex_core::config_loader::LoaderOverrides;
|
||||
use codex_utils_cli::CliConfigOverrides;
|
||||
@@ -23,10 +24,7 @@ struct AppServerArgs {
|
||||
}
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
if codex_core::maybe_run_zsh_exec_wrapper_mode()? {
|
||||
return Ok(());
|
||||
}
|
||||
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
|
||||
arg0_dispatch_or_else(|arg0_paths: Arg0DispatchPaths| async move {
|
||||
let args = AppServerArgs::parse();
|
||||
let managed_config_path = managed_config_path_from_debug_env();
|
||||
let loader_overrides = LoaderOverrides {
|
||||
@@ -36,7 +34,7 @@ fn main() -> anyhow::Result<()> {
|
||||
let transport = args.listen;
|
||||
|
||||
run_main_with_transport(
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
CliConfigOverrides::default(),
|
||||
loader_overrides,
|
||||
false,
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
@@ -9,6 +8,7 @@ use crate::codex_message_processor::CodexMessageProcessor;
|
||||
use crate::codex_message_processor::CodexMessageProcessorArgs;
|
||||
use crate::config_api::ConfigApi;
|
||||
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
|
||||
use crate::external_agent_config_api::ExternalAgentConfigApi;
|
||||
use crate::outgoing_message::ConnectionId;
|
||||
use crate::outgoing_message::ConnectionRequestId;
|
||||
use crate::outgoing_message::OutgoingMessageSender;
|
||||
@@ -23,6 +23,8 @@ use codex_app_server_protocol::ConfigReadParams;
|
||||
use codex_app_server_protocol::ConfigValueWriteParams;
|
||||
use codex_app_server_protocol::ConfigWarningNotification;
|
||||
use codex_app_server_protocol::ExperimentalApi;
|
||||
use codex_app_server_protocol::ExternalAgentConfigDetectParams;
|
||||
use codex_app_server_protocol::ExternalAgentConfigImportParams;
|
||||
use codex_app_server_protocol::InitializeResponse;
|
||||
use codex_app_server_protocol::JSONRPCError;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
@@ -32,6 +34,7 @@ use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::ServerNotification;
|
||||
use codex_app_server_protocol::ServerRequestPayload;
|
||||
use codex_app_server_protocol::experimental_required_message;
|
||||
use codex_arg0::Arg0DispatchPaths;
|
||||
use codex_core::AuthManager;
|
||||
use codex_core::ThreadManager;
|
||||
use codex_core::auth::ExternalAuthRefreshContext;
|
||||
@@ -49,7 +52,9 @@ use codex_core::default_client::set_default_originator;
|
||||
use codex_feedback::CodexFeedback;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::protocol::SessionSource;
|
||||
use futures::FutureExt;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::watch;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
use toml::Value as TomlValue;
|
||||
@@ -124,6 +129,7 @@ pub(crate) struct MessageProcessor {
|
||||
outgoing: Arc<OutgoingMessageSender>,
|
||||
codex_message_processor: CodexMessageProcessor,
|
||||
config_api: ConfigApi,
|
||||
external_agent_config_api: ExternalAgentConfigApi,
|
||||
config: Arc<Config>,
|
||||
config_warnings: Arc<Vec<ConfigWarningNotification>>,
|
||||
}
|
||||
@@ -131,13 +137,13 @@ pub(crate) struct MessageProcessor {
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub(crate) struct ConnectionSessionState {
|
||||
pub(crate) initialized: bool,
|
||||
experimental_api_enabled: bool,
|
||||
pub(crate) experimental_api_enabled: bool,
|
||||
pub(crate) opted_out_notification_methods: HashSet<String>,
|
||||
}
|
||||
|
||||
pub(crate) struct MessageProcessorArgs {
|
||||
pub(crate) outgoing: Arc<OutgoingMessageSender>,
|
||||
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
pub(crate) arg0_paths: Arg0DispatchPaths,
|
||||
pub(crate) config: Arc<Config>,
|
||||
pub(crate) single_client_mode: bool,
|
||||
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
|
||||
@@ -153,7 +159,7 @@ impl MessageProcessor {
|
||||
pub(crate) fn new(args: MessageProcessorArgs) -> Self {
|
||||
let MessageProcessorArgs {
|
||||
outgoing,
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
config,
|
||||
single_client_mode,
|
||||
cli_overrides,
|
||||
@@ -182,7 +188,7 @@ impl MessageProcessor {
|
||||
auth_manager,
|
||||
thread_manager,
|
||||
outgoing: outgoing.clone(),
|
||||
codex_linux_sandbox_exe,
|
||||
arg0_paths,
|
||||
config: Arc::clone(&config),
|
||||
cli_overrides: cli_overrides.clone(),
|
||||
cloud_requirements: cloud_requirements.clone(),
|
||||
@@ -195,11 +201,13 @@ impl MessageProcessor {
|
||||
loader_overrides,
|
||||
cloud_requirements,
|
||||
);
|
||||
let external_agent_config_api = ExternalAgentConfigApi::new(config.codex_home.clone());
|
||||
|
||||
Self {
|
||||
outgoing,
|
||||
codex_message_processor,
|
||||
config_api,
|
||||
external_agent_config_api,
|
||||
config,
|
||||
config_warnings: Arc::new(config_warnings),
|
||||
}
|
||||
@@ -212,6 +220,12 @@ impl MessageProcessor {
|
||||
session: &mut ConnectionSessionState,
|
||||
outbound_initialized: &AtomicBool,
|
||||
) {
|
||||
let request_method = request.method.as_str();
|
||||
tracing::trace!(
|
||||
?connection_id,
|
||||
request_id = ?request.id,
|
||||
"app-server request: {request_method}"
|
||||
);
|
||||
let request_id = ConnectionRequestId {
|
||||
connection_id,
|
||||
request_id: request.id.clone(),
|
||||
@@ -355,6 +369,26 @@ impl MessageProcessor {
|
||||
)
|
||||
.await;
|
||||
}
|
||||
ClientRequest::ExternalAgentConfigDetect { request_id, params } => {
|
||||
self.handle_external_agent_config_detect(
|
||||
ConnectionRequestId {
|
||||
connection_id,
|
||||
request_id,
|
||||
},
|
||||
params,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
ClientRequest::ExternalAgentConfigImport { request_id, params } => {
|
||||
self.handle_external_agent_config_import(
|
||||
ConnectionRequestId {
|
||||
connection_id,
|
||||
request_id,
|
||||
},
|
||||
params,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
ClientRequest::ConfigValueWrite { request_id, params } => {
|
||||
self.handle_config_value_write(
|
||||
ConnectionRequestId {
|
||||
@@ -386,8 +420,12 @@ impl MessageProcessor {
|
||||
.await;
|
||||
}
|
||||
other => {
|
||||
// Box the delegated future so this wrapper's async state machine does not
|
||||
// inline the full `CodexMessageProcessor::process_request` future, which
|
||||
// can otherwise push worker-thread stack usage over the edge.
|
||||
self.codex_message_processor
|
||||
.process_request(connection_id, other)
|
||||
.boxed()
|
||||
.await;
|
||||
}
|
||||
}
|
||||
@@ -427,6 +465,11 @@ impl MessageProcessor {
|
||||
.await;
|
||||
}
|
||||
|
||||
pub(crate) fn subscribe_running_assistant_turn_count(&self) -> watch::Receiver<usize> {
|
||||
self.codex_message_processor
|
||||
.subscribe_running_assistant_turn_count()
|
||||
}
|
||||
|
||||
/// Handle a standalone JSON-RPC response originating from the peer.
|
||||
pub(crate) async fn process_response(&mut self, response: JSONRPCResponse) {
|
||||
tracing::info!("<- response: {:?}", response);
|
||||
@@ -475,4 +518,26 @@ impl MessageProcessor {
|
||||
Err(error) => self.outgoing.send_error(request_id, error).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_external_agent_config_detect(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ExternalAgentConfigDetectParams,
|
||||
) {
|
||||
match self.external_agent_config_api.detect(params).await {
|
||||
Ok(response) => self.outgoing.send_response(request_id, response).await,
|
||||
Err(error) => self.outgoing.send_error(request_id, error).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_external_agent_config_import(
|
||||
&self,
|
||||
request_id: ConnectionRequestId,
|
||||
params: ExternalAgentConfigImportParams,
|
||||
) {
|
||||
match self.external_agent_config_api.import(params).await {
|
||||
Ok(response) => self.outgoing.send_response(request_id, response).await,
|
||||
Err(error) => self.outgoing.send_error(request_id, error).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -275,6 +275,10 @@ impl OutgoingMessageSender {
|
||||
connection_ids: &[ConnectionId],
|
||||
notification: ServerNotification,
|
||||
) {
|
||||
tracing::trace!(
|
||||
targeted_connections = connection_ids.len(),
|
||||
"app-server event: {notification}"
|
||||
);
|
||||
let outgoing_message = OutgoingMessage::AppServerNotification(notification);
|
||||
if connection_ids.is_empty() {
|
||||
if let Err(err) = self
|
||||
|
||||
@@ -175,7 +175,13 @@ impl ThreadStateManager {
|
||||
thread_state.remove_connection(subscription_state.connection_id);
|
||||
}
|
||||
if thread_state.subscribed_connection_ids().is_empty() {
|
||||
thread_state.clear_listener();
|
||||
tracing::debug!(
|
||||
thread_id = %thread_id,
|
||||
subscription_id = %subscription_id,
|
||||
connection_id = ?subscription_state.connection_id,
|
||||
listener_generation = thread_state.listener_generation,
|
||||
"retaining thread listener after last subscription removed"
|
||||
);
|
||||
}
|
||||
}
|
||||
Some(thread_id)
|
||||
@@ -183,7 +189,15 @@ impl ThreadStateManager {
|
||||
|
||||
pub(crate) async fn remove_thread_state(&mut self, thread_id: ThreadId) {
|
||||
if let Some(thread_state) = self.thread_states.remove(&thread_id) {
|
||||
thread_state.lock().await.clear_listener();
|
||||
let mut thread_state = thread_state.lock().await;
|
||||
tracing::debug!(
|
||||
thread_id = %thread_id,
|
||||
listener_generation = thread_state.listener_generation,
|
||||
had_listener = thread_state.cancel_tx.is_some(),
|
||||
had_active_turn = thread_state.active_turn_snapshot().is_some(),
|
||||
"clearing thread listener during thread-state teardown"
|
||||
);
|
||||
thread_state.clear_listener();
|
||||
}
|
||||
self.subscription_state_by_id
|
||||
.retain(|_, state| state.thread_id != thread_id);
|
||||
@@ -254,7 +268,11 @@ impl ThreadStateManager {
|
||||
let mut thread_state = thread_state.lock().await;
|
||||
thread_state.remove_connection(connection_id);
|
||||
if thread_state.subscribed_connection_ids().is_empty() {
|
||||
thread_state.clear_listener();
|
||||
tracing::debug!(
|
||||
connection_id = ?connection_id,
|
||||
listener_generation = thread_state.listener_generation,
|
||||
"retaining thread listener after connection disconnect left zero subscribers"
|
||||
);
|
||||
}
|
||||
}
|
||||
return;
|
||||
@@ -265,7 +283,12 @@ impl ThreadStateManager {
|
||||
let mut thread_state = thread_state.lock().await;
|
||||
thread_state.remove_connection(connection_id);
|
||||
if thread_state.subscribed_connection_ids().is_empty() {
|
||||
thread_state.clear_listener();
|
||||
tracing::debug!(
|
||||
thread_id = %thread_id,
|
||||
connection_id = ?connection_id,
|
||||
listener_generation = thread_state.listener_generation,
|
||||
"retaining thread listener after connection disconnect left zero subscribers"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,11 +15,13 @@ use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
#[cfg(test)]
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::watch;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct ThreadWatchManager {
|
||||
state: Arc<Mutex<ThreadWatchState>>,
|
||||
outgoing: Option<Arc<OutgoingMessageSender>>,
|
||||
running_turn_count_tx: watch::Sender<usize>,
|
||||
}
|
||||
|
||||
pub(crate) struct ThreadWatchActiveGuard {
|
||||
@@ -71,16 +73,20 @@ impl Default for ThreadWatchManager {
|
||||
|
||||
impl ThreadWatchManager {
|
||||
pub(crate) fn new() -> Self {
|
||||
let (running_turn_count_tx, _running_turn_count_rx) = watch::channel(0);
|
||||
Self {
|
||||
state: Arc::new(Mutex::new(ThreadWatchState::default())),
|
||||
outgoing: None,
|
||||
running_turn_count_tx,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn new_with_outgoing(outgoing: Arc<OutgoingMessageSender>) -> Self {
|
||||
let (running_turn_count_tx, _running_turn_count_rx) = watch::channel(0);
|
||||
Self {
|
||||
state: Arc::new(Mutex::new(ThreadWatchState::default())),
|
||||
outgoing: Some(outgoing),
|
||||
running_turn_count_tx,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,6 +119,21 @@ impl ThreadWatchManager {
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) async fn running_turn_count(&self) -> usize {
|
||||
self.state
|
||||
.lock()
|
||||
.await
|
||||
.runtime_by_thread_id
|
||||
.values()
|
||||
.filter(|runtime| runtime.running)
|
||||
.count()
|
||||
}
|
||||
|
||||
pub(crate) fn subscribe_running_turn_count(&self) -> watch::Receiver<usize> {
|
||||
self.running_turn_count_tx.subscribe()
|
||||
}
|
||||
|
||||
pub(crate) async fn note_turn_started(&self, thread_id: &str) {
|
||||
self.update_runtime_for_thread(thread_id, |runtime| {
|
||||
runtime.is_loaded = true;
|
||||
@@ -193,10 +214,17 @@ impl ThreadWatchManager {
|
||||
where
|
||||
F: FnOnce(&mut ThreadWatchState) -> Option<ThreadStatusChangedNotification>,
|
||||
{
|
||||
let notification = {
|
||||
let (notification, running_turn_count) = {
|
||||
let mut state = self.state.lock().await;
|
||||
mutate(&mut state)
|
||||
let notification = mutate(&mut state);
|
||||
let running_turn_count = state
|
||||
.runtime_by_thread_id
|
||||
.values()
|
||||
.filter(|runtime| runtime.running)
|
||||
.count();
|
||||
(notification, running_turn_count)
|
||||
};
|
||||
let _ = self.running_turn_count_tx.send(running_turn_count);
|
||||
|
||||
if let Some(notification) = notification
|
||||
&& let Some(outgoing) = &self.outgoing
|
||||
@@ -588,6 +616,32 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn has_running_turns_tracks_runtime_running_flag_only() {
|
||||
let manager = ThreadWatchManager::new();
|
||||
manager
|
||||
.upsert_thread(test_thread(
|
||||
INTERACTIVE_THREAD_ID,
|
||||
codex_app_server_protocol::SessionSource::Cli,
|
||||
))
|
||||
.await;
|
||||
|
||||
assert_eq!(manager.running_turn_count().await, 0);
|
||||
|
||||
let _permission_guard = manager
|
||||
.note_permission_requested(INTERACTIVE_THREAD_ID)
|
||||
.await;
|
||||
assert_eq!(manager.running_turn_count().await, 0);
|
||||
|
||||
manager.note_turn_started(INTERACTIVE_THREAD_ID).await;
|
||||
assert_eq!(manager.running_turn_count().await, 1);
|
||||
|
||||
manager
|
||||
.note_turn_completed(INTERACTIVE_THREAD_ID, false)
|
||||
.await;
|
||||
assert_eq!(manager.running_turn_count().await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn status_change_emits_notification() {
|
||||
let (outgoing_tx, mut outgoing_rx) = mpsc::channel(8);
|
||||
|
||||
@@ -6,6 +6,7 @@ use crate::outgoing_message::OutgoingError;
|
||||
use crate::outgoing_message::OutgoingMessage;
|
||||
use codex_app_server_protocol::JSONRPCErrorError;
|
||||
use codex_app_server_protocol::JSONRPCMessage;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use owo_colors::OwoColorize;
|
||||
@@ -30,8 +31,9 @@ use tokio::net::TcpListener;
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_tungstenite::accept_async;
|
||||
use tokio_tungstenite::accept_async_with_config;
|
||||
use tokio_tungstenite::tungstenite::Message as WebSocketMessage;
|
||||
use tokio_tungstenite::tungstenite::protocol::WebSocketConfig;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::debug;
|
||||
use tracing::error;
|
||||
@@ -67,12 +69,6 @@ fn print_websocket_startup_banner(addr: SocketAddr) {
    }
}

#[allow(clippy::print_stderr)]
fn print_websocket_connection(peer_addr: SocketAddr) {
    let connected_label = colorize("websocket client connected from", Style::new().dimmed());
    eprintln!("{connected_label} {peer_addr}");
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AppServerTransport {
    Stdio,
@@ -149,6 +145,7 @@ pub(crate) enum TransportEvent {
|
||||
|
||||
pub(crate) struct ConnectionState {
|
||||
pub(crate) outbound_initialized: Arc<AtomicBool>,
|
||||
pub(crate) outbound_experimental_api_enabled: Arc<AtomicBool>,
|
||||
pub(crate) outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
|
||||
pub(crate) session: ConnectionSessionState,
|
||||
}
|
||||
@@ -156,10 +153,12 @@ pub(crate) struct ConnectionState {
|
||||
impl ConnectionState {
|
||||
pub(crate) fn new(
|
||||
outbound_initialized: Arc<AtomicBool>,
|
||||
outbound_experimental_api_enabled: Arc<AtomicBool>,
|
||||
outbound_opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
outbound_initialized,
|
||||
outbound_experimental_api_enabled,
|
||||
outbound_opted_out_notification_methods,
|
||||
session: ConnectionSessionState::default(),
|
||||
}
|
||||
@@ -168,6 +167,7 @@ impl ConnectionState {
|
||||
|
||||
pub(crate) struct OutboundConnectionState {
|
||||
pub(crate) initialized: Arc<AtomicBool>,
|
||||
pub(crate) experimental_api_enabled: Arc<AtomicBool>,
|
||||
pub(crate) opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
|
||||
pub(crate) writer: mpsc::Sender<OutgoingMessage>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
@@ -177,11 +177,13 @@ impl OutboundConnectionState {
|
||||
pub(crate) fn new(
|
||||
writer: mpsc::Sender<OutgoingMessage>,
|
||||
initialized: Arc<AtomicBool>,
|
||||
experimental_api_enabled: Arc<AtomicBool>,
|
||||
opted_out_notification_methods: Arc<RwLock<HashSet<String>>>,
|
||||
disconnect_sender: Option<CancellationToken>,
|
||||
) -> Self {
|
||||
Self {
|
||||
initialized,
|
||||
experimental_api_enabled,
|
||||
opted_out_notification_methods,
|
||||
writer,
|
||||
disconnect_sender,
|
||||
@@ -192,7 +194,7 @@ impl OutboundConnectionState {
|
||||
self.disconnect_sender.is_some()
|
||||
}
|
||||
|
||||
fn request_disconnect(&self) {
|
||||
pub(crate) fn request_disconnect(&self) {
|
||||
if let Some(disconnect_sender) = &self.disconnect_sender {
|
||||
disconnect_sender.cancel();
|
||||
}
|
||||
@@ -270,6 +272,7 @@ pub(crate) async fn start_stdio_connection(
|
||||
pub(crate) async fn start_websocket_acceptor(
|
||||
bind_address: SocketAddr,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
shutdown_token: CancellationToken,
|
||||
) -> IoResult<JoinHandle<()>> {
|
||||
let listener = TcpListener::bind(bind_address).await?;
|
||||
let local_addr = listener.local_addr()?;
|
||||
@@ -279,23 +282,31 @@ pub(crate) async fn start_websocket_acceptor(
|
||||
let connection_counter = Arc::new(AtomicU64::new(1));
|
||||
Ok(tokio::spawn(async move {
|
||||
loop {
|
||||
match listener.accept().await {
|
||||
Ok((stream, peer_addr)) => {
|
||||
print_websocket_connection(peer_addr);
|
||||
let connection_id =
|
||||
ConnectionId(connection_counter.fetch_add(1, Ordering::Relaxed));
|
||||
let transport_event_tx_for_connection = transport_event_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
run_websocket_connection(
|
||||
connection_id,
|
||||
stream,
|
||||
transport_event_tx_for_connection,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
tokio::select! {
|
||||
_ = shutdown_token.cancelled() => {
|
||||
info!("websocket acceptor shutting down");
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("failed to accept websocket connection: {err}");
|
||||
accept_result = listener.accept() => {
|
||||
match accept_result {
|
||||
Ok((stream, peer_addr)) => {
|
||||
info!(%peer_addr, "websocket client connected");
|
||||
let connection_id =
|
||||
ConnectionId(connection_counter.fetch_add(1, Ordering::Relaxed));
|
||||
let transport_event_tx_for_connection = transport_event_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
run_websocket_connection(
|
||||
connection_id,
|
||||
stream,
|
||||
transport_event_tx_for_connection,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
Err(err) => {
|
||||
error!("failed to accept websocket connection: {err}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -307,13 +318,14 @@ async fn run_websocket_connection(
|
||||
stream: TcpStream,
|
||||
transport_event_tx: mpsc::Sender<TransportEvent>,
|
||||
) {
|
||||
let websocket_stream = match accept_async(stream).await {
|
||||
Ok(stream) => stream,
|
||||
Err(err) => {
|
||||
warn!("failed to complete websocket handshake: {err}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
let websocket_stream =
|
||||
match accept_async_with_config(stream, Some(WebSocketConfig::default())).await {
|
||||
Ok(stream) => stream,
|
||||
Err(err) => {
|
||||
warn!("failed to complete websocket handshake: {err}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let (writer_tx, writer_rx) = mpsc::channel::<OutgoingMessage>(CHANNEL_CAPACITY);
|
||||
let writer_tx_for_reader = writer_tx.clone();
|
||||
@@ -572,6 +584,7 @@ async fn send_message_to_connection(
|
||||
warn!("dropping message for disconnected connection: {connection_id:?}");
|
||||
return false;
|
||||
};
|
||||
let message = filter_outgoing_message_for_connection(connection_state, message);
|
||||
if should_skip_notification_for_connection(connection_state, &message) {
|
||||
return false;
|
||||
}
|
||||
@@ -597,6 +610,30 @@ async fn send_message_to_connection(
|
||||
}
|
||||
}
|
||||
|
||||
fn filter_outgoing_message_for_connection(
|
||||
connection_state: &OutboundConnectionState,
|
||||
message: OutgoingMessage,
|
||||
) -> OutgoingMessage {
|
||||
let experimental_api_enabled = connection_state
|
||||
.experimental_api_enabled
|
||||
.load(Ordering::Acquire);
|
||||
match message {
|
||||
OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id,
|
||||
mut params,
|
||||
}) => {
|
||||
if !experimental_api_enabled {
|
||||
params.strip_experimental_fields();
|
||||
}
|
||||
OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id,
|
||||
params,
|
||||
})
|
||||
}
|
||||
_ => message,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn route_outgoing_envelope(
|
||||
connections: &mut HashMap<ConnectionId, OutboundConnectionState>,
|
||||
envelope: OutgoingEnvelope,
|
||||
@@ -636,6 +673,7 @@ mod tests {
|
||||
use crate::error_code::OVERLOADED_ERROR_CODE;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use std::path::PathBuf;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
@@ -875,6 +913,7 @@ mod tests {
|
||||
OutboundConnectionState::new(
|
||||
writer_tx,
|
||||
initialized,
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
opted_out_notification_methods,
|
||||
None,
|
||||
),
|
||||
@@ -900,6 +939,138 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn command_execution_request_approval_strips_experimental_fields_without_capability() {
|
||||
let connection_id = ConnectionId(8);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
let mut connections = HashMap::new();
|
||||
connections.insert(
|
||||
connection_id,
|
||||
OutboundConnectionState::new(
|
||||
writer_tx,
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(HashSet::new())),
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
route_outgoing_envelope(
|
||||
&mut connections,
|
||||
OutgoingEnvelope::ToConnection {
|
||||
connection_id,
|
||||
message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id: codex_app_server_protocol::RequestId::Integer(1),
|
||||
params: codex_app_server_protocol::CommandExecutionRequestApprovalParams {
|
||||
thread_id: "thr_123".to_string(),
|
||||
turn_id: "turn_123".to_string(),
|
||||
item_id: "call_123".to_string(),
|
||||
approval_id: None,
|
||||
reason: Some("Need extra read access".to_string()),
|
||||
network_approval_context: None,
|
||||
command: Some("cat file".to_string()),
|
||||
cwd: Some(PathBuf::from("/tmp")),
|
||||
command_actions: None,
|
||||
additional_permissions: Some(
|
||||
codex_app_server_protocol::AdditionalPermissionProfile {
|
||||
network: None,
|
||||
file_system: Some(
|
||||
codex_app_server_protocol::AdditionalFileSystemPermissions {
|
||||
read: Some(vec![PathBuf::from("/tmp/allowed")]),
|
||||
write: None,
|
||||
},
|
||||
),
|
||||
macos: None,
|
||||
},
|
||||
),
|
||||
proposed_execpolicy_amendment: None,
|
||||
proposed_network_policy_amendments: None,
|
||||
},
|
||||
}),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
let message = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("request should be delivered to the connection");
|
||||
let json = serde_json::to_value(message).expect("request should serialize");
|
||||
assert_eq!(json["params"].get("additionalPermissions"), None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn command_execution_request_approval_keeps_experimental_fields_with_capability() {
|
||||
let connection_id = ConnectionId(9);
|
||||
let (writer_tx, mut writer_rx) = mpsc::channel(1);
|
||||
|
||||
let mut connections = HashMap::new();
|
||||
connections.insert(
|
||||
connection_id,
|
||||
OutboundConnectionState::new(
|
||||
writer_tx,
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(RwLock::new(HashSet::new())),
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
route_outgoing_envelope(
|
||||
&mut connections,
|
||||
OutgoingEnvelope::ToConnection {
|
||||
connection_id,
|
||||
message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval {
|
||||
request_id: codex_app_server_protocol::RequestId::Integer(1),
|
||||
params: codex_app_server_protocol::CommandExecutionRequestApprovalParams {
|
||||
thread_id: "thr_123".to_string(),
|
||||
turn_id: "turn_123".to_string(),
|
||||
item_id: "call_123".to_string(),
|
||||
approval_id: None,
|
||||
reason: Some("Need extra read access".to_string()),
|
||||
network_approval_context: None,
|
||||
command: Some("cat file".to_string()),
|
||||
cwd: Some(PathBuf::from("/tmp")),
|
||||
command_actions: None,
|
||||
additional_permissions: Some(
|
||||
codex_app_server_protocol::AdditionalPermissionProfile {
|
||||
network: None,
|
||||
file_system: Some(
|
||||
codex_app_server_protocol::AdditionalFileSystemPermissions {
|
||||
read: Some(vec![PathBuf::from("/tmp/allowed")]),
|
||||
write: None,
|
||||
},
|
||||
),
|
||||
macos: None,
|
||||
},
|
||||
),
|
||||
proposed_execpolicy_amendment: None,
|
||||
proposed_network_policy_amendments: None,
|
||||
},
|
||||
}),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
let message = writer_rx
|
||||
.recv()
|
||||
.await
|
||||
.expect("request should be delivered to the connection");
|
||||
let json = serde_json::to_value(message).expect("request should serialize");
|
||||
assert_eq!(
|
||||
json["params"]["additionalPermissions"],
|
||||
json!({
|
||||
"network": null,
|
||||
"fileSystem": {
|
||||
"read": ["/tmp/allowed"],
|
||||
"write": null,
|
||||
},
|
||||
"macos": null,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn broadcast_does_not_block_on_slow_connection() {
|
||||
let fast_connection_id = ConnectionId(1);
|
||||
@@ -916,6 +1087,7 @@ mod tests {
|
||||
OutboundConnectionState::new(
|
||||
fast_writer_tx,
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(RwLock::new(HashSet::new())),
|
||||
Some(fast_disconnect_token.clone()),
|
||||
),
|
||||
@@ -925,6 +1097,7 @@ mod tests {
|
||||
OutboundConnectionState::new(
|
||||
slow_writer_tx.clone(),
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(RwLock::new(HashSet::new())),
|
||||
Some(slow_disconnect_token.clone()),
|
||||
),
|
||||
@@ -1001,6 +1174,7 @@ mod tests {
|
||||
OutboundConnectionState::new(
|
||||
writer_tx,
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(AtomicBool::new(true)),
|
||||
Arc::new(RwLock::new(HashSet::new())),
|
||||
None,
|
||||
),
|
||||
|
||||
@@ -105,6 +105,7 @@ impl McpProcess {
|
||||
cmd.stdin(Stdio::piped());
|
||||
cmd.stdout(Stdio::piped());
|
||||
cmd.stderr(Stdio::piped());
|
||||
cmd.current_dir(codex_home);
|
||||
cmd.env("CODEX_HOME", codex_home);
|
||||
cmd.env("RUST_LOG", "debug");
|
||||
cmd.env_remove(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR);
|
||||
|
||||
@@ -24,7 +24,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
        } else {
            ModelVisibility::Hide
        },
        supported_in_api: true,
        supported_in_api: preset.supported_in_api,
        priority,
        upgrade: preset.upgrade.as_ref().map(|u| u.into()),
        base_instructions: "base instructions".to_string(),
@@ -48,9 +48,9 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
/// Uses the built-in model presets from ModelsManager, converted to ModelInfo format.
/// Uses bundled-catalog-derived presets, converted to ModelInfo format.
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> {
    // Get all presets and filter for show_in_picker (same as builtin_model_presets does)
    // Get a stable bundled-catalog-derived preset list and filter for picker-visible entries.
    let presets: Vec<&ModelPreset> = all_model_presets()
        .iter()
        .filter(|preset| preset.show_in_picker)
@@ -3,7 +3,7 @@
// This is an instance of the fork of Bash that we bundle with
// https://www.npmjs.com/package/@openai/codex-shell-tool-mcp.
// Fetching the prebuilt version via DotSlash makes it easier to write
// integration tests for the MCP server.
// integration tests for shell execution flows.
//
// TODO(mbolin): Currently, we use a .tgz artifact that includes binaries for
// multiple platforms, but we could save a bit of space by making arch-specific
@@ -478,6 +478,7 @@ fn assert_permissions_message(item: &ResponseItem) {
        AskForApproval::Never,
        &Policy::empty(),
        &PathBuf::from("/tmp"),
        false,
    )
    .into_text();
    assert_eq!(
@@ -28,9 +28,9 @@ use tokio_tungstenite::WebSocketStream;
|
||||
use tokio_tungstenite::connect_async;
|
||||
use tokio_tungstenite::tungstenite::Message as WebSocketMessage;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
pub(super) const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
type WsClient = WebSocketStream<MaybeTlsStream<tokio::net::TcpStream>>;
|
||||
pub(super) type WsClient = WebSocketStream<MaybeTlsStream<tokio::net::TcpStream>>;
|
||||
|
||||
#[tokio::test]
|
||||
async fn websocket_transport_routes_per_connection_handshake_and_responses() -> Result<()> {
|
||||
@@ -78,7 +78,10 @@ async fn websocket_transport_routes_per_connection_handshake_and_responses() ->
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn spawn_websocket_server(codex_home: &Path, bind_addr: SocketAddr) -> Result<Child> {
|
||||
pub(super) async fn spawn_websocket_server(
|
||||
codex_home: &Path,
|
||||
bind_addr: SocketAddr,
|
||||
) -> Result<Child> {
|
||||
let program = codex_utils_cargo_bin::cargo_bin("codex-app-server")
|
||||
.context("should find app-server binary")?;
|
||||
let mut cmd = Command::new(program);
|
||||
@@ -106,14 +109,14 @@ async fn spawn_websocket_server(codex_home: &Path, bind_addr: SocketAddr) -> Res
|
||||
Ok(process)
|
||||
}
|
||||
|
||||
fn reserve_local_addr() -> Result<SocketAddr> {
|
||||
pub(super) fn reserve_local_addr() -> Result<SocketAddr> {
|
||||
let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
|
||||
let addr = listener.local_addr()?;
|
||||
drop(listener);
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
async fn connect_websocket(bind_addr: SocketAddr) -> Result<WsClient> {
|
||||
pub(super) async fn connect_websocket(bind_addr: SocketAddr) -> Result<WsClient> {
|
||||
let url = format!("ws://{bind_addr}");
|
||||
let deadline = Instant::now() + Duration::from_secs(10);
|
||||
loop {
|
||||
@@ -129,7 +132,11 @@ async fn connect_websocket(bind_addr: SocketAddr) -> Result<WsClient> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_initialize_request(stream: &mut WsClient, id: i64, client_name: &str) -> Result<()> {
|
||||
pub(super) async fn send_initialize_request(
|
||||
stream: &mut WsClient,
|
||||
id: i64,
|
||||
client_name: &str,
|
||||
) -> Result<()> {
|
||||
let params = InitializeParams {
|
||||
client_info: ClientInfo {
|
||||
name: client_name.to_string(),
|
||||
@@ -157,7 +164,7 @@ async fn send_config_read_request(stream: &mut WsClient, id: i64) -> Result<()>
|
||||
.await
|
||||
}
|
||||
|
||||
async fn send_request(
|
||||
pub(super) async fn send_request(
|
||||
stream: &mut WsClient,
|
||||
method: &str,
|
||||
id: i64,
|
||||
@@ -179,7 +186,10 @@ async fn send_jsonrpc(stream: &mut WsClient, message: JSONRPCMessage) -> Result<
|
||||
.context("failed to send websocket frame")
|
||||
}
|
||||
|
||||
async fn read_response_for_id(stream: &mut WsClient, id: i64) -> Result<JSONRPCResponse> {
|
||||
pub(super) async fn read_response_for_id(
|
||||
stream: &mut WsClient,
|
||||
id: i64,
|
||||
) -> Result<JSONRPCResponse> {
|
||||
let target_id = RequestId::Integer(id);
|
||||
loop {
|
||||
let message = read_jsonrpc_message(stream).await?;
|
||||
@@ -235,7 +245,7 @@ async fn assert_no_message(stream: &mut WsClient, wait_for: Duration) -> Result<
|
||||
}
|
||||
}
|
||||
|
||||
fn create_config_toml(
|
||||
pub(super) fn create_config_toml(
|
||||
codex_home: &Path,
|
||||
server_uri: &str,
|
||||
approval_policy: &str,
|
||||
|
||||
@@ -0,0 +1,237 @@
|
||||
use super::connection_handling_websocket::DEFAULT_READ_TIMEOUT;
|
||||
use super::connection_handling_websocket::WsClient;
|
||||
use super::connection_handling_websocket::connect_websocket;
|
||||
use super::connection_handling_websocket::create_config_toml;
|
||||
use super::connection_handling_websocket::read_response_for_id;
|
||||
use super::connection_handling_websocket::reserve_local_addr;
|
||||
use super::connection_handling_websocket::send_initialize_request;
|
||||
use super::connection_handling_websocket::send_request;
|
||||
use super::connection_handling_websocket::spawn_websocket_server;
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use anyhow::bail;
|
||||
use app_test_support::create_final_assistant_message_sse_response;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use core_test_support::responses;
|
||||
use futures::SinkExt;
|
||||
use futures::StreamExt;
|
||||
use std::process::Command as StdCommand;
|
||||
use tempfile::TempDir;
|
||||
use tokio::process::Child;
|
||||
use tokio::time::Duration;
|
||||
use tokio::time::Instant;
|
||||
use tokio::time::sleep;
|
||||
use tokio::time::timeout;
|
||||
use tokio_tungstenite::tungstenite::Message as WebSocketMessage;
|
||||
use wiremock::Mock;
|
||||
use wiremock::matchers::method;
|
||||
use wiremock::matchers::path_regex;
|
||||
|
||||
#[tokio::test]
|
||||
async fn websocket_transport_ctrl_c_waits_for_running_turn_before_exit() -> Result<()> {
|
||||
let GracefulCtrlCFixture {
|
||||
_codex_home,
|
||||
_server,
|
||||
mut process,
|
||||
mut ws,
|
||||
} = start_ctrl_c_restart_fixture(Duration::from_secs(3)).await?;
|
||||
|
||||
send_sigint(&process)?;
|
||||
assert_process_does_not_exit_within(&mut process, Duration::from_millis(300)).await?;
|
||||
|
||||
let status = wait_for_process_exit_within(
|
||||
&mut process,
|
||||
Duration::from_secs(10),
|
||||
"timed out waiting for graceful Ctrl-C restart shutdown",
|
||||
)
|
||||
.await?;
|
||||
assert!(status.success(), "expected graceful exit, got {status}");
|
||||
|
||||
expect_websocket_disconnect(&mut ws).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn websocket_transport_second_ctrl_c_forces_exit_while_turn_running() -> Result<()> {
|
||||
let GracefulCtrlCFixture {
|
||||
_codex_home,
|
||||
_server,
|
||||
mut process,
|
||||
mut ws,
|
||||
} = start_ctrl_c_restart_fixture(Duration::from_secs(3)).await?;
|
||||
|
||||
send_sigint(&process)?;
|
||||
assert_process_does_not_exit_within(&mut process, Duration::from_millis(300)).await?;
|
||||
|
||||
send_sigint(&process)?;
|
||||
let status = wait_for_process_exit_within(
|
||||
&mut process,
|
||||
Duration::from_secs(2),
|
||||
"timed out waiting for forced Ctrl-C restart shutdown",
|
||||
)
|
||||
.await?;
|
||||
assert!(status.success(), "expected graceful exit, got {status}");
|
||||
|
||||
expect_websocket_disconnect(&mut ws).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct GracefulCtrlCFixture {
|
||||
_codex_home: TempDir,
|
||||
_server: wiremock::MockServer,
|
||||
process: Child,
|
||||
ws: WsClient,
|
||||
}
|
||||
|
||||
async fn start_ctrl_c_restart_fixture(turn_delay: Duration) -> Result<GracefulCtrlCFixture> {
|
||||
let server = responses::start_mock_server().await;
|
||||
let delayed_turn_response = create_final_assistant_message_sse_response("Done")?;
|
||||
Mock::given(method("POST"))
|
||||
.and(path_regex(".*/responses$"))
|
||||
.respond_with(responses::sse_response(delayed_turn_response).set_delay(turn_delay))
|
||||
.up_to_n_times(1)
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let codex_home = TempDir::new()?;
|
||||
create_config_toml(codex_home.path(), &server.uri(), "never")?;
|
||||
|
||||
let bind_addr = reserve_local_addr()?;
|
||||
let process = spawn_websocket_server(codex_home.path(), bind_addr).await?;
|
||||
let mut ws = connect_websocket(bind_addr).await?;
|
||||
|
||||
send_initialize_request(&mut ws, 1, "ws_graceful_shutdown").await?;
|
||||
let init_response = read_response_for_id(&mut ws, 1).await?;
|
||||
assert_eq!(init_response.id, RequestId::Integer(1));
|
||||
|
||||
send_thread_start_request(&mut ws, 2).await?;
|
||||
let thread_start_response = read_response_for_id(&mut ws, 2).await?;
|
||||
let ThreadStartResponse { thread, .. } = to_response(thread_start_response)?;
|
||||
|
||||
send_turn_start_request(&mut ws, 3, &thread.id).await?;
|
||||
let turn_start_response = read_response_for_id(&mut ws, 3).await?;
|
||||
assert_eq!(turn_start_response.id, RequestId::Integer(3));
|
||||
|
||||
wait_for_responses_post(&server, Duration::from_secs(5)).await?;
|
||||
|
||||
Ok(GracefulCtrlCFixture {
|
||||
_codex_home: codex_home,
|
||||
_server: server,
|
||||
process,
|
||||
ws,
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_thread_start_request(stream: &mut WsClient, id: i64) -> Result<()> {
|
||||
send_request(
|
||||
stream,
|
||||
"thread/start",
|
||||
id,
|
||||
Some(serde_json::to_value(ThreadStartParams {
|
||||
model: Some("mock-model".to_string()),
|
||||
..Default::default()
|
||||
})?),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn send_turn_start_request(stream: &mut WsClient, id: i64, thread_id: &str) -> Result<()> {
|
||||
send_request(
|
||||
stream,
|
||||
"turn/start",
|
||||
id,
|
||||
Some(serde_json::to_value(TurnStartParams {
|
||||
thread_id: thread_id.to_string(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "Hello".to_string(),
|
||||
text_elements: Vec::new(),
|
||||
}],
|
||||
..Default::default()
|
||||
})?),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn wait_for_responses_post(server: &wiremock::MockServer, wait_for: Duration) -> Result<()> {
|
||||
let deadline = Instant::now() + wait_for;
|
||||
loop {
|
||||
let requests = server
|
||||
.received_requests()
|
||||
.await
|
||||
.context("failed to read mock server requests")?;
|
||||
if requests
|
||||
.iter()
|
||||
.any(|request| request.method == "POST" && request.url.path().ends_with("/responses"))
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
if Instant::now() >= deadline {
|
||||
bail!("timed out waiting for /responses request");
|
||||
}
|
||||
sleep(Duration::from_millis(10)).await;
|
||||
}
|
||||
}
|
||||
|
||||
fn send_sigint(process: &Child) -> Result<()> {
|
||||
let pid = process
|
||||
.id()
|
||||
.context("websocket app-server process has no pid")?;
|
||||
let status = StdCommand::new("kill")
|
||||
.arg("-INT")
|
||||
.arg(pid.to_string())
|
||||
.status()
|
||||
.context("failed to invoke kill -INT")?;
|
||||
if !status.success() {
|
||||
bail!("kill -INT exited with {status}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn assert_process_does_not_exit_within(process: &mut Child, window: Duration) -> Result<()> {
|
||||
match timeout(window, process.wait()).await {
|
||||
Err(_) => Ok(()),
|
||||
Ok(Ok(status)) => bail!("process exited too early during graceful drain: {status}"),
|
||||
Ok(Err(err)) => Err(err).context("failed waiting for process"),
|
||||
}
|
||||
}
|
||||
|
||||
async fn wait_for_process_exit_within(
|
||||
process: &mut Child,
|
||||
window: Duration,
|
||||
timeout_context: &'static str,
|
||||
) -> Result<std::process::ExitStatus> {
|
||||
timeout(window, process.wait())
|
||||
.await
|
||||
.context(timeout_context)?
|
||||
.context("failed waiting for websocket app-server process exit")
|
||||
}
|
||||
|
||||
async fn expect_websocket_disconnect(stream: &mut WsClient) -> Result<()> {
|
||||
loop {
|
||||
let frame = timeout(DEFAULT_READ_TIMEOUT, stream.next())
|
||||
.await
|
||||
.context("timed out waiting for websocket disconnect")?;
|
||||
match frame {
|
||||
None => return Ok(()),
|
||||
Some(Ok(WebSocketMessage::Close(_))) => return Ok(()),
|
||||
Some(Ok(WebSocketMessage::Ping(payload))) => {
|
||||
stream
|
||||
.send(WebSocketMessage::Pong(payload))
|
||||
.await
|
||||
.context("failed to reply to ping while waiting for disconnect")?;
|
||||
}
|
||||
Some(Ok(WebSocketMessage::Pong(_))) => {}
|
||||
Some(Ok(WebSocketMessage::Frame(_))) => {}
|
||||
Some(Ok(WebSocketMessage::Text(_))) => {}
|
||||
Some(Ok(WebSocketMessage::Binary(_))) => {}
|
||||
Some(Err(_)) => return Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,6 +9,7 @@ use codex_app_server_protocol::ExperimentalFeatureListResponse;
|
||||
use codex_app_server_protocol::ExperimentalFeatureStage;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_core::config::ConfigBuilder;
|
||||
use codex_core::features::FEATURES;
|
||||
use codex_core::features::Stage;
|
||||
use pretty_assertions::assert_eq;
|
||||
@@ -20,6 +21,11 @@ const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
#[tokio::test]
|
||||
async fn experimental_feature_list_returns_feature_metadata_with_stage() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
let config = ConfigBuilder::default()
|
||||
.codex_home(codex_home.path().to_path_buf())
|
||||
.fallback_cwd(Some(codex_home.path().to_path_buf()))
|
||||
.build()
|
||||
.await?;
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
|
||||
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
|
||||
@@ -63,7 +69,7 @@ async fn experimental_feature_list_returns_feature_metadata_with_stage() -> Resu
|
||||
display_name,
|
||||
description,
|
||||
announcement,
|
||||
enabled: spec.default_enabled,
|
||||
enabled: config.features.enabled(spec.id),
|
||||
default_enabled: spec.default_enabled,
|
||||
}
|
||||
})
|
||||
|
||||
@@ -5,6 +5,8 @@ mod collaboration_mode_list;
|
||||
mod compaction;
|
||||
mod config_rpc;
|
||||
mod connection_handling_websocket;
|
||||
#[cfg(unix)]
|
||||
mod connection_handling_websocket_unix;
|
||||
mod dynamic_tools;
|
||||
mod experimental_api;
|
||||
mod experimental_feature_list;
|
||||
@@ -16,6 +18,7 @@ mod rate_limits;
|
||||
mod request_user_input;
|
||||
mod review;
|
||||
mod safety_check_downgrade;
|
||||
mod skill_approval;
|
||||
mod skills_list;
|
||||
mod thread_archive;
|
||||
mod thread_fork;
|
||||
|
||||
@@ -12,8 +12,7 @@ use codex_app_server_protocol::ModelListParams;
|
||||
use codex_app_server_protocol::ModelListResponse;
|
||||
use codex_app_server_protocol::ReasoningEffortOption;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_protocol::openai_models::InputModality;
|
||||
use codex_protocol::openai_models::ReasoningEffort;
|
||||
use codex_protocol::openai_models::ModelPreset;
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
@@ -21,6 +20,48 @@ use tokio::time::timeout;
|
||||
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
const INVALID_REQUEST_ERROR_CODE: i64 = -32600;
|
||||
|
||||
fn model_from_preset(preset: &ModelPreset) -> Model {
|
||||
Model {
|
||||
id: preset.id.clone(),
|
||||
model: preset.model.clone(),
|
||||
upgrade: preset.upgrade.as_ref().map(|upgrade| upgrade.id.clone()),
|
||||
display_name: preset.display_name.clone(),
|
||||
description: preset.description.clone(),
|
||||
hidden: !preset.show_in_picker,
|
||||
supported_reasoning_efforts: preset
|
||||
.supported_reasoning_efforts
|
||||
.iter()
|
||||
.map(|preset| ReasoningEffortOption {
|
||||
reasoning_effort: preset.effort,
|
||||
description: preset.description.clone(),
|
||||
})
|
||||
.collect(),
|
||||
default_reasoning_effort: preset.default_reasoning_effort,
|
||||
input_modalities: preset.input_modalities.clone(),
|
||||
// `write_models_cache()` round-trips through a simplified ModelInfo fixture that does not
|
||||
// preserve personality placeholders in base instructions, so app-server list results from
|
||||
// cache report `supports_personality = false`.
|
||||
// todo(sayan): fix, maybe make roundtrip use ModelInfo only
|
||||
supports_personality: false,
|
||||
is_default: preset.is_default,
|
||||
}
|
||||
}
|
||||
|
||||
fn expected_visible_models() -> Vec<Model> {
|
||||
// Filter by supported_in_api to support testing with both ChatGPT and non-ChatGPT auth modes.
|
||||
let mut presets =
|
||||
ModelPreset::filter_by_auth(codex_core::test_support::all_model_presets().clone(), false);
|
||||
|
||||
// Mirror `ModelsManager::build_available_models()` default selection after auth filtering.
|
||||
ModelPreset::mark_default_by_picker_visibility(&mut presets);
|
||||
|
||||
presets
|
||||
.iter()
|
||||
.filter(|preset| preset.show_in_picker)
|
||||
.map(model_from_preset)
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
@@ -48,130 +89,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
|
||||
next_cursor,
|
||||
} = to_response::<ModelListResponse>(response)?;
|
||||
|
||||
let expected_models = vec![
|
||||
Model {
|
||||
id: "gpt-5.2-codex".to_string(),
|
||||
model: "gpt-5.2-codex".to_string(),
|
||||
upgrade: None,
|
||||
display_name: "gpt-5.2-codex".to_string(),
|
||||
description: "Latest frontier agentic coding model.".to_string(),
|
||||
hidden: false,
|
||||
supported_reasoning_efforts: vec![
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Low,
|
||||
description: "Fast responses with lighter reasoning".to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Medium,
|
||||
description: "Balances speed and reasoning depth for everyday tasks"
|
||||
.to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::High,
|
||||
description: "Greater reasoning depth for complex problems".to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::XHigh,
|
||||
description: "Extra high reasoning depth for complex problems".to_string(),
|
||||
},
|
||||
],
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
input_modalities: vec![InputModality::Text, InputModality::Image],
|
||||
supports_personality: false,
|
||||
is_default: true,
|
||||
},
|
||||
Model {
|
||||
id: "gpt-5.1-codex-max".to_string(),
|
||||
model: "gpt-5.1-codex-max".to_string(),
|
||||
upgrade: Some("gpt-5.2-codex".to_string()),
|
||||
display_name: "gpt-5.1-codex-max".to_string(),
|
||||
description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
|
||||
hidden: false,
|
||||
supported_reasoning_efforts: vec![
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Low,
|
||||
description: "Fast responses with lighter reasoning".to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Medium,
|
||||
description: "Balances speed and reasoning depth for everyday tasks"
|
||||
.to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::High,
|
||||
description: "Greater reasoning depth for complex problems".to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::XHigh,
|
||||
description: "Extra high reasoning depth for complex problems".to_string(),
|
||||
},
|
||||
],
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
input_modalities: vec![InputModality::Text, InputModality::Image],
|
||||
supports_personality: false,
|
||||
is_default: false,
|
||||
},
|
||||
Model {
|
||||
id: "gpt-5.1-codex-mini".to_string(),
|
||||
model: "gpt-5.1-codex-mini".to_string(),
|
||||
upgrade: Some("gpt-5.2-codex".to_string()),
|
||||
display_name: "gpt-5.1-codex-mini".to_string(),
|
||||
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
|
||||
hidden: false,
|
||||
supported_reasoning_efforts: vec![
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Medium,
|
||||
description: "Dynamically adjusts reasoning based on the task".to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::High,
|
||||
description: "Maximizes reasoning depth for complex or ambiguous problems"
|
||||
.to_string(),
|
||||
},
|
||||
],
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
input_modalities: vec![InputModality::Text, InputModality::Image],
|
||||
supports_personality: false,
|
||||
is_default: false,
|
||||
},
|
||||
Model {
|
||||
id: "gpt-5.2".to_string(),
|
||||
model: "gpt-5.2".to_string(),
|
||||
upgrade: Some("gpt-5.2-codex".to_string()),
|
||||
display_name: "gpt-5.2".to_string(),
|
||||
description:
|
||||
"Latest frontier model with improvements across knowledge, reasoning and coding"
|
||||
.to_string(),
|
||||
hidden: false,
|
||||
supported_reasoning_efforts: vec![
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Low,
|
||||
description: "Balances speed with some reasoning; useful for straightforward \
|
||||
queries and short explanations"
|
||||
.to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::Medium,
|
||||
description: "Provides a solid balance of reasoning depth and latency for \
|
||||
general-purpose tasks"
|
||||
.to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::High,
|
||||
description: "Maximizes reasoning depth for complex or ambiguous problems"
|
||||
.to_string(),
|
||||
},
|
||||
ReasoningEffortOption {
|
||||
reasoning_effort: ReasoningEffort::XHigh,
|
||||
description: "Extra high reasoning depth for complex problems".to_string(),
|
||||
},
|
||||
],
|
||||
default_reasoning_effort: ReasoningEffort::Medium,
|
||||
input_modalities: vec![InputModality::Text, InputModality::Image],
|
||||
supports_personality: false,
|
||||
is_default: false,
|
||||
},
|
||||
];
|
||||
let expected_models = expected_visible_models();
|
||||
|
||||
assert_eq!(items, expected_models);
|
||||
assert!(next_cursor.is_none());
|
||||
@@ -237,8 +155,10 @@ async fn list_models_pagination_works() -> Result<()> {
|
||||
next_cursor: first_cursor,
|
||||
} = to_response::<ModelListResponse>(first_response)?;
|
||||
|
||||
let expected_models = expected_visible_models();
|
||||
|
||||
assert_eq!(first_items.len(), 1);
|
||||
assert_eq!(first_items[0].id, "gpt-5.2-codex");
|
||||
assert_eq!(first_items[0].id, expected_models[0].id);
|
||||
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
|
||||
|
||||
let second_request = mcp
|
||||
@@ -261,7 +181,7 @@ async fn list_models_pagination_works() -> Result<()> {
|
||||
} = to_response::<ModelListResponse>(second_response)?;
|
||||
|
||||
assert_eq!(second_items.len(), 1);
|
||||
assert_eq!(second_items[0].id, "gpt-5.1-codex-max");
|
||||
assert_eq!(second_items[0].id, expected_models[1].id);
|
||||
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
|
||||
|
||||
let third_request = mcp
|
||||
@@ -284,7 +204,7 @@ async fn list_models_pagination_works() -> Result<()> {
|
||||
} = to_response::<ModelListResponse>(third_response)?;
|
||||
|
||||
assert_eq!(third_items.len(), 1);
|
||||
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
|
||||
assert_eq!(third_items[0].id, expected_models[2].id);
|
||||
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
|
||||
|
||||
let fourth_request = mcp
|
||||
@@ -307,7 +227,7 @@ async fn list_models_pagination_works() -> Result<()> {
|
||||
} = to_response::<ModelListResponse>(fourth_response)?;
|
||||
|
||||
assert_eq!(fourth_items.len(), 1);
|
||||
assert_eq!(fourth_items[0].id, "gpt-5.2");
|
||||
assert_eq!(fourth_items[0].id, expected_models[3].id);
|
||||
assert!(fourth_cursor.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
138
codex-rs/app-server/tests/suite/v2/skill_approval.rs
Normal file
138
codex-rs/app-server/tests/suite/v2/skill_approval.rs
Normal file
@@ -0,0 +1,138 @@
|
||||
use anyhow::Result;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_final_assistant_message_sse_response;
|
||||
use app_test_support::create_mock_responses_server_sequence;
|
||||
use app_test_support::to_response;
|
||||
use app_test_support::write_mock_responses_config_toml;
|
||||
use codex_app_server_protocol::JSONRPCResponse;
|
||||
use codex_app_server_protocol::RequestId;
|
||||
use codex_app_server_protocol::ServerRequest;
|
||||
use codex_app_server_protocol::ThreadStartParams;
|
||||
use codex_app_server_protocol::ThreadStartResponse;
|
||||
use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::TurnStartResponse;
|
||||
use codex_app_server_protocol::UserInput as V2UserInput;
|
||||
use codex_core::features::Feature;
|
||||
use core_test_support::responses;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::json;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use tokio::time::timeout;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||
|
||||
fn write_skill_with_script(
|
||||
home: &Path,
|
||||
name: &str,
|
||||
script_body: &str,
|
||||
) -> Result<std::path::PathBuf> {
|
||||
let skill_dir = home.join("skills").join(name);
|
||||
let scripts_dir = skill_dir.join("scripts");
|
||||
fs::create_dir_all(&scripts_dir)?;
|
||||
fs::write(
|
||||
skill_dir.join("SKILL.md"),
|
||||
format!("---\nname: {name}\ndescription: {name} skill\n---\n"),
|
||||
)?;
|
||||
let script_path = scripts_dir.join("run.py");
|
||||
fs::write(&script_path, script_body)?;
|
||||
Ok(script_path)
|
||||
}
|
||||
|
||||
fn shell_command_response(tool_call_id: &str, command: &str) -> Result<String> {
|
||||
let arguments = serde_json::to_string(&json!({
|
||||
"command": command,
|
||||
"timeout_ms": 500,
|
||||
}))?;
|
||||
Ok(responses::sse(vec![
|
||||
responses::ev_response_created("resp-1"),
|
||||
responses::ev_function_call(tool_call_id, "shell_command", &arguments),
|
||||
responses::ev_completed("resp-1"),
|
||||
]))
|
||||
}
|
||||
|
||||
fn command_for_script(script_path: &Path) -> Result<String> {
|
||||
let runner = if cfg!(windows) { "python" } else { "python3" };
|
||||
let script_path = script_path.to_string_lossy().into_owned();
|
||||
Ok(shlex::try_join([runner, script_path.as_str()])?)
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn skill_request_approval_round_trip_on_shell_command_skill_script_exec() -> Result<()> {
|
||||
let codex_home = tempfile::TempDir::new()?;
|
||||
let script_path = write_skill_with_script(codex_home.path(), "demo", "print('hello')")?;
|
||||
let tool_call_id = "skill-call";
|
||||
let command = command_for_script(&script_path)?;
|
||||
let server = create_mock_responses_server_sequence(vec![
|
||||
shell_command_response(tool_call_id, &command)?,
|
||||
create_final_assistant_message_sse_response("done")?,
|
||||
])
|
||||
.await;
|
||||
write_mock_responses_config_toml(
|
||||
codex_home.path(),
|
||||
&server.uri(),
|
||||
&BTreeMap::from([(Feature::SkillApproval, true)]),
|
||||
8192,
|
||||
Some(false),
|
||||
"mock_provider",
|
||||
"compact",
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let thread_start_id = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
model: Some("mock-model".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let thread_start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?;
|
||||
|
||||
let turn_start_id = mcp
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "ask something".to_string(),
|
||||
text_elements: Vec::new(),
|
||||
}],
|
||||
model: Some("mock-model".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let turn_start_resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(turn_start_id)),
|
||||
)
|
||||
.await??;
|
||||
let TurnStartResponse { .. } = to_response::<TurnStartResponse>(turn_start_resp)?;
|
||||
|
||||
let server_req = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_request_message(),
|
||||
)
|
||||
.await??;
|
||||
let ServerRequest::SkillRequestApproval { request_id, params } = server_req else {
|
||||
panic!("expected SkillRequestApproval request, got: {server_req:?}");
|
||||
};
|
||||
|
||||
assert_eq!(params.item_id, tool_call_id);
|
||||
assert_eq!(params.skill_name, "demo");
|
||||
|
||||
mcp.send_response(request_id, serde_json::json!({ "decision": "approve" }))
|
||||
.await?;
|
||||
|
||||
timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -78,6 +78,7 @@ async fn list_threads_with_sort(
|
||||
source_kinds,
|
||||
archived,
|
||||
cwd: None,
|
||||
search_term: None,
|
||||
})
|
||||
.await?;
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
@@ -491,6 +492,7 @@ async fn thread_list_respects_cwd_filter() -> Result<()> {
|
||||
source_kinds: None,
|
||||
archived: None,
|
||||
cwd: Some(target_cwd.to_string_lossy().into_owned()),
|
||||
search_term: None,
|
||||
})
|
||||
.await?;
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
@@ -511,6 +513,86 @@ async fn thread_list_respects_cwd_filter() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_list_respects_search_term_filter() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
std::fs::write(
|
||||
codex_home.path().join("config.toml"),
|
||||
r#"
|
||||
model = "mock-model"
|
||||
approval_policy = "never"
|
||||
suppress_unstable_features_warning = true
|
||||
|
||||
[features]
|
||||
sqlite = true
|
||||
"#,
|
||||
)?;
|
||||
|
||||
let older_match = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
"2025-01-02T10-00-00",
|
||||
"2025-01-02T10:00:00Z",
|
||||
"match: needle",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
let _non_match = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
"2025-01-02T11-00-00",
|
||||
"2025-01-02T11:00:00Z",
|
||||
"no hit here",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
let newer_match = create_fake_rollout(
|
||||
codex_home.path(),
|
||||
"2025-01-02T12-00-00",
|
||||
"2025-01-02T12:00:00Z",
|
||||
"needle suffix",
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
|
||||
// `thread/list` only applies `search_term` on the sqlite path. In this test we
|
||||
// create rollouts manually, so we must also create the sqlite DB and mark backfill
|
||||
// complete; otherwise app-server will permanently use filesystem fallback.
|
||||
let state_db = codex_state::StateRuntime::init(
|
||||
codex_home.path().to_path_buf(),
|
||||
"mock_provider".into(),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
state_db.mark_backfill_complete(None).await?;
|
||||
|
||||
let mut mcp = init_mcp(codex_home.path()).await?;
|
||||
let request_id = mcp
|
||||
.send_thread_list_request(codex_app_server_protocol::ThreadListParams {
|
||||
cursor: None,
|
||||
limit: Some(10),
|
||||
sort_key: None,
|
||||
model_providers: Some(vec!["mock_provider".to_string()]),
|
||||
source_kinds: None,
|
||||
archived: None,
|
||||
cwd: None,
|
||||
search_term: Some("needle".to_string()),
|
||||
})
|
||||
.await?;
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadListResponse {
|
||||
data, next_cursor, ..
|
||||
} = to_response::<ThreadListResponse>(resp)?;
|
||||
|
||||
assert_eq!(next_cursor, None);
|
||||
let ids: Vec<_> = data.iter().map(|thread| thread.id.as_str()).collect();
|
||||
assert_eq!(ids, vec![newer_match, older_match]);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_list_empty_source_kinds_defaults_to_interactive_only() -> Result<()> {
|
||||
let codex_home = TempDir::new()?;
|
||||
@@ -1335,6 +1417,7 @@ async fn thread_list_invalid_cursor_returns_error() -> Result<()> {
|
||||
source_kinds: None,
|
||||
archived: None,
|
||||
cwd: None,
|
||||
search_term: None,
|
||||
})
|
||||
.await?;
|
||||
let error: JSONRPCError = timeout(
|
||||
|
||||
@@ -289,6 +289,7 @@ async fn thread_name_set_is_reflected_in_read_list_and_resume() -> Result<()> {
|
||||
source_kinds: None,
|
||||
archived: None,
|
||||
cwd: None,
|
||||
search_term: None,
|
||||
})
|
||||
.await?;
|
||||
let list_resp: JSONRPCResponse = timeout(
|
||||
|
||||
@@ -146,6 +146,34 @@ model_reasoning_effort = "high"
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_start_accepts_metrics_service_name() -> Result<()> {
|
||||
let server = create_mock_responses_server_repeating_assistant("Done").await;
|
||||
|
||||
let codex_home = TempDir::new()?;
|
||||
create_config_toml(codex_home.path(), &server.uri())?;
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path()).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let req_id = mcp
|
||||
.send_thread_start_request(ThreadStartParams {
|
||||
service_name: Some("my_app_server_client".to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let resp: JSONRPCResponse = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
|
||||
)
|
||||
.await??;
|
||||
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(resp)?;
|
||||
assert!(!thread.id.is_empty(), "thread id should not be empty");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_start_ephemeral_remains_pathless() -> Result<()> {
|
||||
let server = create_mock_responses_server_repeating_assistant("Done").await;
|
||||
|
||||
@@ -2,18 +2,15 @@
|
||||
//
|
||||
// Running these tests with the patched zsh fork:
|
||||
//
|
||||
// The suite uses `CODEX_TEST_ZSH_PATH` when set. Example:
|
||||
// CODEX_TEST_ZSH_PATH="$HOME/.local/codex-zsh-77045ef/bin/zsh" \
|
||||
// cargo test -p codex-app-server turn_start_zsh_fork -- --nocapture
|
||||
//
|
||||
// For a single test:
|
||||
// CODEX_TEST_ZSH_PATH="$HOME/.local/codex-zsh-77045ef/bin/zsh" \
|
||||
// cargo test -p codex-app-server turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2 -- --nocapture
|
||||
// The suite resolves the shared test-only zsh DotSlash file at
|
||||
// `app-server/tests/suite/zsh` via DotSlash on first use, so `dotslash` and
|
||||
// network access are required the first time the artifact is fetched.
|
||||
|
||||
use anyhow::Result;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_final_assistant_message_sse_response;
|
||||
use app_test_support::create_mock_responses_server_sequence;
|
||||
use app_test_support::create_mock_responses_server_sequence_unchecked;
|
||||
use app_test_support::create_shell_command_sse_response;
|
||||
use app_test_support::to_response;
|
||||
use codex_app_server_protocol::CommandExecutionApprovalDecision;
|
||||
@@ -57,7 +54,7 @@ async fn turn_start_shell_zsh_fork_executes_command_v2() -> Result<()> {
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let Some(zsh_path) = find_test_zsh_path() else {
|
||||
let Some(zsh_path) = find_test_zsh_path()? else {
|
||||
eprintln!("skipping zsh fork test: no zsh executable found");
|
||||
return Ok(());
|
||||
};
|
||||
@@ -82,7 +79,7 @@ async fn turn_start_shell_zsh_fork_executes_command_v2() -> Result<()> {
|
||||
&zsh_path,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||
let mut mcp = create_zsh_test_mcp_process(&codex_home, &workspace).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = mcp
|
||||
@@ -167,7 +164,7 @@ async fn turn_start_shell_zsh_fork_exec_approval_decline_v2() -> Result<()> {
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let Some(zsh_path) = find_test_zsh_path() else {
|
||||
let Some(zsh_path) = find_test_zsh_path()? else {
|
||||
eprintln!("skipping zsh fork decline test: no zsh executable found");
|
||||
return Ok(());
|
||||
};
|
||||
@@ -199,7 +196,7 @@ async fn turn_start_shell_zsh_fork_exec_approval_decline_v2() -> Result<()> {
|
||||
&zsh_path,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||
let mut mcp = create_zsh_test_mcp_process(&codex_home, &workspace).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = mcp
|
||||
@@ -303,7 +300,7 @@ async fn turn_start_shell_zsh_fork_exec_approval_cancel_v2() -> Result<()> {
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let Some(zsh_path) = find_test_zsh_path() else {
|
||||
let Some(zsh_path) = find_test_zsh_path()? else {
|
||||
eprintln!("skipping zsh fork cancel test: no zsh executable found");
|
||||
return Ok(());
|
||||
};
|
||||
@@ -332,7 +329,7 @@ async fn turn_start_shell_zsh_fork_exec_approval_cancel_v2() -> Result<()> {
|
||||
&zsh_path,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||
let mut mcp = create_zsh_test_mcp_process(&codex_home, &workspace).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = mcp
|
||||
@@ -434,7 +431,7 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
let workspace = tmp.path().join("workspace");
|
||||
std::fs::create_dir(&workspace)?;
|
||||
|
||||
let Some(zsh_path) = find_test_zsh_path() else {
|
||||
let Some(zsh_path) = find_test_zsh_path()? else {
|
||||
eprintln!("skipping zsh fork subcommand decline test: no zsh executable found");
|
||||
return Ok(());
|
||||
};
|
||||
@@ -446,9 +443,17 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
return Ok(());
|
||||
}
|
||||
eprintln!("using zsh path for zsh-fork test: {}", zsh_path.display());
|
||||
|
||||
let first_file = workspace.join("first.txt");
|
||||
let second_file = workspace.join("second.txt");
|
||||
std::fs::write(&first_file, "one")?;
|
||||
std::fs::write(&second_file, "two")?;
|
||||
let shell_command = format!(
|
||||
"/bin/rm {} && /bin/rm {}",
|
||||
first_file.display(),
|
||||
second_file.display()
|
||||
);
|
||||
let tool_call_arguments = serde_json::to_string(&serde_json::json!({
|
||||
"command": "/usr/bin/true && /usr/bin/true",
|
||||
"command": shell_command,
|
||||
"workdir": serde_json::Value::Null,
|
||||
"timeout_ms": 5000
|
||||
}))?;
|
||||
@@ -461,11 +466,20 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
),
|
||||
responses::ev_completed("resp-1"),
|
||||
]);
|
||||
let server = create_mock_responses_server_sequence(vec![response]).await;
|
||||
let no_op_response = responses::sse(vec![
|
||||
responses::ev_response_created("resp-2"),
|
||||
responses::ev_completed("resp-2"),
|
||||
]);
|
||||
// Linux CI has occasionally issued a second `/responses` POST after the
|
||||
// subcommand-decline flow. This test is about approval/decline behavior in
|
||||
// the zsh fork, not exact model request count, so allow an extra request
|
||||
// and return a harmless no-op response if it arrives.
|
||||
let server =
|
||||
create_mock_responses_server_sequence_unchecked(vec![response, no_op_response]).await;
|
||||
create_config_toml(
|
||||
&codex_home,
|
||||
&server.uri(),
|
||||
"on-request",
|
||||
"untrusted",
|
||||
&BTreeMap::from([
|
||||
(Feature::ShellZshFork, true),
|
||||
(Feature::UnifiedExec, false),
|
||||
@@ -474,7 +488,7 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
&zsh_path,
|
||||
)?;
|
||||
|
||||
let mut mcp = McpProcess::new(&codex_home).await?;
|
||||
let mut mcp = create_zsh_test_mcp_process(&codex_home, &workspace).await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let start_id = mcp
|
||||
@@ -495,13 +509,17 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
.send_turn_start_request(TurnStartParams {
|
||||
thread_id: thread.id.clone(),
|
||||
input: vec![V2UserInput::Text {
|
||||
text: "run true true".to_string(),
|
||||
text: "remove both files".to_string(),
|
||||
text_elements: Vec::new(),
|
||||
}],
|
||||
cwd: Some(workspace.clone()),
|
||||
approval_policy: Some(codex_app_server_protocol::AskForApproval::OnRequest),
|
||||
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::ReadOnly {
|
||||
access: codex_app_server_protocol::ReadOnlyAccess::FullAccess,
|
||||
approval_policy: Some(codex_app_server_protocol::AskForApproval::UnlessTrusted),
|
||||
sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite {
|
||||
writable_roots: vec![workspace.clone().try_into()?],
|
||||
read_only_access: codex_app_server_protocol::ReadOnlyAccess::FullAccess,
|
||||
network_access: false,
|
||||
exclude_tmpdir_env_var: false,
|
||||
exclude_slash_tmp: false,
|
||||
}),
|
||||
model: Some("mock-model".to_string()),
|
||||
effort: Some(codex_protocol::openai_models::ReasoningEffort::Medium),
|
||||
@@ -516,11 +534,15 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
.await??;
|
||||
let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
|
||||
|
||||
let mut approval_ids = Vec::new();
|
||||
for decision in [
|
||||
let mut approved_subcommand_strings = Vec::new();
|
||||
let mut approved_subcommand_ids = Vec::new();
|
||||
let mut saw_parent_approval = false;
|
||||
let target_decisions = [
|
||||
CommandExecutionApprovalDecision::Accept,
|
||||
CommandExecutionApprovalDecision::Cancel,
|
||||
] {
|
||||
];
|
||||
let mut target_decision_index = 0;
|
||||
while target_decision_index < target_decisions.len() {
|
||||
let server_req = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_request_message(),
|
||||
@@ -531,13 +553,47 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
panic!("expected CommandExecutionRequestApproval request");
|
||||
};
|
||||
assert_eq!(params.item_id, "call-zsh-fork-subcommand-decline");
|
||||
approval_ids.push(
|
||||
params
|
||||
.approval_id
|
||||
.clone()
|
||||
.expect("approval_id must be present for zsh subcommand approvals"),
|
||||
);
|
||||
assert_eq!(params.thread_id, thread.id);
|
||||
let approval_command = params
|
||||
.command
|
||||
.as_deref()
|
||||
.expect("approval command should be present");
|
||||
let is_target_subcommand = (approval_command.starts_with("/bin/rm ")
|
||||
|| approval_command.starts_with("/usr/bin/rm "))
|
||||
&& (approval_command.contains(&first_file.display().to_string())
|
||||
|| approval_command.contains(&second_file.display().to_string()));
|
||||
if is_target_subcommand {
|
||||
assert!(
|
||||
approval_command.contains(&first_file.display().to_string())
|
||||
|| approval_command.contains(&second_file.display().to_string()),
|
||||
"expected zsh subcommand approval for one of the rm commands, got: {approval_command}"
|
||||
);
|
||||
approved_subcommand_ids.push(
|
||||
params
|
||||
.approval_id
|
||||
.clone()
|
||||
.expect("approval_id must be present for zsh subcommand approvals"),
|
||||
);
|
||||
approved_subcommand_strings.push(approval_command.to_string());
|
||||
}
|
||||
let is_parent_approval = approval_command.contains(&zsh_path.display().to_string())
|
||||
&& approval_command.contains(&shell_command);
|
||||
let decision = if is_target_subcommand {
|
||||
let decision = target_decisions[target_decision_index].clone();
|
||||
target_decision_index += 1;
|
||||
decision
|
||||
} else if is_parent_approval {
|
||||
assert!(
|
||||
!saw_parent_approval,
|
||||
"unexpected extra non-target approval: {approval_command}"
|
||||
);
|
||||
saw_parent_approval = true;
|
||||
CommandExecutionApprovalDecision::Accept
|
||||
} else {
|
||||
// Login shells may run startup helpers (for example path_helper on macOS)
|
||||
// before the parent shell command or target subcommands are reached.
|
||||
CommandExecutionApprovalDecision::Accept
|
||||
};
|
||||
mcp.send_response(
|
||||
request_id,
|
||||
serde_json::to_value(CommandExecutionRequestApprovalResponse { decision })?,
|
||||
@@ -545,6 +601,15 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
.await?;
|
||||
}
|
||||
|
||||
assert!(
|
||||
saw_parent_approval,
|
||||
"expected parent shell approval request"
|
||||
);
|
||||
assert_eq!(approved_subcommand_ids.len(), 2);
|
||||
assert_ne!(approved_subcommand_ids[0], approved_subcommand_ids[1]);
|
||||
assert_eq!(approved_subcommand_strings.len(), 2);
|
||||
assert!(approved_subcommand_strings[0].contains(&first_file.display().to_string()));
|
||||
assert!(approved_subcommand_strings[1].contains(&second_file.display().to_string()));
|
||||
let parent_completed_command_execution = timeout(DEFAULT_READ_TIMEOUT, async {
|
||||
loop {
|
||||
let completed_notif = mcp
|
||||
@@ -563,32 +628,61 @@ async fn turn_start_shell_zsh_fork_subcommand_decline_marks_parent_declined_v2()
|
||||
}
|
||||
}
|
||||
})
|
||||
.await??;
|
||||
.await;
|
||||
|
||||
let ThreadItem::CommandExecution {
|
||||
id,
|
||||
status,
|
||||
aggregated_output,
|
||||
..
|
||||
} = parent_completed_command_execution
|
||||
else {
|
||||
unreachable!("loop ensures we break on parent command execution item");
|
||||
};
|
||||
assert_eq!(id, "call-zsh-fork-subcommand-decline");
|
||||
assert_eq!(status, CommandExecutionStatus::Declined);
|
||||
assert!(
|
||||
aggregated_output.is_none()
|
||||
|| aggregated_output == Some("exec command rejected by user".to_string())
|
||||
);
|
||||
assert_eq!(approval_ids.len(), 2);
|
||||
assert_ne!(approval_ids[0], approval_ids[1]);
|
||||
match parent_completed_command_execution {
|
||||
Ok(Ok(parent_completed_command_execution)) => {
|
||||
let ThreadItem::CommandExecution {
|
||||
id,
|
||||
status,
|
||||
aggregated_output,
|
||||
..
|
||||
} = parent_completed_command_execution
|
||||
else {
|
||||
unreachable!("loop ensures we break on parent command execution item");
|
||||
};
|
||||
assert_eq!(id, "call-zsh-fork-subcommand-decline");
|
||||
assert_eq!(status, CommandExecutionStatus::Declined);
|
||||
assert!(
|
||||
aggregated_output.is_none()
|
||||
|| aggregated_output == Some("exec command rejected by user".to_string())
|
||||
);
|
||||
|
||||
mcp.interrupt_turn_and_wait_for_aborted(thread.id, turn.id, DEFAULT_READ_TIMEOUT)
|
||||
.await?;
|
||||
mcp.interrupt_turn_and_wait_for_aborted(
|
||||
thread.id.clone(),
|
||||
turn.id.clone(),
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Ok(Err(error)) => return Err(error),
|
||||
Err(_) => {
|
||||
// Some zsh builds abort the turn immediately after the rejected
|
||||
// subcommand without emitting a parent `item/completed`.
|
||||
let completed_notif = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_notification_message("turn/completed"),
|
||||
)
|
||||
.await??;
|
||||
let completed: TurnCompletedNotification = serde_json::from_value(
|
||||
completed_notif
|
||||
.params
|
||||
.expect("turn/completed params must be present"),
|
||||
)?;
|
||||
assert_eq!(completed.thread_id, thread.id);
|
||||
assert_eq!(completed.turn.id, turn.id);
|
||||
assert_eq!(completed.turn.status, TurnStatus::Interrupted);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_zsh_test_mcp_process(codex_home: &Path, zdotdir: &Path) -> Result<McpProcess> {
|
||||
let zdotdir = zdotdir.to_string_lossy().into_owned();
|
||||
McpProcess::new_with_env(codex_home, &[("ZDOTDIR", Some(zdotdir.as_str()))]).await
|
||||
}
|
||||
|
||||
fn create_config_toml(
|
||||
codex_home: &Path,
|
||||
server_uri: &str,
|
||||
@@ -640,36 +734,24 @@ stream_max_retries = 0
|
||||
)
|
||||
}
|
||||
|
||||
fn find_test_zsh_path() -> Option<std::path::PathBuf> {
|
||||
if let Some(path) = std::env::var_os("CODEX_TEST_ZSH_PATH") {
|
||||
let path = std::path::PathBuf::from(path);
|
||||
if path.is_file() {
|
||||
return Some(path);
|
||||
}
|
||||
panic!(
|
||||
"CODEX_TEST_ZSH_PATH is set but is not a file: {}",
|
||||
path.display()
|
||||
fn find_test_zsh_path() -> Result<Option<std::path::PathBuf>> {
|
||||
let repo_root = codex_utils_cargo_bin::repo_root()?;
|
||||
let dotslash_zsh = repo_root.join("codex-rs/app-server/tests/suite/zsh");
|
||||
if !dotslash_zsh.is_file() {
|
||||
eprintln!(
|
||||
"skipping zsh fork test: shared zsh DotSlash file not found at {}",
|
||||
dotslash_zsh.display()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
for candidate in ["/bin/zsh", "/usr/bin/zsh"] {
|
||||
let path = Path::new(candidate);
|
||||
if path.is_file() {
|
||||
return Some(path.to_path_buf());
|
||||
match core_test_support::fetch_dotslash_file(&dotslash_zsh, None) {
|
||||
Ok(path) => return Ok(Some(path)),
|
||||
Err(error) => {
|
||||
eprintln!("failed to fetch vendored zsh via dotslash: {error:#}");
|
||||
}
|
||||
}
|
||||
|
||||
let shell = std::env::var_os("SHELL")?;
|
||||
let shell_path = std::path::PathBuf::from(shell);
|
||||
if shell_path
|
||||
.file_name()
|
||||
.is_some_and(|file_name| file_name == "zsh")
|
||||
&& shell_path.is_file()
|
||||
{
|
||||
return Some(shell_path);
|
||||
}
|
||||
|
||||
None
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn supports_exec_wrapper_intercept(zsh_path: &Path) -> bool {
|
||||
|
||||
72
codex-rs/app-server/tests/suite/zsh
Executable file
72
codex-rs/app-server/tests/suite/zsh
Executable file
@@ -0,0 +1,72 @@
|
||||
#!/usr/bin/env dotslash
|
||||
|
||||
// This is the patched zsh fork built by
|
||||
// `.github/workflows/shell-tool-mcp.yml` for the shell-tool-mcp package.
|
||||
// Fetching the prebuilt version via DotSlash makes it easier to write
|
||||
// integration tests that exercise the zsh fork behavior in app-server tests.
|
||||
//
|
||||
// TODO(mbolin): Currently, we use a .tgz artifact that includes binaries for
|
||||
// multiple platforms, but we could save a bit of space by making arch-specific
|
||||
// artifacts available in the GitHub releases and referencing those here.
|
||||
{
|
||||
"name": "codex-zsh",
|
||||
"platforms": {
|
||||
// macOS 13 builds (and therefore x86_64) were dropped in
|
||||
// https://github.com/openai/codex/pull/7295, so we only provide an
|
||||
// Apple Silicon build for now.
|
||||
"macos-aarch64": {
|
||||
"size": 53771483,
|
||||
"hash": "blake3",
|
||||
"digest": "ff664f63f5e1fa62762c9aff0aafa66cf196faf9b157f98ec98f59c152fc7bd3",
|
||||
"format": "tar.gz",
|
||||
"path": "package/vendor/aarch64-apple-darwin/zsh/macos-15/zsh",
|
||||
"providers": [
|
||||
{
|
||||
"url": "https://github.com/openai/codex/releases/download/rust-v0.104.0/codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
},
|
||||
{
|
||||
"type": "github-release",
|
||||
"repo": "openai/codex",
|
||||
"tag": "rust-v0.104.0",
|
||||
"name": "codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
}
|
||||
]
|
||||
},
|
||||
"linux-x86_64": {
|
||||
"size": 53771483,
|
||||
"hash": "blake3",
|
||||
"digest": "ff664f63f5e1fa62762c9aff0aafa66cf196faf9b157f98ec98f59c152fc7bd3",
|
||||
"format": "tar.gz",
|
||||
"path": "package/vendor/x86_64-unknown-linux-musl/zsh/ubuntu-24.04/zsh",
|
||||
"providers": [
|
||||
{
|
||||
"url": "https://github.com/openai/codex/releases/download/rust-v0.104.0/codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
},
|
||||
{
|
||||
"type": "github-release",
|
||||
"repo": "openai/codex",
|
||||
"tag": "rust-v0.104.0",
|
||||
"name": "codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
}
|
||||
]
|
||||
},
|
||||
"linux-aarch64": {
|
||||
"size": 53771483,
|
||||
"hash": "blake3",
|
||||
"digest": "ff664f63f5e1fa62762c9aff0aafa66cf196faf9b157f98ec98f59c152fc7bd3",
|
||||
"format": "tar.gz",
|
||||
"path": "package/vendor/aarch64-unknown-linux-musl/zsh/ubuntu-24.04/zsh",
|
||||
"providers": [
|
||||
{
|
||||
"url": "https://github.com/openai/codex/releases/download/rust-v0.104.0/codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
},
|
||||
{
|
||||
"type": "github-release",
|
||||
"repo": "openai/codex",
|
||||
"tag": "rust-v0.104.0",
|
||||
"name": "codex-shell-tool-mcp-npm-0.104.0.tgz"
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -25,6 +25,15 @@ use crate::invocation::ExtractHeredocError;
|
||||
/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
|
||||
pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
|
||||
|
||||
/// Special argv[1] flag used when the Codex executable self-invokes to run the
|
||||
/// internal `apply_patch` path.
|
||||
///
|
||||
/// Although this constant lives in `codex-apply-patch` (to avoid forcing
|
||||
/// `codex-arg0` to depend on `codex-core`), it is part of the "codex core"
|
||||
/// process-invocation contract between the apply-patch runtime and the arg0
|
||||
/// dispatcher.
|
||||
pub const CODEX_CORE_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch";
|
||||
|
||||
#[derive(Debug, Error, PartialEq)]
|
||||
pub enum ApplyPatchError {
|
||||
#[error(transparent)]
|
||||
|
||||
@@ -14,8 +14,9 @@ workspace = true
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
codex-apply-patch = { workspace = true }
|
||||
codex-core = { workspace = true }
|
||||
codex-linux-sandbox = { workspace = true }
|
||||
codex-shell-escalation = { workspace = true }
|
||||
codex-utils-home-dir = { workspace = true }
|
||||
dotenvy = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt-multi-thread"] }
|
||||
|
||||
@@ -3,7 +3,8 @@ use std::future::Future;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use codex_core::CODEX_APPLY_PATCH_ARG1;
|
||||
use codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1;
|
||||
use codex_utils_home_dir::find_codex_home;
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::symlink;
|
||||
use tempfile::TempDir;
|
||||
@@ -11,22 +12,36 @@ use tempfile::TempDir;
|
||||
const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
|
||||
const APPLY_PATCH_ARG0: &str = "apply_patch";
|
||||
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";
|
||||
#[cfg(unix)]
|
||||
const EXECVE_WRAPPER_ARG0: &str = "codex-execve-wrapper";
|
||||
const LOCK_FILENAME: &str = ".lock";
|
||||
const TOKIO_WORKER_STACK_SIZE_BYTES: usize = 16 * 1024 * 1024;
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
pub struct Arg0DispatchPaths {
|
||||
pub codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
pub main_execve_wrapper_exe: Option<PathBuf>,
|
||||
}
|
||||
|
||||
/// Keeps the per-session PATH entry alive and locked for the process lifetime.
|
||||
pub struct Arg0PathEntryGuard {
|
||||
_temp_dir: TempDir,
|
||||
_lock_file: File,
|
||||
paths: Arg0DispatchPaths,
|
||||
}
|
||||
|
||||
impl Arg0PathEntryGuard {
|
||||
fn new(temp_dir: TempDir, lock_file: File) -> Self {
|
||||
fn new(temp_dir: TempDir, lock_file: File, paths: Arg0DispatchPaths) -> Self {
|
||||
Self {
|
||||
_temp_dir: temp_dir,
|
||||
_lock_file: lock_file,
|
||||
paths,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn paths(&self) -> &Arg0DispatchPaths {
|
||||
&self.paths
|
||||
}
|
||||
}
|
||||
|
||||
pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
|
||||
@@ -38,6 +53,32 @@ pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("");
|
||||
|
||||
#[cfg(unix)]
|
||||
if exe_name == EXECVE_WRAPPER_ARG0 {
|
||||
let mut args = std::env::args();
|
||||
let _ = args.next();
|
||||
let file = match args.next() {
|
||||
Some(file) => file,
|
||||
None => std::process::exit(1),
|
||||
};
|
||||
let argv = args.collect::<Vec<_>>();
|
||||
|
||||
let runtime = match tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
{
|
||||
Ok(runtime) => runtime,
|
||||
Err(_) => std::process::exit(1),
|
||||
};
|
||||
let exit_code = runtime.block_on(
|
||||
codex_shell_escalation::run_shell_escalation_execve_wrapper(file, argv),
|
||||
);
|
||||
match exit_code {
|
||||
Ok(exit_code) => std::process::exit(exit_code),
|
||||
Err(_) => std::process::exit(1),
|
||||
}
|
||||
}
|
||||
|
||||
if exe_name == LINUX_SANDBOX_ARG0 {
|
||||
// Safety: [`run_main`] never returns.
|
||||
codex_linux_sandbox::run_main();
|
||||
@@ -46,7 +87,7 @@ pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
|
||||
}
|
||||
|
||||
let argv1 = args.next().unwrap_or_default();
|
||||
if argv1 == CODEX_APPLY_PATCH_ARG1 {
|
||||
if argv1 == CODEX_CORE_APPLY_PATCH_ARG1 {
|
||||
let patch_arg = args.next().and_then(|s| s.to_str().map(str::to_owned));
|
||||
let exit_code = match patch_arg {
|
||||
Some(patch_arg) => {
|
||||
@@ -58,7 +99,7 @@ pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
|
||||
}
|
||||
}
|
||||
None => {
|
||||
eprintln!("Error: {CODEX_APPLY_PATCH_ARG1} requires a UTF-8 PATCH argument.");
|
||||
eprintln!("Error: {CODEX_CORE_APPLY_PATCH_ARG1} requires a UTF-8 PATCH argument.");
|
||||
1
|
||||
}
|
||||
};
|
||||
@@ -95,33 +136,43 @@ pub fn arg0_dispatch() -> Option<Arg0PathEntryGuard> {
|
||||
/// 3. Derive the path to the current executable (so children can re-invoke the
|
||||
/// sandbox) when running on Linux.
|
||||
/// 4. Execute the provided async `main_fn` inside that runtime, forwarding any
|
||||
/// error. Note that `main_fn` receives `codex_linux_sandbox_exe:
|
||||
/// Option<PathBuf>`, as an argument, which is generally needed as part of
|
||||
/// constructing [`codex_core::config::Config`].
|
||||
/// error. Note that `main_fn` receives [`Arg0DispatchPaths`], which
|
||||
/// contains the helper executable paths needed to construct
|
||||
/// [`codex_core::config::Config`].
|
||||
///
|
||||
/// This function should be used to wrap any `main()` function in binary crates
|
||||
/// in this workspace that depends on these helper CLIs.
|
||||
pub fn arg0_dispatch_or_else<F, Fut>(main_fn: F) -> anyhow::Result<()>
|
||||
where
|
||||
F: FnOnce(Option<PathBuf>) -> Fut,
|
||||
F: FnOnce(Arg0DispatchPaths) -> Fut,
|
||||
Fut: Future<Output = anyhow::Result<()>>,
|
||||
{
|
||||
// Retain the TempDir so it exists for the lifetime of the invocation of
|
||||
// this executable. Admittedly, we could invoke `keep()` on it, but it
|
||||
// would be nice to avoid leaving temporary directories behind, if possible.
|
||||
let _path_entry = arg0_dispatch();
|
||||
let path_entry = arg0_dispatch();
|
||||
|
||||
// Regular invocation – create a Tokio runtime and execute the provided
|
||||
// async entry-point.
|
||||
let runtime = build_runtime()?;
|
||||
runtime.block_on(async move {
|
||||
let codex_linux_sandbox_exe: Option<PathBuf> = if cfg!(target_os = "linux") {
|
||||
std::env::current_exe().ok()
|
||||
} else {
|
||||
None
|
||||
let current_exe = std::env::current_exe().ok();
|
||||
let paths = Arg0DispatchPaths {
|
||||
codex_linux_sandbox_exe: if cfg!(target_os = "linux") {
|
||||
current_exe.or_else(|| {
|
||||
path_entry
|
||||
.as_ref()
|
||||
.and_then(|path_entry| path_entry.paths().codex_linux_sandbox_exe.clone())
|
||||
})
|
||||
} else {
|
||||
None
|
||||
},
|
||||
main_execve_wrapper_exe: path_entry
|
||||
.as_ref()
|
||||
.and_then(|path_entry| path_entry.paths().main_execve_wrapper_exe.clone()),
|
||||
};
|
||||
|
||||
main_fn(codex_linux_sandbox_exe).await
|
||||
main_fn(paths).await
|
||||
})
|
||||
}
|
||||
|
||||
@@ -139,7 +190,7 @@ const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";
|
||||
/// Security: Do not allow `.env` files to create or modify any variables
|
||||
/// with names starting with `CODEX_`.
|
||||
fn load_dotenv() {
|
||||
if let Ok(codex_home) = codex_core::config::find_codex_home()
|
||||
if let Ok(codex_home) = find_codex_home()
|
||||
&& let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env"))
|
||||
{
|
||||
set_filtered(iter);
|
||||
@@ -175,7 +226,7 @@ where
|
||||
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
|
||||
/// be called before multiple threads are spawned.
|
||||
pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<Arg0PathEntryGuard> {
|
||||
let codex_home = codex_core::config::find_codex_home()?;
|
||||
let codex_home = find_codex_home()?;
|
||||
#[cfg(not(debug_assertions))]
|
||||
{
|
||||
// Guard against placing helpers in system temp directories outside debug builds.
|
||||
@@ -226,6 +277,8 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<Arg0PathEntryGu
|
||||
MISSPELLED_APPLY_PATCH_ARG0,
|
||||
#[cfg(target_os = "linux")]
|
||||
LINUX_SANDBOX_ARG0,
|
||||
#[cfg(unix)]
|
||||
EXECVE_WRAPPER_ARG0,
|
||||
] {
|
||||
let exe = std::env::current_exe()?;
|
||||
|
||||
@@ -242,7 +295,7 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<Arg0PathEntryGu
|
||||
&batch_script,
|
||||
format!(
|
||||
r#"@echo off
|
||||
"{}" {CODEX_APPLY_PATCH_ARG1} %*
|
||||
"{}" {CODEX_CORE_APPLY_PATCH_ARG1} %*
|
||||
"#,
|
||||
exe.display()
|
||||
),
|
||||
@@ -270,7 +323,30 @@ pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<Arg0PathEntryGu
|
||||
std::env::set_var("PATH", updated_path_env_var);
|
||||
}
|
||||
|
||||
Ok(Arg0PathEntryGuard::new(temp_dir, lock_file))
|
||||
let paths = Arg0DispatchPaths {
|
||||
codex_linux_sandbox_exe: {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
Some(path.join(LINUX_SANDBOX_ARG0))
|
||||
}
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
{
|
||||
None
|
||||
}
|
||||
},
|
||||
main_execve_wrapper_exe: {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
Some(path.join(EXECVE_WRAPPER_ARG0))
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
None
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
Ok(Arg0PathEntryGuard::new(temp_dir, lock_file, paths))
|
||||
}
|
||||
|
||||
fn janitor_cleanup(temp_root: &Path) -> std::io::Result<()> {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user