mirror of https://github.com/openai/codex.git
synced 2026-05-08 13:26:34 +00:00

Compare commits: efrazer/co...dev/mzeng/
2 commits

| Author | SHA1 | Date |
|---|---|---|
| | afe57f7db8 | |
| | 9657104a7b | |

.bazelrc (44 changed lines)
@@ -29,13 +29,10 @@ common:linux --test_env=PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
common:macos --test_env=PATH=/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin

# Pass through some env vars Windows needs to use powershell?
common:windows --test_env=PATH
common:windows --test_env=SYSTEMROOT
common:windows --test_env=COMSPEC
common:windows --test_env=WINDIR
-# Rust's libtest harness runs test bodies on std-spawned threads. The default
-# 2 MiB stack can be too small for large async test futures on Windows CI; see
-# https://github.com/openai/codex/pull/19067 for the motivating failure.
common --test_env=RUST_MIN_STACK=8388608 # 8 MiB

common --test_output=errors
common --bes_results_url=https://app.buildbuddy.io/invocation/

@@ -68,10 +65,6 @@ common:ci --verbose_failures
common:ci --build_metadata=REPO_URL=https://github.com/openai/codex.git
common:ci --build_metadata=ROLE=CI
common:ci --build_metadata=VISIBILITY=PUBLIC
-# rules_rust derives debug level from Bazel toolchain/compilation-mode settings,
-# not Cargo profiles. Keep CI Rust actions explicit and lean.
-common:ci --@rules_rust//rust/settings:extra_rustc_flag=-Cdebuginfo=0
-common:ci --@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebuginfo=0

# Disable disk cache in CI since we have a remote one and aren't using persistent workers.
common:ci --disk_cache=

@@ -79,10 +72,6 @@ common:ci --disk_cache=
# Shared config for the main Bazel CI workflow.
common:ci-bazel --config=ci
common:ci-bazel --build_metadata=TAG_workflow=bazel
-# Bazel CI cross-compiles in several legs, and the V8-backed code-mode tests
-# are not stable in that setup yet. Keep running the rest of the Rust
-# integration suites through the workspace-root launcher.
-common:ci-bazel --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::

# Shared config for Bazel-backed Rust linting.
build:clippy --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect

@@ -93,8 +82,6 @@ build:clippy --@rules_rust//rust/settings:clippy.toml=//codex-rs:clippy.toml
# in their own `Cargo.toml`, but `rules_rust` Bazel clippy does not read Cargo lint levels.
# `clippy.toml` can configure lint behavior, but it cannot set allow/warn/deny/forbid levels.
build:clippy --@rules_rust//rust/settings:clippy_flag=-Dwarnings
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_invalid_type
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::await_holding_lock
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::expect_used
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::identity_op
build:clippy --@rules_rust//rust/settings:clippy_flag=--deny=clippy::manual_clamp

@@ -157,25 +144,6 @@ common:ci-macos --config=remote
common:ci-macos --strategy=remote
common:ci-macos --strategy=TestRunner=darwin-sandbox,local

-# On Windows, use Linux remote execution for build actions but keep test actions
-# on the Windows runner so Bazel's normal test sharding and flaky-test retries
-# still run against Windows binaries.
-common:ci-windows-cross --config=ci-windows
-common:ci-windows-cross --build_metadata=TAG_windows_cross_compile=true
-common:ci-windows-cross --config=remote
-common:ci-windows-cross --host_platform=//:rbe
-common:ci-windows-cross --strategy=remote
-common:ci-windows-cross --strategy=TestRunner=local
-common:ci-windows-cross --local_test_jobs=4
-common:ci-windows-cross --test_env=RUST_TEST_THREADS=1
-# Native Windows CI still covers the PowerShell tests. The cross-built gnullvm
-# binaries currently hang in PowerShell AST parser tests when those binaries are
-# run on the Windows runner.
-common:ci-windows-cross --test_env=CODEX_BAZEL_TEST_SKIP_FILTERS=suite::code_mode::,powershell
-common:ci-windows-cross --platforms=//:windows_x86_64_gnullvm
-common:ci-windows-cross --extra_execution_platforms=//:rbe,//:windows_x86_64_msvc
-common:ci-windows-cross --extra_toolchains=//:windows_gnullvm_tests_on_msvc_host_toolchain

# Linux-only V8 CI config.
common:ci-v8 --config=ci
common:ci-v8 --build_metadata=TAG_workflow=v8

@@ -183,15 +151,5 @@ common:ci-v8 --build_metadata=TAG_os=linux
common:ci-v8 --config=remote
common:ci-v8 --strategy=remote

-# Source-built Bazel V8 artifacts use the in-process sandbox by default. This
-# does not affect Cargo's default prebuilt rusty_v8 path.
-common --@v8//:v8_enable_pointer_compression=True
-common --@v8//:v8_enable_sandbox=True

-# Keep currently published rusty_v8 release artifacts non-sandboxed until the
-# artifact migration ships matching Rust feature selection for Cargo consumers.
-common:v8-release-compat --@v8//:v8_enable_pointer_compression=False
-common:v8-release-compat --@v8//:v8_enable_sandbox=False

# Optional per-user local overrides.
try-import %workspace%/user.bazelrc

@@ -1,6 +1,5 @@
iTerm
iTerm2
psuedo
SOM
te
TE

@@ -1,6 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
-skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new
+skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl,frame*.txt,*.snap,*.snap.new,*meriyah.umd.min.js
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser,iTerm,iterm2,iterm,te,TE,PASE,SEH

@@ -1,11 +0,0 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "codex"

[setup]
script = ""

[[actions]]
name = "Run"
icon = "run"
command = "cargo +1.93.0 run --manifest-path=codex-rs/Cargo.toml --bin codex -- -c mcp_oauth_credentials_store=file"

@@ -27,10 +27,10 @@ Accept any of the following:
2. Run the watcher script to snapshot PR/review/CI state (or consume each streamed snapshot from `--watch`).
3. Inspect the `actions` list in the JSON response.
4. If `diagnose_ci_failure` is present, inspect failed run logs and classify the failure.
-5. If the failure is likely caused by the current branch, patch code locally, commit, and push. Do not patch random flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are unrelated to the branch.
+5. If the failure is likely caused by the current branch, patch code locally, commit, and push.
6. If `process_review_comment` is present, inspect surfaced review items and decide whether to address them.
7. If a review item is actionable and correct, patch code locally, commit, push, and then mark the associated review thread/comment as resolved once the fix is on GitHub.
-8. Do not post replies to human-authored review comments/threads unless the user explicitly confirms the exact response. If a human review item is non-actionable, already addressed, or not valid, surface the item and recommended response to the user instead of replying on GitHub.
+8. If a review item from another author is non-actionable, already addressed, or not valid, post one reply on the comment/thread explaining that decision (for example answering the question or explaining why no change is needed). If the watcher later surfaces your own reply, treat that self-authored item as already handled and do not reply again.
9. If the failure is likely flaky/unrelated and `retry_failed_checks` is present, rerun failed jobs with `--retry-failed-now`.
10. If both actionable review feedback and `retry_failed_checks` are present, prioritize review feedback first; a new commit will retrigger CI, so avoid rerunning flaky checks on the old SHA unless you intentionally defer the review change.
11. On every loop, look for newly surfaced review feedback before acting on CI failures or mergeability state, then verify mergeability / merge-conflict status (for example via `gh pr view`) alongside CI.
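
As an illustration of steps 2-3, the loop below consumes streamed snapshots. This is a minimal sketch, not part of the skill: it assumes `--watch` emits one JSON snapshot per line on stdout, and it uses snapshot keys (`actions`, `new_review_items`, `failed_jobs`) taken from the `gh_pr_watch.py` diff further down; the PR number is a placeholder.

```python
import json
import subprocess

# Minimal sketch of a watcher consumer; assumes one JSON snapshot per line.
proc = subprocess.Popen(
    [
        "python3",
        ".codex/skills/babysit-pr/scripts/gh_pr_watch.py",
        "--pr", "12345",  # placeholder PR number
        "--watch",
    ],
    stdout=subprocess.PIPE,
    text=True,
)
for line in proc.stdout:
    if not line.strip():
        continue
    snapshot = json.loads(line)
    actions = snapshot.get("actions", [])
    if "process_review_comment" in actions:
        for item in snapshot.get("new_review_items", []):
            print("review item:", item)  # decide: patch vs. reply
    if "diagnose_ci_failure" in actions:
        for job in snapshot.get("failed_jobs", []):  # pre-change snapshot key
            print("failed job:", job.get("job_name"), job.get("logs_endpoint"))
    if "retry_failed_checks" in actions:
        print("flaky candidate; rerunning failed jobs is allowed")
```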

@@ -69,18 +69,12 @@ python3 .codex/skills/babysit-pr/scripts/gh_pr_watch.py --pr <number-or-url> --o
Use `gh` commands to inspect failed runs before deciding to rerun.

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/<owner>/<repo>/actions/runs/<run-id>/jobs -X GET -f per_page=100`
- `gh api repos/<owner>/<repo>/actions/jobs/<job-id>/logs > /tmp/codex-gh-job-<job-id>-logs.zip`
-- `gh run view <run-id> --log-failed` as a fallback after the overall workflow run is complete
+- `gh run view <run-id> --log-failed`

-`gh run view --log-failed` is workflow-run scoped and may not expose failed-job logs until the overall run finishes. For faster diagnosis, poll the run's jobs first and, as soon as a specific job has failed, fetch that job's logs directly from the Actions job logs endpoint. The watcher includes a `failed_jobs` list with each failed job's `job_id` and `logs_endpoint` when GitHub exposes one.

-Prefer treating failures as branch-related when failed-job logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).
+Prefer treating failures as branch-related when logs point to changed code (compile/test/lint/typecheck/snapshots/static analysis in touched areas).

Prefer treating failures as flaky/unrelated when logs show transient infra/external issues (timeouts, runner provisioning failures, registry/network outages, GitHub Actions infra errors).

-Do not attempt to fix flaky/unrelated failures by changing tests, build scripts, CI configuration, dependency pins, or infrastructure-adjacent code unless the logs clearly connect the failure to the PR branch. For flaky/unrelated failures, rerun only when the watcher recommends `retry_failed_checks`; otherwise wait or stop for user help.

If classification is ambiguous, perform one manual diagnosis attempt before choosing rerun.

Read `.codex/skills/babysit-pr/references/heuristics.md` for a concise checklist.
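
A minimal sketch of the "poll jobs first, then fetch failed-job logs directly" approach described above, shelling out to the same `gh` endpoints listed in the bullets; the repo and run id are placeholders:

```python
import json
import subprocess

# Illustrative only: poll the run's jobs and grab logs for any failed job
# without waiting for the whole workflow run to finish.
repo, run_id = "openai/codex", "123456789"  # placeholders

jobs_raw = subprocess.run(
    ["gh", "api", f"repos/{repo}/actions/runs/{run_id}/jobs",
     "-X", "GET", "-f", "per_page=100"],
    check=True, capture_output=True, text=True,
).stdout
for job in json.loads(jobs_raw).get("jobs", []):
    if job.get("conclusion") == "failure":
        job_id = job["id"]
        # Fetch this job's logs directly from the Actions job logs endpoint.
        logs = subprocess.run(
            ["gh", "api", f"repos/{repo}/actions/jobs/{job_id}/logs"],
            check=True, capture_output=True,
        ).stdout
        with open(f"/tmp/codex-gh-job-{job_id}-logs.zip", "wb") as fh:
            fh.write(logs)
```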

@@ -105,8 +99,7 @@ When you agree with a comment and it is actionable:
5. Resume watching on the new SHA immediately (do not stop after reporting the push).
6. If monitoring was running in `--watch` mode, restart `--watch` immediately after the push in the same turn; do not wait for the user to ask again.

-Do not post replies to human-authored GitHub review comments/threads automatically. If you disagree with a human comment, believe it is non-actionable/already addressed, or need to answer a question, report the item to the user with a suggested response and wait for explicit confirmation before posting anything on GitHub. If the user approves a response, prefix it with `[codex]` so it is clear the response is automated and not from the human user.
-If the watcher later surfaces your own approved reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
+If you disagree or the comment is non-actionable/already addressed, reply once directly on the GitHub comment/thread so the reviewer gets an explicit answer, then continue the watcher loop. If the watcher later surfaces your own reply because the authenticated operator is treated as a trusted review author, treat that self-authored item as already handled and do not reply again.
If a code review comment/thread is already marked as resolved in GitHub, treat it as non-actionable and safely ignore it unless new unresolved follow-up feedback appears.

## Git Safety Rules

@@ -132,11 +125,11 @@ Use this loop in a live Codex session:
2. Read `actions`.
3. First check whether the PR is now merged or otherwise closed; if so, report that terminal state and stop polling immediately.
4. Check CI summary, new review items, and mergeability/conflict status.
-5. Diagnose CI failures and classify branch-related vs flaky/unrelated. If the overall run is still pending but `failed_jobs` already includes a failed job, fetch that job's logs and diagnose immediately instead of waiting for the whole workflow run to finish. Patch only when the failure is branch-related.
-6. For each surfaced review item from another author, patch/commit/push and then resolve it if it is actionable. If it is non-actionable, already addressed, or requires a written answer, surface it to the user with a suggested response instead of posting automatically. If a later snapshot surfaces your own approved reply, treat it as informational and continue without responding again.
+5. Diagnose CI failures and classify branch-related vs flaky/unrelated.
+6. For each surfaced review item from another author, either reply once with an explanation if it is non-actionable or patch/commit/push and then resolve it if it is actionable. If a later snapshot surfaces your own reply, treat it as informational and continue without responding again.
7. Process actionable review comments before flaky reruns when both are present; if a review fix requires a commit, push it and skip rerunning failed checks on the old SHA.
-8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit. Do not make code changes for unrelated flakes or infrastructure failures just to get CI green.
-9. If you pushed a commit, resolved a review thread, or triggered a rerun, report the action briefly and continue polling (do not stop). If a human review comment needs a written GitHub response, stop and ask for confirmation before posting.
+8. Retry failed checks only when `retry_failed_checks` is present and you are not about to replace the current SHA with a review/CI fix commit.
+9. If you pushed a commit, resolved a review thread, replied to a review comment, or triggered a rerun, report the action briefly and continue polling (do not stop).
10. After a review-fix push, proactively restart continuous monitoring (`--watch`) in the same turn unless a strict stop condition has already been reached.
11. If everything is passing, mergeable, not blocked on required review approval, and there are no unaddressed review items, report that the PR is currently ready to merge but keep the watcher running so new review comments are surfaced quickly while the PR remains open.
12. If blocked on a user-help-required issue (infra outage, exhausted flaky retries, unclear reviewer request, permissions), report the blocker and stop.

@@ -1,4 +1,4 @@
interface:
  display_name: "PR Babysitter"
  short_description: "Watch PR review comments, CI, and merge conflicts"
-  default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Do not post replies to human-authored review comments unless the user explicitly confirms the exact response. Do not patch unrelated flaky tests, CI infrastructure, dependency outages, runner issues, or other failures that are not caused by the branch. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."
+  default_prompt: "Babysit the current PR: monitor reviewer comments, CI, and merge-conflict status (prefer the watcher’s --watch mode for live monitoring); surface new review feedback before acting on CI or mergeability work, fix valid issues, push updates, and rerun flaky failures up to 3 times. Keep exactly one watcher session active for the PR (do not leave duplicate --watch terminals running). If you pause monitoring to patch review/CI feedback, restart --watch yourself immediately after the push in the same turn. If a watcher is still running and no strict stop condition has been reached, the task is still in progress: keep consuming watcher output and sending progress updates instead of ending the turn. Do not treat a green + mergeable PR as a terminal stop while it is still open; continue polling autonomously after any push/rerun so newly posted review comments are surfaced until a strict terminal stop condition is reached or the user interrupts."

@@ -23,11 +23,9 @@ Used to discover failed workflow runs and rerunnable run IDs.
### Failed log inspection

- `gh run view <run-id> --json jobs,name,workflowName,conclusion,status,url,headSha`
- `gh api repos/{owner}/{repo}/actions/runs/{run_id}/jobs -X GET -f per_page=100`
- `gh api repos/{owner}/{repo}/actions/jobs/{job_id}/logs > /tmp/codex-gh-job-{job_id}-logs.zip`
- `gh run view <run-id> --log-failed`

-Used by Codex to classify branch-related vs flaky/unrelated failures. Prefer the direct job log endpoint as soon as a job has failed because `gh run view --log-failed` may not produce failed-job logs until the overall workflow run completes.
+Used by Codex to classify branch-related vs flaky/unrelated failures.

### Retry failed jobs only

@@ -72,11 +70,3 @@ Reruns only failed jobs (and dependencies) for a workflow run.
- `conclusion`
- `html_url`
- `head_sha`

-### Actions run jobs API (`jobs[]`)
-
-- `id`
-- `name`
-- `status`
-- `conclusion`
-- `html_url`

@@ -18,8 +18,6 @@ Treat as **likely flaky or unrelated** when evidence points to transient or exte
- Cloud/service rate limits or transient API outages
- Non-deterministic failures in unrelated integration tests with known flake patterns

-Do not patch likely flaky/unrelated failures. Use the retry budget for rerunnable failures, wait for pending jobs, or stop and report the blocker when the failure is persistent or infrastructure-owned.

If uncertain, inspect failed logs once before choosing rerun.

## Decision tree (fix vs rerun vs stop)

@@ -27,11 +25,9 @@ If uncertain, inspect failed logs once before choosing rerun.
1. If PR is merged/closed: stop.
2. If there are failed checks:
   - Diagnose first.
-   - If checks are still pending but an individual job has already failed: fetch that job's logs and diagnose now.
   - If branch-related: fix locally, commit, push.
   - If likely flaky/unrelated and all checks for the current SHA are terminal: rerun failed jobs.
-   - If likely flaky/unrelated and not safely rerunnable: stop and report the blocker; do not edit unrelated tests, build scripts, CI configuration, dependency pins, or infrastructure code.
-   - If checks are still pending and no failed job is available yet: wait.
+   - If checks are still pending: wait.
3. If flaky reruns for the same SHA reach the configured limit (default 3): stop and report persistent failure.
4. Independently, process any new human review comments.
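
For illustration, the decision tree reads roughly like the Python below. This is a hypothetical rendering, not watcher code: `classify` stands in for the manual log diagnosis, while the snapshot keys and action names come from `gh_pr_watch.py` later in this compare.

```python
def classify(snapshot):
    # Stand-in for manual log diagnosis; always defers in this sketch.
    return "flaky"


def decide(snapshot, retries_used, max_retries=3):
    """Hypothetical rendering of the fix/rerun/stop decision tree above."""
    pr = snapshot["pr"]
    if pr["merged"] or pr["closed"]:
        return "stop"
    checks = snapshot["checks"]
    if checks["failed_count"] > 0:
        if classify(snapshot) == "branch":
            return "fix_commit_push"
        if not checks["all_terminal"]:
            return "wait"
        if retries_used >= max_retries:
            return "stop_exhausted_retries"
        return "retry_failed_checks"
    if snapshot["new_review_items"]:
        return "process_review_comment"
    return "wait"
```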

@@ -44,15 +40,12 @@ Address the comment when:
- The requested change does not conflict with the user’s intent or recent guidance.
- The change can be made safely without unrelated refactors.

-Fix valid human review feedback in code when possible, but do not post a GitHub reply to a human-authored comment/thread unless the user explicitly confirms the exact response.

Do not auto-fix when:

- The comment is ambiguous and needs clarification.
- The request conflicts with explicit user instructions.
- The proposed change requires product/design decisions the user has not made.
- The codebase is in a dirty/unrelated state that makes safe editing uncertain.
-- The comment only needs a written answer or disagreement response; propose the reply to the user instead of posting it automatically.

## Stop-and-ask conditions

@@ -63,4 +56,3 @@ Stop and ask the user instead of continuing automatically when:
- The PR branch cannot be pushed.
- CI failures persist after the flaky retry budget.
- Reviewer feedback requires a product decision or cross-team coordination.
-- A human review comment requires a written GitHub reply instead of a code change.

@@ -338,66 +338,6 @@ def failed_runs_from_workflow_runs(runs, head_sha):
    return failed_runs


def get_jobs_for_run(repo, run_id):
    endpoint = f"repos/{repo}/actions/runs/{run_id}/jobs"
    data = gh_json(["api", endpoint, "-X", "GET", "-f", "per_page=100"], repo=repo)
    if not isinstance(data, dict):
        raise GhCommandError("Unexpected payload from actions run jobs API")
    jobs = data.get("jobs") or []
    if not isinstance(jobs, list):
        raise GhCommandError("Expected `jobs` to be a list")
    return jobs


def failed_jobs_from_workflow_runs(repo, runs, head_sha):
    failed_jobs = []
    for run in runs:
        if not isinstance(run, dict):
            continue
        if str(run.get("head_sha") or "") != head_sha:
            continue
        run_id = run.get("id")
        if run_id in (None, ""):
            continue
        run_status = str(run.get("status") or "")
        run_conclusion = str(run.get("conclusion") or "")
        if run_status.lower() == "completed" and run_conclusion not in FAILED_RUN_CONCLUSIONS:
            continue
        jobs = get_jobs_for_run(repo, run_id)
        for job in jobs:
            if not isinstance(job, dict):
                continue
            conclusion = str(job.get("conclusion") or "")
            if conclusion not in FAILED_RUN_CONCLUSIONS:
                continue
            job_id = job.get("id")
            logs_endpoint = None
            if job_id not in (None, ""):
                logs_endpoint = f"repos/{repo}/actions/jobs/{job_id}/logs"
            failed_jobs.append(
                {
                    "run_id": run_id,
                    "workflow_name": run.get("name") or run.get("display_title") or "",
                    "run_status": run_status,
                    "run_conclusion": run_conclusion,
                    "job_id": job_id,
                    "job_name": str(job.get("name") or ""),
                    "status": str(job.get("status") or ""),
                    "conclusion": conclusion,
                    "html_url": str(job.get("html_url") or ""),
                    "logs_endpoint": logs_endpoint,
                }
            )
    failed_jobs.sort(
        key=lambda item: (
            str(item.get("workflow_name") or ""),
            str(item.get("job_name") or ""),
            str(item.get("job_id") or ""),
        )
    )
    return failed_jobs


def get_authenticated_login():
    data = gh_json(["api", "user"])
    if not isinstance(data, dict) or not data.get("login"):

@@ -628,7 +568,7 @@ def is_pr_ready_to_merge(pr, checks_summary, new_review_items):
    return True


-def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_items, retries_used, max_retries):
+def recommend_actions(pr, checks_summary, failed_runs, new_review_items, retries_used, max_retries):
    actions = []
    if pr["closed"] or pr["merged"]:
        if new_review_items:

@@ -643,7 +583,7 @@ def recommend_actions(pr, checks_summary, failed_runs, failed_jobs, new_review_i
    if new_review_items:
        actions.append("process_review_comment")

-    has_failed_pr_checks = checks_summary["failed_count"] > 0 or bool(failed_jobs)
+    has_failed_pr_checks = checks_summary["failed_count"] > 0
    if has_failed_pr_checks:
        if checks_summary["all_terminal"] and retries_used >= max_retries:
            actions.append("stop_exhausted_retries")

@@ -681,14 +621,12 @@ def collect_snapshot(args):
    checks_summary = summarize_checks(checks)
    workflow_runs = get_workflow_runs_for_sha(pr["repo"], pr["head_sha"])
    failed_runs = failed_runs_from_workflow_runs(workflow_runs, pr["head_sha"])
-    failed_jobs = failed_jobs_from_workflow_runs(pr["repo"], workflow_runs, pr["head_sha"])

    retries_used = current_retry_count(state, pr["head_sha"])
    actions = recommend_actions(
        pr,
        checks_summary,
        failed_runs,
-        failed_jobs,
        new_review_items,
        retries_used,
        args.max_flaky_retries,

@@ -703,7 +641,6 @@ def collect_snapshot(args):
        "pr": pr,
        "checks": checks_summary,
        "failed_runs": failed_runs,
-        "failed_jobs": failed_jobs,
        "new_review_items": new_review_items,
        "actions": actions,
        "retry_state": {

@@ -75,11 +75,6 @@ def test_collect_snapshot_fetches_review_items_before_ci(monkeypatch, tmp_path):
        "failed_runs_from_workflow_runs",
        lambda *args, **kwargs: call_order.append("failed_runs") or [],
    )
-    monkeypatch.setattr(
-        gh_pr_watch,
-        "failed_jobs_from_workflow_runs",
-        lambda *args, **kwargs: call_order.append("failed_jobs") or [],
-    )
    monkeypatch.setattr(
        gh_pr_watch,
        "recommend_actions",

@@ -105,7 +100,6 @@ def test_recommend_actions_prioritizes_review_comments():
        sample_pr(),
        sample_checks(failed_count=1),
        [{"run_id": 99}],
-        [],
        [{"kind": "review_comment", "id": "1"}],
        0,
        3,

@@ -125,7 +119,6 @@ def test_run_watch_keeps_polling_open_ready_to_merge_pr(monkeypatch):
        "pr": sample_pr(),
        "checks": sample_checks(),
        "failed_runs": [],
-        "failed_jobs": [],
        "new_review_items": [],
        "actions": ["ready_to_merge"],
        "retry_state": {

@@ -160,58 +153,3 @@

    assert sleeps == [30, 30]
    assert [event for event, _ in events] == ["snapshot", "snapshot"]


def test_failed_jobs_include_direct_logs_endpoint(monkeypatch):
    jobs_by_run = {
        99: [
            {
                "id": 555,
                "name": "unit tests",
                "status": "completed",
                "conclusion": "failure",
                "html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
            },
            {
                "id": 556,
                "name": "lint",
                "status": "completed",
                "conclusion": "success",
            },
        ]
    }

    monkeypatch.setattr(
        gh_pr_watch,
        "get_jobs_for_run",
        lambda repo, run_id: jobs_by_run[run_id],
    )

    failed_jobs = gh_pr_watch.failed_jobs_from_workflow_runs(
        "openai/codex",
        [
            {
                "id": 99,
                "name": "CI",
                "status": "in_progress",
                "conclusion": "",
                "head_sha": "abc123",
            }
        ],
        "abc123",
    )

    assert failed_jobs == [
        {
            "run_id": 99,
            "workflow_name": "CI",
            "run_status": "in_progress",
            "run_conclusion": "",
            "job_id": 555,
            "job_name": "unit tests",
            "status": "completed",
            "conclusion": "failure",
            "html_url": "https://github.com/openai/codex/actions/runs/99/job/555",
            "logs_endpoint": "repos/openai/codex/actions/jobs/555/logs",
        }
    ]

@@ -1,12 +0,0 @@
---
name: code-breaking-changes
description: Breaking changes
---

Search for breaking changes in external integration surfaces:
- app-server APIs
- CLI parameters
- configuration loading
- resuming sessions from existing rollouts

Do not stop after finding one issue; analyze all possible ways breaking changes can happen.

@@ -1,11 +0,0 @@
---
name: code-review-change-size
description: Change size guidance (800 lines)
---

Unless the change is mechanical the total number of changed lines should not exceed 800 lines.
For complex logic changes the size should be under 500 lines.

If the change is larger, explain whether it can be split into reviewable stages and identify the smallest coherent stage to land first.
Base the staging suggestion on the actual diff, dependencies, and affected call sites.

@@ -1,13 +0,0 @@
---
name: code-review-context
description: Model visible context
---

Codex maintains a context (history of messages) that is sent to the model in inference requests.

1. No history rewrite - the context must be built up incrementally.
2. Avoid frequent changes to context that cause cache misses.
3. No unbounded items - everything injected in the model context must have a bounded size and a hard cap.
4. No items larger than 10K tokens.
5. Highlight new individual items that can cross >1k tokens as P0. These need an additional manual review.
6. All injected fragments must be defined as structs in `core/context` and implement ContextualUserFragment trait

@@ -1,14 +0,0 @@
---
name: code-review-testing
description: Test authoring guidance
---

For agent changes prefer integration tests over unit tests. Integration tests are under `core/suite` and use `test_codex` to set up a test instance of codex.

Features that change the agent logic MUST add an integration test:
- Provide a list of major logic changes and user-facing behaviors that need to be tested.

If unit tests are needed, put them in a dedicated test file (*_tests.rs).
Avoid test-only functions in the main implementation.

Check whether there are existing helpers to make tests more streamlined and readable.

@@ -1,14 +0,0 @@
---
name: code-review
description: Run a final code review on a pull request
---

Use subagents to review code using all code-review-* skills in this repository other than this orchestrator. One subagent per skill. Pass full skill path to subagents. Use xhigh reasoning.

You must return every single issue from every subagent. You can return an unlimited number of findings.
Use raw Markdown to report findings.
Number findings for ease of reference.
Each finding must include a specific file path and line number.

If the GitHub user running the review is the owner of the pull request add a `code-reviewed` label.
Do not leave GitHub comments unless explicitly asked.

@@ -1,127 +0,0 @@
---
name: codex-issue-digest
description: Run a GitHub issue digest for openai/codex by feature-area labels, all areas, and configurable time windows. Use when asked to summarize recent Codex bug reports or enhancement requests, especially for owner-specific labels such as tui, exec, app, or similar areas.
---

# Codex Issue Digest

## Objective

Produce a headline-first, insight-oriented digest of `openai/codex` issues for the requested feature-area labels over the previous 24 hours by default. Honor a different duration when the user asks for one, for example "past week" or "48 hours". Default to a summary-only response; include details only when requested.

Include only issues that currently have `bug` or `enhancement` plus at least one requested owner label. If the user asks for all areas or all labels, collect `bug`/`enhancement` issues across all labels.

## Inputs

- Feature-area labels, for example `tui exec`
- `all areas` / `all labels` to scan all current feature labels
- Optional repo override, default `openai/codex`
- Optional time window, default previous 24 hours; examples: `48h`, `7d`, `1w`, `past week`
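
For reference, these window strings are normalized to hours by the collector's `parse_duration_hours` (defined in `collect_issue_digest.py`, shown at the end of this compare). A small sketch of the mapping, assuming the script directory is importable:

```python
import sys

# Assumption: the collector's directory is on sys.path; adjust as needed.
sys.path.insert(0, ".codex/skills/codex-issue-digest/scripts")
from collect_issue_digest import parse_duration_hours

assert parse_duration_hours("48h") == 48.0
assert parse_duration_hours("7d") == 168.0
assert parse_duration_hours("1w") == 168.0
assert parse_duration_hours("past week") == 168.0  # "past"/"last" prefix is stripped
```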

## Workflow

1. Run the collector from a current Codex repo checkout:

```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```

Use `--window "past week"` or `--window-hours 168` when the user asks for a non-default duration. Use `--all-labels` when the user says all areas or all labels.

2. Use the JSON as the source of truth. It includes new issues, new issue comments, new reactions/upvotes, current labels, current reaction counts, model-ready `summary_inputs`, and detailed `digest_rows`.
3. Choose the output mode from the user's request:
   - Default mode: start the report with `## Summary` and do not emit `## Details`.
   - Details-upfront mode: if the user asks for details, a table, a full digest, "include details", or similar, start with `## Summary`, then include `## Details`.
   - Follow-up details mode: if the user asks for more detail after a summary-only digest, produce `## Details` from the existing collector JSON when it is still available; otherwise rerun the collector.
4. In `## Summary`, write a headline-first executive summary:
   - The first nonblank line under `## Summary` must be a single-line headline or judgment, not a bullet. It should be useful even if the reader stops there.
   - On quiet days, prefer exactly: `No major issues reported by users.` Use this when there are no elevated rows, no newly repeated theme, and nothing that needs owner action.
   - When users are surfacing notable issues, make the headline name the count or theme, for example `Two issues are being surfaced by users:`.
   - Immediately under an active headline, list only the issues or themes driving attention, ordered by importance. Start each line with the row's `attention_marker` when present, then a concise owner-readable description and inline issue refs.
   - Treat `🔥🔥` as headline-worthy and `🔥` as elevated. Do not add fire emoji yourself; only copy the row's `attention_marker`.
   - Keep any extra summary detail after the headline to 1-3 terse lines, only when it adds a decision-relevant caveat, repeated theme, or owner action.
   - Do not include routine counts, broad stats, or low-signal table summaries in `## Summary` unless they change the headline. Put metadata and optional counts in `## Details` or the footer.
   - In default mode, end the report with a concise prompt such as `Want details? I can expand this into the issue table.` Keep this separate from the summary headline so the headline stays clean.
   - Cluster and name themes yourself from `summary_inputs`; the collector intentionally does not hard-code issue categories.
   - Use a cluster only when the issues genuinely share the same product problem. If several issues merely share a broad platform or label, describe them individually.
   - Do not omit a repeated theme just because its individual issues fall below the details table cutoff. Several similar reports should be called out as a repeated customer concern.
   - For single-issue rows, summarize the concern directly instead of calling it a cluster.
   - Use inline numbered issue links from each relevant row's `ref_markdown`.
   - Example quiet summary:

```markdown
## Summary
No major issues reported by users.

Source: collector v4, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```

   - Example active summary:

```markdown
## Summary
Two issues are being surfaced by users:
🔥🔥 Terminal launch hangs on startup [1](https://github.com/openai/codex/issues/123)
🔥 Resume switches model providers unexpectedly [2](https://github.com/openai/codex/issues/456)

Source: collector v4, git `abc123def456`, window `2026-04-27T00:00:00Z` to `2026-04-28T00:00:00Z`.
Want details? I can expand this into the issue table.
```

5. In `## Details`, when details are requested, include a compact table only when useful:
   - Prefer rows from `digest_rows`; include a `Refs` column using each row's `ref_markdown`.
   - Keep the table short; omit low-signal rows when the summary already covers them.
   - Use compact columns such as marker, area, type, description, interactions, and refs.
   - The `Description` cell should be a short owner-readable phrase. Use row `description`, title, body excerpts, and recent comments, but do not mechanically copy the raw GitHub issue title when it contains incidental details.
   - A clear quiet/no-concern sentence when there is no meaningful signal.
6. Use the JSON `attention_marker` exactly. It is empty for normal rows, `🔥` for elevated rows, and `🔥🔥` for very high-attention rows. The actual cutoffs are in `attention_thresholds`.
7. Use inline numbered references where a row or bullet points to issues, for example `Compaction bugs [1](https://github.com/openai/codex/issues/123), [2](https://github.com/openai/codex/issues/456)`. Do not add a separate footnotes section.
8. Label `interactions` as `Interactions`; it counts posts/comments/reactions during the requested window, not unique people.
9. Mention the collector `script_version`, repo checkout `git_head`, and time window in one compact source line. In default mode, put this before the details prompt so the final line still asks whether the user wants details. In details-upfront mode, it can be the footer.

## Reaction Handling

The collector uses GitHub reactions endpoints, which include `created_at`, to count reactions created during the digest window for hydrated issues. It reports both in-window reaction counts and current reaction totals. Treat current reaction totals as standing engagement, and treat `new_reactions` / `new_upvotes` as windowed activity.

By default, the collector fetches issue comments with `since=<window start>` and caps the number of comment pages per issue. This keeps very long historical threads from dominating a digest run and focuses the report on recent posts. Use `--fetch-all-comments` only when exhaustive comment history is more important than runtime.

GitHub issue search is still seeded by issue `updated_at`, so a purely reaction-only issue may be missed if reactions do not bump `updated_at`. Covering every reaction-only case would require either a persisted snapshot store or a broader scan of labeled issues.
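
To make the windowed counting concrete, here is a small sketch using `reaction_event_summary` from the collector script below; the sample reaction events are invented for illustration:

```python
import sys
from datetime import datetime, timezone

sys.path.insert(0, ".codex/skills/codex-issue-digest/scripts")  # assumption
from collect_issue_digest import reaction_event_summary

since = datetime(2026, 4, 27, tzinfo=timezone.utc)
until = datetime(2026, 4, 28, tzinfo=timezone.utc)
events = [
    # Human upvote inside the window: counted.
    {"content": "+1", "created_at": "2026-04-27T12:00:00Z", "user": {"login": "alice"}},
    # Bot reaction: excluded.
    {"content": "+1", "created_at": "2026-04-27T13:00:00Z", "user": {"login": "ci[bot]"}},
    # Outside the window: excluded.
    {"content": "heart", "created_at": "2026-04-26T12:00:00Z", "user": {"login": "bob"}},
]
assert reaction_event_summary(events, since, until) == {
    "total": 1,
    "counts": {"+1": 1},
    "upvotes": 1,
}
```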

## Attention Markers

The collector scales attention markers by the requested time window. The baseline is 5 human user interactions for `🔥` and 10 for `🔥🔥` over 24 hours; longer or shorter windows scale those cutoffs linearly and round up. For example, a one-week report uses 35 and 70 interactions. Human user interactions are human-authored new issue posts, human-authored new comments, and human reactions created during the window, including upvotes. Bot posts and bot reactions are excluded. In prose, explain this as high user interaction rather than naming the emoji.
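
A worked example of that scaling, mirroring `attention_thresholds_for_window` in the collector script below:

```python
import math

# For a one-week window: ceil(5 * 168/24) = 35 and ceil(10 * 168/24) = 70.
window_hours = 168.0
scale = window_hours / 24.0
elevated = max(1, math.ceil(5 * scale))               # -> 35 (🔥)
very_high = max(elevated + 1, math.ceil(10 * scale))  # -> 70 (🔥🔥)
print(elevated, very_high)
```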

## Freshness

The automation should run from a repo checkout that contains this skill. For shared daily use, prefer one of these patterns:

- Run the automation in a checkout that is refreshed before the automation starts, for example with `git pull --ff-only`.
- If the automation cannot safely mutate the checkout, have it report the current `git_head` from the collector output so readers know which skill/script version produced the digest.

## Sample Owner Prompt

```text
Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours.
```

```text
Use $codex-issue-digest to run the Codex issue digest for all areas over the past week.
```

## Validation

Dry run the collector against recent issues:

```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --labels tui exec --window-hours 24
```

```bash
python3 .codex/skills/codex-issue-digest/scripts/collect_issue_digest.py --all-labels --window "past week" --limit-issues 10
```

Run the focused script tests:

```bash
pytest .codex/skills/codex-issue-digest/scripts/test_collect_issue_digest.py
```

@@ -1,4 +0,0 @@
interface:
  display_name: "Codex Issue Digest"
  short_description: "Summarize Codex issues by labels or all areas"
  default_prompt: "Use $codex-issue-digest to run the Codex issue digest for labels tui and exec over the previous 24 hours."
@@ -1,994 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Collect recent openai/codex issue activity for owner-focused digests."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from urllib.parse import quote
|
||||
|
||||
SCRIPT_VERSION = 4
|
||||
QUALIFYING_KIND_LABELS = ("bug", "enhancement")
|
||||
REACTION_KEYS = ("+1", "-1", "laugh", "hooray", "confused", "heart", "rocket", "eyes")
|
||||
BASE_ATTENTION_WINDOW_HOURS = 24.0
|
||||
ONE_ATTENTION_INTERACTION_THRESHOLD = 5
|
||||
TWO_ATTENTION_INTERACTION_THRESHOLD = 10
|
||||
ALL_LABEL_PHRASES = {"all", "all areas", "all labels", "all-areas", "all-labels", "*"}
|
||||
|
||||
|
||||
class GhCommandError(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Collect recent GitHub issue activity for a Codex owner digest."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--repo", default="openai/codex", help="OWNER/REPO, default openai/codex"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--labels",
|
||||
nargs="+",
|
||||
default=[],
|
||||
help="Feature-area labels owned by the digest recipient, for example: tui exec",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--all-labels",
|
||||
action="store_true",
|
||||
help="Collect bug/enhancement issues across all feature-area labels",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--window",
|
||||
help='Lookback duration such as "24h", "7d", "1w", or "past week"',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--window-hours", type=float, default=24.0, help="Lookback window"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--since", help="UTC ISO timestamp override for the window start"
|
||||
)
|
||||
parser.add_argument("--until", help="UTC ISO timestamp override for the window end")
|
||||
parser.add_argument(
|
||||
"--limit-issues",
|
||||
type=int,
|
||||
default=200,
|
||||
help="Maximum candidate issues to hydrate after search",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--body-chars", type=int, default=1200, help="Issue body excerpt length"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--comment-chars", type=int, default=900, help="Comment excerpt length"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-comment-pages",
|
||||
type=int,
|
||||
default=3,
|
||||
help=(
|
||||
"Maximum pages of issue comments to hydrate per issue after applying the "
|
||||
"window filter. Use 0 with --fetch-all-comments for no page cap."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--fetch-all-comments",
|
||||
action="store_true",
|
||||
help="Hydrate complete issue comment histories instead of only window-updated comments.",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def parse_timestamp(value, arg_name):
|
||||
if value is None:
|
||||
return None
|
||||
normalized = value.strip()
|
||||
if not normalized:
|
||||
return None
|
||||
if normalized.endswith("Z"):
|
||||
normalized = f"{normalized[:-1]}+00:00"
|
||||
try:
|
||||
parsed = datetime.fromisoformat(normalized)
|
||||
except ValueError as err:
|
||||
raise ValueError(f"{arg_name} must be an ISO timestamp") from err
|
||||
if parsed.tzinfo is None:
|
||||
parsed = parsed.replace(tzinfo=timezone.utc)
|
||||
return parsed.astimezone(timezone.utc)
|
||||
|
||||
|
||||
def format_timestamp(value):
|
||||
return (
|
||||
value.astimezone(timezone.utc)
|
||||
.replace(microsecond=0)
|
||||
.isoformat()
|
||||
.replace("+00:00", "Z")
|
||||
)
|
||||
|
||||
|
||||
def resolve_window(args):
|
||||
until = parse_timestamp(args.until, "--until") or datetime.now(timezone.utc)
|
||||
since = parse_timestamp(args.since, "--since")
|
||||
if since is None:
|
||||
hours = parse_duration_hours(getattr(args, "window", None))
|
||||
if hours is None:
|
||||
hours = getattr(args, "window_hours", 24.0)
|
||||
if hours <= 0:
|
||||
raise ValueError("window duration must be > 0")
|
||||
since = until - timedelta(hours=hours)
|
||||
if since >= until:
|
||||
raise ValueError("--since must be before --until")
|
||||
return since, until
|
||||
|
||||
|
||||
def parse_duration_hours(value):
|
||||
if value is None:
|
||||
return None
|
||||
text = value.strip().casefold().replace("_", " ")
|
||||
if not text:
|
||||
return None
|
||||
text = re.sub(r"^(past|last)\s+", "", text)
|
||||
aliases = {
|
||||
"day": 24.0,
|
||||
"24h": 24.0,
|
||||
"week": 168.0,
|
||||
"7d": 168.0,
|
||||
}
|
||||
if text in aliases:
|
||||
return aliases[text]
|
||||
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(h|hr|hrs|hour|hours)", text)
|
||||
if match:
|
||||
return float(match.group(1))
|
||||
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(d|day|days)", text)
|
||||
if match:
|
||||
return float(match.group(1)) * 24.0
|
||||
match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*(w|week|weeks)", text)
|
||||
if match:
|
||||
return float(match.group(1)) * 168.0
|
||||
raise ValueError(f"Unsupported duration: {value}")
|
||||
|
||||
|
||||
def normalize_requested_labels(labels, all_labels=False):
|
||||
out = []
|
||||
seen = set()
|
||||
for raw in labels:
|
||||
for piece in raw.split(","):
|
||||
label = piece.strip()
|
||||
if not label:
|
||||
continue
|
||||
key = label.casefold()
|
||||
if key not in seen:
|
||||
out.append(label)
|
||||
seen.add(key)
|
||||
phrase = " ".join(label.casefold() for label in out)
|
||||
if all_labels or phrase in ALL_LABEL_PHRASES:
|
||||
return [], True
|
||||
if not out:
|
||||
raise ValueError(
|
||||
"At least one feature-area label is required, or use --all-labels"
|
||||
)
|
||||
return out, False
|
||||
|
||||
|
||||
def quote_label(label):
|
||||
if re.fullmatch(r"[A-Za-z0-9_.:-]+", label):
|
||||
return f"label:{label}"
|
||||
escaped = label.replace('"', '\\"')
|
||||
return f'label:"{escaped}"'
|
||||
|
||||
|
||||
def build_search_queries(
|
||||
repo, owner_labels, since, kind_labels=QUALIFYING_KIND_LABELS, all_labels=False
|
||||
):
|
||||
since_date = since.date().isoformat()
|
||||
queries = []
|
||||
if all_labels:
|
||||
for kind_label in kind_labels:
|
||||
queries.append(
|
||||
" ".join(
|
||||
[
|
||||
f"repo:{repo}",
|
||||
"is:issue",
|
||||
f"updated:>={since_date}",
|
||||
quote_label(kind_label),
|
||||
]
|
||||
)
|
||||
)
|
||||
return queries
|
||||
for owner_label in owner_labels:
|
||||
for kind_label in kind_labels:
|
||||
queries.append(
|
||||
" ".join(
|
||||
[
|
||||
f"repo:{repo}",
|
||||
"is:issue",
|
||||
f"updated:>={since_date}",
|
||||
quote_label(owner_label),
|
||||
quote_label(kind_label),
|
||||
]
|
||||
)
|
||||
)
|
||||
return queries
|
||||
|
||||
|
||||
def _format_gh_error(cmd, err):
|
||||
stdout = (err.stdout or "").strip()
|
||||
stderr = (err.stderr or "").strip()
|
||||
parts = [f"GitHub CLI command failed: {' '.join(cmd)}"]
|
||||
if stdout:
|
||||
parts.append(f"stdout: {stdout}")
|
||||
if stderr:
|
||||
parts.append(f"stderr: {stderr}")
|
||||
return "\n".join(parts)
|
||||
|
||||
|
||||
def gh_json(args):
|
||||
cmd = ["gh", *args]
|
||||
try:
|
||||
proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
|
||||
except FileNotFoundError as err:
|
||||
raise GhCommandError("`gh` command not found") from err
|
||||
except subprocess.CalledProcessError as err:
|
||||
raise GhCommandError(_format_gh_error(cmd, err)) from err
|
||||
raw = proc.stdout.strip()
|
||||
if not raw:
|
||||
return None
|
||||
try:
|
||||
return json.loads(raw)
|
||||
except json.JSONDecodeError as err:
|
||||
raise GhCommandError(
|
||||
f"Failed to parse JSON from gh output for {' '.join(args)}"
|
||||
) from err
|
||||
|
||||
|
||||
def gh_text(args):
|
||||
cmd = ["gh", *args]
|
||||
try:
|
||||
proc = subprocess.run(cmd, check=True, capture_output=True, text=True)
|
||||
except (FileNotFoundError, subprocess.CalledProcessError):
|
||||
return ""
|
||||
return proc.stdout.strip()
|
||||
|
||||
|
||||
def git_head():
|
||||
try:
|
||||
proc = subprocess.run(
|
||||
["git", "rev-parse", "--short=12", "HEAD"],
|
||||
check=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.CalledProcessError):
|
||||
return None
|
||||
return proc.stdout.strip() or None
|
||||
|
||||
|
||||
def skill_relative_path():
|
||||
try:
|
||||
return str(Path(__file__).resolve().relative_to(Path.cwd().resolve()))
|
||||
except ValueError:
|
||||
return str(Path(__file__).resolve())
|
||||
|
||||
|
||||
def gh_api_list_paginated(endpoint, per_page=100, max_pages=None, with_metadata=False):
|
||||
items = []
|
||||
page = 1
|
||||
truncated = False
|
||||
while True:
|
||||
sep = "&" if "?" in endpoint else "?"
|
||||
page_endpoint = f"{endpoint}{sep}per_page={per_page}&page={page}"
|
||||
payload = gh_json(["api", page_endpoint])
|
||||
if payload is None:
|
||||
break
|
||||
if not isinstance(payload, list):
|
||||
raise GhCommandError(f"Unexpected paginated payload from gh api {endpoint}")
|
||||
items.extend(payload)
|
||||
if len(payload) < per_page:
|
||||
break
|
||||
if max_pages is not None and page >= max_pages:
|
||||
truncated = True
|
||||
break
|
||||
page += 1
|
||||
if with_metadata:
|
||||
return {
|
||||
"items": items,
|
||||
"truncated": truncated,
|
||||
"pages": page,
|
||||
"max_pages": max_pages,
|
||||
}
|
||||
return items
|
||||
|
||||
|
||||
def search_issue_numbers(queries, limit):
|
||||
numbers = {}
|
||||
for query in queries:
|
||||
page = 1
|
||||
seen_for_query = 0
|
||||
while True:
|
||||
payload = gh_json(
|
||||
[
|
||||
"api",
|
||||
"search/issues",
|
||||
"-X",
|
||||
"GET",
|
||||
"-f",
|
||||
f"q={query}",
|
||||
"-f",
|
||||
"sort=updated",
|
||||
"-f",
|
||||
"order=desc",
|
||||
"-f",
|
||||
"per_page=100",
|
||||
"-f",
|
||||
f"page={page}",
|
||||
]
|
||||
)
|
||||
if not isinstance(payload, dict):
|
||||
raise GhCommandError("Unexpected payload from GitHub issue search")
|
||||
items = payload.get("items") or []
|
||||
if not isinstance(items, list):
|
||||
raise GhCommandError("Expected search `items` to be a list")
|
||||
for item in items:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
number = item.get("number")
|
||||
if isinstance(number, int):
|
||||
numbers[number] = str(item.get("updated_at") or "")
|
||||
seen_for_query += 1
|
||||
if len(items) < 100 or seen_for_query >= limit:
|
||||
break
|
||||
page += 1
|
||||
ordered = sorted(
|
||||
numbers, key=lambda number: (numbers[number], number), reverse=True
|
||||
)
|
||||
return ordered[:limit]
|
||||
|
||||
|
||||
def fetch_issue(repo, number):
|
||||
payload = gh_json(["api", f"repos/{repo}/issues/{number}"])
|
||||
if not isinstance(payload, dict):
|
||||
raise GhCommandError(f"Unexpected issue payload for #{number}")
|
||||
return payload
|
||||
|
||||
|
||||
def fetch_comments(repo, number, since=None, max_pages=None):
|
||||
endpoint = f"repos/{repo}/issues/{number}/comments"
|
||||
if since is not None:
|
||||
endpoint = f"{endpoint}?since={quote(format_timestamp(since), safe='')}"
|
||||
return gh_api_list_paginated(
|
||||
endpoint,
|
||||
max_pages=max_pages,
|
||||
with_metadata=True,
|
||||
)
|
||||
|
||||
|
||||
def fetch_reactions_for_item(endpoint, item):
|
||||
if reaction_summary(item)["total"] <= 0:
|
||||
return []
|
||||
return gh_api_list_paginated(endpoint)
|
||||
|
||||
|
||||
def fetch_comment_reactions(repo, comments):
|
||||
reactions_by_comment_id = {}
|
||||
for comment in comments:
|
||||
comment_id = comment.get("id")
|
||||
if comment_id in (None, ""):
|
||||
continue
|
||||
endpoint = f"repos/{repo}/issues/comments/{comment_id}/reactions"
|
||||
reactions_by_comment_id[comment_id] = fetch_reactions_for_item(
|
||||
endpoint, comment
|
||||
)
|
||||
return reactions_by_comment_id
|
||||
|
||||
|
||||
def extract_login(user_obj):
|
||||
if isinstance(user_obj, dict):
|
||||
return str(user_obj.get("login") or "")
|
||||
return ""
|
||||
|
||||
|
||||
def is_bot_login(login):
|
||||
return bool(login) and login.lower().endswith("[bot]")
|
||||
|
||||
|
||||
def is_human_user(user_obj):
|
||||
login = extract_login(user_obj)
|
||||
return bool(login) and not is_bot_login(login)
|
||||
|
||||
|
||||
def label_names(issue):
|
||||
labels = []
|
||||
for label in issue.get("labels") or []:
|
||||
if isinstance(label, dict) and label.get("name"):
|
||||
labels.append(str(label["name"]))
|
||||
return sorted(labels, key=str.casefold)
|
||||
|
||||
|
||||
def matching_labels(labels, requested):
|
||||
labels_by_key = {label.casefold(): label for label in labels}
|
||||
return [label for label in requested if label.casefold() in labels_by_key]
|
||||
|
||||
|
||||
def area_labels(labels):
|
||||
kind_keys = {label.casefold() for label in QUALIFYING_KIND_LABELS}
|
||||
return [label for label in labels if label.casefold() not in kind_keys]
|
||||
|
||||
|
||||
def attention_thresholds_for_window(window_hours):
|
||||
if window_hours <= 0:
|
||||
raise ValueError("window_hours must be > 0")
|
||||
window_hours = round(window_hours, 6)
|
||||
scale = window_hours / BASE_ATTENTION_WINDOW_HOURS
|
||||
elevated = max(1, math.ceil(ONE_ATTENTION_INTERACTION_THRESHOLD * scale))
|
||||
very_high = max(
|
||||
elevated + 1, math.ceil(TWO_ATTENTION_INTERACTION_THRESHOLD * scale)
|
||||
)
|
||||
return {
|
||||
"base_window_hours": BASE_ATTENTION_WINDOW_HOURS,
|
||||
"window_hours": round(window_hours, 3),
|
||||
"scale": round(scale, 3),
|
||||
"elevated": elevated,
|
||||
"very_high": very_high,
|
||||
}
|
||||
|
||||
|
||||
def attention_level_for(user_interactions, attention_thresholds=None):
|
||||
thresholds = attention_thresholds or attention_thresholds_for_window(
|
||||
BASE_ATTENTION_WINDOW_HOURS
|
||||
)
|
||||
if user_interactions >= thresholds["very_high"]:
|
||||
return 2
|
||||
if user_interactions >= thresholds["elevated"]:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
|
||||
def attention_marker_for(user_interactions, attention_thresholds=None):
|
||||
return "🔥" * attention_level_for(user_interactions, attention_thresholds)
|
||||
|
||||
|
||||
def reaction_summary(item):
|
||||
reactions = item.get("reactions")
|
||||
if not isinstance(reactions, dict):
|
||||
return {"total": 0, "counts": {}}
|
||||
counts = {}
|
||||
for key in REACTION_KEYS:
|
||||
value = reactions.get(key, 0)
|
||||
if isinstance(value, int) and value:
|
||||
counts[key] = value
|
||||
total = reactions.get("total_count")
|
||||
if not isinstance(total, int):
|
||||
total = sum(counts.values())
|
||||
return {"total": total, "counts": counts}
|
||||
|
||||
|
||||
def reaction_event_summary(reactions, since, until):
    counts = {}
    total = 0
    for reaction in reactions or []:
        if not isinstance(reaction, dict):
            continue
        if not is_in_window(str(reaction.get("created_at") or ""), since, until):
            continue
        if not is_human_user(reaction.get("user")):
            continue
        content = str(reaction.get("content") or "")
        if not content:
            continue
        counts[content] = counts.get(content, 0) + 1
        total += 1
    return {
        "total": total,
        "counts": counts,
        "upvotes": counts.get("+1", 0),
    }


def compact_text(value, limit):
    text = re.sub(r"\s+", " ", str(value or "")).strip()
    if limit <= 0:
        return ""
    if len(text) <= limit:
        return text
    return f"{text[: max(limit - 1, 0)].rstrip()}..."


def clean_title_for_description(title):
    cleaned = re.sub(r"\s+", " ", str(title or "")).strip()
    cleaned = re.sub(
        r"^(codex(?: desktop| app|\.app| cli)?|desktop|windows codex app)\s*[:,-]\s*",
        "",
        cleaned,
        flags=re.IGNORECASE,
    )
    cleaned = re.sub(r"^on windows,\s*", "Windows: ", cleaned, flags=re.IGNORECASE)
    cleaned = cleaned.strip(" -:;")
    return compact_text(cleaned, 80) or "Issue needs owner review"


def issue_description(issue):
    return clean_title_for_description(issue.get("title"))


def is_in_window(timestamp, since, until):
    parsed = parse_timestamp(timestamp, "timestamp")
    if parsed is None:
        return False
    return since <= parsed < until


def summarize_comment(
    comment, comment_chars, reaction_events=None, since=None, until=None
):
    reactions = reaction_summary(comment)
    new_reactions = (
        reaction_event_summary(reaction_events, since, until)
        if since is not None and until is not None
        else {"total": 0, "counts": {}, "upvotes": 0}
    )
    human_user_interaction = is_human_user(comment.get("user"))
    return {
        "id": comment.get("id"),
        "author": extract_login(comment.get("user")),
        "author_association": str(comment.get("author_association") or ""),
        "created_at": str(comment.get("created_at") or ""),
        "updated_at": str(comment.get("updated_at") or ""),
        "url": str(comment.get("html_url") or ""),
        "human_user_interaction": human_user_interaction,
        "reactions": reactions["counts"],
        "reaction_total": reactions["total"],
        "new_reactions": new_reactions["total"],
        "new_upvotes": new_reactions["upvotes"],
        "new_reaction_counts": new_reactions["counts"],
        "body_excerpt": compact_text(comment.get("body"), comment_chars),
    }


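# Returns None unless the issue carries a qualifying kind label plus a
# requested owner label and was updated inside the reporting window.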
def summarize_issue(
    issue,
    comments,
    requested_labels,
    since,
    until,
    body_chars,
    comment_chars,
    issue_reaction_events=None,
    comment_reactions_by_id=None,
    all_labels=False,
    comments_hydration=None,
    attention_thresholds=None,
):
    labels = label_names(issue)
    labels_by_key = {label.casefold() for label in labels}
    kind_labels = [
        label for label in QUALIFYING_KIND_LABELS if label.casefold() in labels_by_key
    ]
    if all_labels:
        owner_labels = area_labels(labels) or ["unlabeled"]
    else:
        owner_labels = matching_labels(labels, requested_labels)
    if not kind_labels or not owner_labels:
        return None

    updated_at = str(issue.get("updated_at") or "")
    if not is_in_window(updated_at, since, until):
        return None

    new_issue = is_in_window(str(issue.get("created_at") or ""), since, until)
    comment_reactions_by_id = comment_reactions_by_id or {}
    new_comments = [
        summarize_comment(
            comment,
            comment_chars,
            reaction_events=comment_reactions_by_id.get(comment.get("id")),
            since=since,
            until=until,
        )
        for comment in comments
        if is_in_window(str(comment.get("created_at") or ""), since, until)
    ]
    new_comments.sort(key=lambda item: (item["created_at"], str(item["id"])))

    issue_reactions = reaction_summary(issue)
    issue_reaction_events_summary = reaction_event_summary(
        issue_reaction_events, since, until
    )
    comment_reaction_events_summary = reaction_event_summary(
        [
            reaction
            for reactions in comment_reactions_by_id.values()
            for reaction in reactions
        ],
        since,
        until,
    )
    new_reactions = (
        issue_reaction_events_summary["total"]
        + comment_reaction_events_summary["total"]
    )
    new_upvotes = (
        issue_reaction_events_summary["upvotes"]
        + comment_reaction_events_summary["upvotes"]
    )
    all_comment_reaction_total = sum(
        reaction_summary(comment)["total"] for comment in comments
    )
    new_comment_reaction_total = sum(
        comment["reaction_total"] for comment in new_comments
    )
    new_issue_user_interaction = new_issue and is_human_user(issue.get("user"))
    new_comment_user_interactions = sum(
        1 for comment in new_comments if comment["human_user_interaction"]
    )
    user_interactions = (
        int(new_issue_user_interaction) + new_comment_user_interactions + new_reactions
    )
    attention_level = attention_level_for(user_interactions, attention_thresholds)
    attention_marker = attention_marker_for(user_interactions, attention_thresholds)
    updated_without_visible_new_post = (
        not new_issue and not new_comments and new_reactions == 0
    )

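    # Heuristic engagement weighting: fresh comments count triple, windowed and
    # standing reactions count once each, and the standing comment count is
    # capped at 10 so long historical threads cannot dominate the score.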
    engagement_score = (
        len(new_comments) * 3
        + new_reactions
        + issue_reactions["total"]
        + new_comment_reaction_total
        + min(int(issue.get("comments") or len(comments) or 0), 10)
    )

    return {
        "number": issue.get("number"),
        "title": str(issue.get("title") or ""),
        "description": issue_description(issue),
        "url": str(issue.get("html_url") or ""),
        "state": str(issue.get("state") or ""),
        "author": extract_login(issue.get("user")),
        "author_association": str(issue.get("author_association") or ""),
        "created_at": str(issue.get("created_at") or ""),
        "updated_at": updated_at,
        "labels": labels,
        "kind_labels": kind_labels,
        "owner_labels": owner_labels,
        "comments_total": int(issue.get("comments") or len(comments) or 0),
        "comments_hydration": comments_hydration
        or {
            "fetched": len(comments),
            "since": None,
            "truncated": False,
            "max_pages": None,
        },
        "issue_reactions": issue_reactions["counts"],
        "issue_reaction_total": issue_reactions["total"],
        "comment_reaction_total": all_comment_reaction_total,
        "new_comment_reaction_total": new_comment_reaction_total,
        "new_issue_reactions": issue_reaction_events_summary["total"],
        "new_issue_upvotes": issue_reaction_events_summary["upvotes"],
        "new_comment_reactions": comment_reaction_events_summary["total"],
        "new_comment_upvotes": comment_reaction_events_summary["upvotes"],
        "new_reactions": new_reactions,
        "new_upvotes": new_upvotes,
        "user_interactions": user_interactions,
        "attention": attention_level > 0,
        "attention_level": attention_level,
        "attention_marker": attention_marker,
        "engagement_score": engagement_score,
        "activity": {
            "new_issue": new_issue,
            "new_comments": len(new_comments),
            "new_human_comments": new_comment_user_interactions,
            "new_reactions": new_reactions,
            "new_upvotes": new_upvotes,
            "updated_without_visible_new_post": updated_without_visible_new_post,
        },
        "body_excerpt": compact_text(issue.get("body"), body_chars),
        "new_comments": new_comments,
    }


def count_by_label(issues, labels):
    out = {}
    for label in labels:
        matching = [issue for issue in issues if label in issue["owner_labels"]]
        out[label] = {
            "issues": len(matching),
            "new_issues": sum(
                1 for issue in matching if issue["activity"]["new_issue"]
            ),
            "new_comments": sum(
                issue["activity"]["new_comments"] for issue in matching
            ),
        }
    return out


def count_by_kind(issues):
    out = {}
    for kind in QUALIFYING_KIND_LABELS:
        matching = [issue for issue in issues if kind in issue["kind_labels"]]
        out[kind] = {
            "issues": len(matching),
            "new_issues": sum(
                1 for issue in matching if issue["activity"]["new_issue"]
            ),
            "new_comments": sum(
                issue["activity"]["new_comments"] for issue in matching
            ),
        }
    return out


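# Rank by attention first, then interaction volume, engagement, new-comment
# count, total reactions, and finally recency as the tie-breaker.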
def hot_items(issues, limit=8):
    ranked = sorted(
        issues,
        key=lambda issue: (
            issue["attention"],
            issue["attention_level"],
            issue["user_interactions"],
            issue["engagement_score"],
            issue["activity"]["new_comments"],
            issue["issue_reaction_total"] + issue["comment_reaction_total"],
            issue["updated_at"],
        ),
        reverse=True,
    )
    return [
        {
            "number": issue["number"],
            "title": issue["title"],
            "url": issue["url"],
            "owner_labels": issue["owner_labels"],
            "kind_labels": issue["kind_labels"],
            "attention": issue["attention"],
            "attention_level": issue["attention_level"],
            "attention_marker": issue["attention_marker"],
            "user_interactions": issue["user_interactions"],
            "new_reactions": issue["new_reactions"],
            "new_upvotes": issue["new_upvotes"],
            "engagement_score": issue["engagement_score"],
            "new_comments": issue["activity"]["new_comments"],
            "reaction_total": issue["issue_reaction_total"]
            + issue["comment_reaction_total"],
        }
        for issue in ranked[:limit]
        if issue["engagement_score"] > 0
    ]


def ranked_digest_issues(issues):
    return sorted(
        issues,
        key=lambda issue: (
            issue["attention"],
            issue["attention_level"],
            issue["user_interactions"],
            issue["engagement_score"],
            issue["activity"]["new_comments"],
            issue["updated_at"],
        ),
        reverse=True,
    )


def digest_rows(issues, limit=10, ref_map=None):
    ranked = ranked_digest_issues(issues)
    if ref_map is None:
        ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
    rows = []
    for issue in ranked[:limit]:
        ref = ref_map[issue["number"]]
        reaction_total = issue["issue_reaction_total"] + issue["comment_reaction_total"]
        rows.append(
            {
                "ref": ref,
                "ref_markdown": f"[{ref}]({issue['url']})",
                "marker": issue["attention_marker"],
                "attention_marker": issue["attention_marker"],
                "number": issue["number"],
                "description": issue["description"],
                "title": issue["title"],
                "url": issue["url"],
                "area": ", ".join(issue["owner_labels"]),
                "kind": ", ".join(issue["kind_labels"]),
                "state": issue["state"],
                "interactions": issue["user_interactions"],
                "user_interactions": issue["user_interactions"],
                "new_reactions": issue["new_reactions"],
                "new_upvotes": issue["new_upvotes"],
                "current_reactions": reaction_total,
            }
        )
    return rows


def issue_ref_markdown(issue, ref_map):
    ref = ref_map[issue["number"]]
    return f"[{ref}]({issue['url']})"


def summary_inputs(issues, limit=80, ref_map=None):
    ranked = ranked_digest_issues(issues)
    if ref_map is None:
        ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
    rows = []
    for issue in ranked[:limit]:
        rows.append(
            {
                "ref": ref_map[issue["number"]],
                "ref_markdown": issue_ref_markdown(issue, ref_map),
                "number": issue["number"],
                "title": issue["title"],
                "description": issue["description"],
                "url": issue["url"],
                "labels": issue["labels"],
                "owner_labels": issue["owner_labels"],
                "kind_labels": issue["kind_labels"],
                "state": issue.get("state", ""),
                "attention_marker": issue.get("attention_marker", ""),
                "interactions": issue["user_interactions"],
                "new_comments": issue["activity"].get("new_comments", 0),
                "new_reactions": issue.get("new_reactions", 0),
                "new_upvotes": issue.get("new_upvotes", 0),
                "current_reactions": issue.get("issue_reaction_total", 0)
                + issue.get("comment_reaction_total", 0),
            }
        )
    return rows


def collect_digest(args):
    since, until = resolve_window(args)
    window_hours = (until - since).total_seconds() / 3600
    attention_thresholds = attention_thresholds_for_window(window_hours)
    requested_labels, all_labels = normalize_requested_labels(
        args.labels, all_labels=args.all_labels
    )
    queries = build_search_queries(
        args.repo, requested_labels, since, all_labels=all_labels
    )
    numbers = search_issue_numbers(queries, args.limit_issues)
    gh_version_output = gh_text(["--version"])

    issues = []
    max_comment_pages = None if args.max_comment_pages <= 0 else args.max_comment_pages
    for number in numbers:
        issue = fetch_issue(args.repo, number)
        comments_since = None if args.fetch_all_comments else since
        comments_payload = fetch_comments(
            args.repo,
            number,
            since=comments_since,
            max_pages=max_comment_pages,
        )
        comments = comments_payload["items"]
        issue_reaction_events = fetch_reactions_for_item(
            f"repos/{args.repo}/issues/{number}/reactions", issue
        )
        comment_reactions_by_id = fetch_comment_reactions(args.repo, comments)
        comments_hydration = {
            "fetched": len(comments),
            "total": int(issue.get("comments") or len(comments) or 0),
            "since": format_timestamp(comments_since) if comments_since else None,
            "truncated": comments_payload["truncated"],
            "max_pages": comments_payload["max_pages"],
            "fetch_all_comments": args.fetch_all_comments,
        }
        summary = summarize_issue(
            issue,
            comments,
            requested_labels,
            since,
            until,
            args.body_chars,
            args.comment_chars,
            issue_reaction_events=issue_reaction_events,
            comment_reactions_by_id=comment_reactions_by_id,
            all_labels=all_labels,
            comments_hydration=comments_hydration,
            attention_thresholds=attention_thresholds,
        )
        if summary is not None:
            issues.append(summary)

    issues.sort(
        key=lambda issue: (issue["updated_at"], int(issue["number"] or 0)), reverse=True
    )
    totals = {
        "candidate_issues": len(numbers),
        "included_issues": len(issues),
        "new_issues": sum(1 for issue in issues if issue["activity"]["new_issue"]),
        "issues_with_new_comments": sum(
            1 for issue in issues if issue["activity"]["new_comments"] > 0
        ),
        "new_comments": sum(issue["activity"]["new_comments"] for issue in issues),
        "comments_fetched": sum(
            issue["comments_hydration"]["fetched"] for issue in issues
        ),
        "issues_with_truncated_comment_hydration": sum(
            1 for issue in issues if issue["comments_hydration"]["truncated"]
        ),
        "updated_without_visible_new_post": sum(
            1
            for issue in issues
            if issue["activity"]["updated_without_visible_new_post"]
        ),
        "issue_reactions_current_total": sum(
            issue["issue_reaction_total"] for issue in issues
        ),
        "comment_reactions_current_total": sum(
            issue["comment_reaction_total"] for issue in issues
        ),
        "new_reactions": sum(issue["new_reactions"] for issue in issues),
        "new_upvotes": sum(issue["new_upvotes"] for issue in issues),
        "user_interactions": sum(issue["user_interactions"] for issue in issues),
    }
    ranked = ranked_digest_issues(issues)
    ref_map = {issue["number"]: ref for ref, issue in enumerate(ranked, start=1)}
    filter_label = "all" if all_labels else requested_labels

    return {
        "generated_at": format_timestamp(datetime.now(timezone.utc)),
        "source": {
            "repo": args.repo,
            "skill": "codex-issue-digest",
            "collector": skill_relative_path(),
            "script_version": SCRIPT_VERSION,
            "git_head": git_head(),
            "gh_version": gh_version_output.splitlines()[0]
            if gh_version_output
            else None,
        },
        "window": {
            "since": format_timestamp(since),
            "until": format_timestamp(until),
            "hours": round(window_hours, 3),
        },
        "attention_thresholds": attention_thresholds,
        "filters": {
            "owner_labels": filter_label,
            "all_labels": all_labels,
            "kind_labels": list(QUALIFYING_KIND_LABELS),
        },
        "collection_notes": [
            "Issues are selected when they currently have bug or enhancement plus at least one requested owner label and were updated during the window.",
            "By default, issue comments are fetched with since=window_start and a max page cap to avoid long historical threads; use --fetch-all-comments when exhaustive comment history is needed.",
            "New issue comments are filtered by comment creation time within the window from the fetched comment set.",
            "Reaction events are counted by GitHub reaction created_at timestamps for hydrated issues and fetched comments.",
            "Current reaction totals are standing engagement signals; new_reactions and new_upvotes are windowed activity.",
            "The collector does not assign semantic clusters; use summary_inputs as model-ready evidence for report-time clustering.",
            "Pure reaction-only issues may be missed if GitHub issue search does not surface them via updated_at.",
            "Issues updated during the window without a new issue body or new comment are retained because label/status edits can still be useful owner signals.",
        ],
        "totals": totals,
        "by_owner_label": count_by_label(
            issues,
            sorted(
                {area for issue in issues for area in issue["owner_labels"]},
                key=str.casefold,
            )
            if all_labels
            else requested_labels,
        ),
        "by_kind_label": count_by_kind(issues),
        "hot_items": hot_items(issues),
        "summary_inputs": summary_inputs(issues, ref_map=ref_map),
        "digest_rows": digest_rows(issues, ref_map=ref_map),
        "issues": issues,
    }


def main():
    args = parse_args()
    try:
        digest = collect_digest(args)
    except (GhCommandError, RuntimeError, ValueError) as err:
        sys.stderr.write(f"collect_issue_digest.py error: {err}\n")
        return 1
    sys.stdout.write(json.dumps(digest, indent=2, sort_keys=True) + "\n")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
@@ -1,685 +0,0 @@
import importlib.util
from datetime import timezone
from pathlib import Path


MODULE_PATH = Path(__file__).with_name("collect_issue_digest.py")
MODULE_SPEC = importlib.util.spec_from_file_location(
    "collect_issue_digest", MODULE_PATH
)
collect_issue_digest = importlib.util.module_from_spec(MODULE_SPEC)
assert MODULE_SPEC.loader is not None
MODULE_SPEC.loader.exec_module(collect_issue_digest)


def test_build_search_queries_uses_each_owner_and_kind_label():
    since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")

    queries = collect_issue_digest.build_search_queries(
        "openai/codex", ["tui", "exec"], since
    )

    assert queries == [
        "repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:bug",
        "repo:openai/codex is:issue updated:>=2026-04-25 label:tui label:enhancement",
        "repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:bug",
        "repo:openai/codex is:issue updated:>=2026-04-25 label:exec label:enhancement",
    ]


def test_build_search_queries_can_scan_all_labels():
    since = collect_issue_digest.parse_timestamp("2026-04-25T12:34:56Z", "--since")

    queries = collect_issue_digest.build_search_queries(
        "openai/codex", [], since, all_labels=True
    )

    assert queries == [
        "repo:openai/codex is:issue updated:>=2026-04-25 label:bug",
        "repo:openai/codex is:issue updated:>=2026-04-25 label:enhancement",
    ]


def test_normalize_requested_labels_accepts_all_area_phrases():
    assert collect_issue_digest.normalize_requested_labels(["all", "areas"]) == (
        [],
        True,
    )
    assert collect_issue_digest.normalize_requested_labels(["all-labels"]) == (
        [],
        True,
    )


def test_search_issue_numbers_requests_updated_sort(monkeypatch):
    calls = []

    def fake_gh_json(args):
        calls.append(args)
        return {
            "items": [
                {"number": 1, "updated_at": "2026-04-25T00:00:00Z"},
            ]
        }

    monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)

    assert collect_issue_digest.search_issue_numbers(["query"], limit=10) == [1]
    assert "-f" in calls[0]
    assert "sort=updated" in calls[0]
    assert "order=desc" in calls[0]


def test_search_issue_numbers_applies_limit_per_query(monkeypatch):
    calls = []

    def fake_gh_json(args):
        calls.append(args)
        query = next(
            value.removeprefix("q=") for value in args if value.startswith("q=")
        )
        page = int(
            next(
                value.removeprefix("page=")
                for value in args
                if value.startswith("page=")
            )
        )
        base = 10_000 if query == "first" else 20_000
        offset = (page - 1) * 100
        return {
            "items": [
                {
                    "number": base + offset + idx,
                    "updated_at": f"2026-04-25T00:{idx:02d}:00Z",
                }
                for idx in range(100)
            ]
        }

    monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)

    collect_issue_digest.search_issue_numbers(["first", "second"], limit=150)

    queried_pages = [
        (
            next(
                value.removeprefix("q=") for value in args if value.startswith("q=")
            ),
            next(
                value.removeprefix("page=")
                for value in args
                if value.startswith("page=")
            ),
        )
        for args in calls
    ]
    assert queried_pages == [
        ("first", "1"),
        ("first", "2"),
        ("second", "1"),
        ("second", "2"),
    ]


def test_summarize_issue_keeps_new_comments_and_reaction_signals():
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
    until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
    issue = {
        "number": 123,
        "title": "TUI does not redraw",
        "html_url": "https://github.com/openai/codex/issues/123",
        "state": "open",
        "created_at": "2026-04-24T20:00:00Z",
        "updated_at": "2026-04-25T10:00:00Z",
        "user": {"login": "alice"},
        "author_association": "NONE",
        "comments": 2,
        "body": "The terminal freezes after resize.",
        "labels": [{"name": "bug"}, {"name": "tui"}],
        "reactions": {"total_count": 3, "+1": 2, "rocket": 1},
    }
    comments = [
        {
            "id": 1,
            "created_at": "2026-04-25T11:00:00Z",
            "updated_at": "2026-04-25T11:00:00Z",
            "html_url": "https://github.com/openai/codex/issues/123#issuecomment-1",
            "user": {"login": "bob"},
            "author_association": "MEMBER",
            "body": "I can reproduce this on main.",
            "reactions": {"total_count": 4, "heart": 1, "+1": 3},
        },
        {
            "id": 2,
            "created_at": "2026-04-24T11:00:00Z",
            "updated_at": "2026-04-24T11:00:00Z",
            "html_url": "https://github.com/openai/codex/issues/123#issuecomment-2",
            "user": {"login": "carol"},
            "author_association": "NONE",
            "body": "Older comment.",
            "reactions": {"total_count": 1, "eyes": 1},
        },
    ]

    summary = collect_issue_digest.summarize_issue(
        issue,
        comments,
        ["tui", "exec"],
        since,
        until,
        body_chars=200,
        comment_chars=200,
    )

    assert summary == {
        "number": 123,
        "title": "TUI does not redraw",
        "description": "TUI does not redraw",
        "url": "https://github.com/openai/codex/issues/123",
        "state": "open",
        "author": "alice",
        "author_association": "NONE",
        "created_at": "2026-04-24T20:00:00Z",
        "updated_at": "2026-04-25T10:00:00Z",
        "labels": ["bug", "tui"],
        "kind_labels": ["bug"],
        "owner_labels": ["tui"],
        "comments_total": 2,
        "comments_hydration": {
            "fetched": 2,
            "since": None,
            "truncated": False,
            "max_pages": None,
        },
        "issue_reactions": {"+1": 2, "rocket": 1},
        "issue_reaction_total": 3,
        "comment_reaction_total": 5,
        "new_comment_reaction_total": 4,
        "new_issue_reactions": 0,
        "new_issue_upvotes": 0,
        "new_comment_reactions": 0,
        "new_comment_upvotes": 0,
        "new_reactions": 0,
        "new_upvotes": 0,
        "user_interactions": 1,
        "attention": False,
        "attention_level": 0,
        "attention_marker": "",
        "engagement_score": 12,
        "activity": {
            "new_issue": False,
            "new_comments": 1,
            "new_human_comments": 1,
            "new_reactions": 0,
            "new_upvotes": 0,
            "updated_without_visible_new_post": False,
        },
        "body_excerpt": "The terminal freezes after resize.",
        "new_comments": [
            {
                "id": 1,
                "author": "bob",
                "author_association": "MEMBER",
                "created_at": "2026-04-25T11:00:00Z",
                "updated_at": "2026-04-25T11:00:00Z",
                "url": "https://github.com/openai/codex/issues/123#issuecomment-1",
                "human_user_interaction": True,
                "reactions": {"+1": 3, "heart": 1},
                "reaction_total": 4,
                "new_reactions": 0,
                "new_upvotes": 0,
                "new_reaction_counts": {},
                "body_excerpt": "I can reproduce this on main.",
            }
        ],
    }


def test_summarize_issue_filters_non_owner_or_non_kind_labels():
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
    until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
    base_issue = {
        "number": 1,
        "title": "Question",
        "created_at": "2026-04-25T01:00:00Z",
        "updated_at": "2026-04-25T01:00:00Z",
        "labels": [{"name": "question"}, {"name": "tui"}],
    }

    assert (
        collect_issue_digest.summarize_issue(
            base_issue,
            [],
            ["tui"],
            since,
            until,
            body_chars=100,
            comment_chars=100,
        )
        is None
    )

    issue_without_owner = dict(base_issue)
    issue_without_owner["labels"] = [{"name": "bug"}, {"name": "app"}]

    assert (
        collect_issue_digest.summarize_issue(
            issue_without_owner,
            [],
            ["tui"],
            since,
            until,
            body_chars=100,
            comment_chars=100,
        )
        is None
    )


def test_resolve_window_defaults_to_previous_hours():
    class Args:
        since = None
        until = "2026-04-26T12:00:00Z"
        window_hours = 24

    since, until = collect_issue_digest.resolve_window(Args())

    assert since.isoformat() == "2026-04-25T12:00:00+00:00"
    assert until.tzinfo == timezone.utc


def test_parse_duration_hours_accepts_common_phrases():
    assert collect_issue_digest.parse_duration_hours("past week") == 168
    assert collect_issue_digest.parse_duration_hours("48h") == 48
    assert collect_issue_digest.parse_duration_hours("2 days") == 48
    assert collect_issue_digest.parse_duration_hours("1w") == 168


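# 24 h is the base window, so the 5/10 defaults apply unchanged; 12 h halves
# them (with ceil), and a 7-day window multiplies them by 7.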
def test_attention_thresholds_scale_by_window_length():
    one_day = collect_issue_digest.attention_thresholds_for_window(24)
    assert one_day["elevated"] == 5
    assert one_day["very_high"] == 10

    half_day = collect_issue_digest.attention_thresholds_for_window(12)
    assert half_day["elevated"] == 3
    assert half_day["very_high"] == 5

    week = collect_issue_digest.attention_thresholds_for_window(168)
    assert week["elevated"] == 35
    assert week["very_high"] == 70
    assert collect_issue_digest.attention_marker_for(34, week) == ""
    assert collect_issue_digest.attention_marker_for(35, week) == "🔥"
    assert collect_issue_digest.attention_marker_for(70, week) == "🔥🔥"


def test_fetch_comments_uses_since_filter_and_page_cap(monkeypatch):
    calls = []

    def fake_gh_json(args):
        calls.append(args)
        return [{"id": idx} for idx in range(100)]

    monkeypatch.setattr(collect_issue_digest, "gh_json", fake_gh_json)
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")

    payload = collect_issue_digest.fetch_comments(
        "openai/codex", 123, since=since, max_pages=1
    )

    assert len(payload["items"]) == 100
    assert payload["truncated"] is True
    assert payload["max_pages"] == 1
    assert calls == [
        [
            "api",
            "repos/openai/codex/issues/123/comments?since=2026-04-25T00%3A00%3A00Z&per_page=100&page=1",
        ]
    ]


def test_issue_description_prefers_title_over_body_noise():
    issue = {
        "title": "Codex.app GUI: MCP child processes not reaped after task completion",
        "body": "A later crash mention should not override the title-level symptom.",
        "labels": [{"name": "app"}, {"name": "bug"}],
    }

    description = collect_issue_digest.issue_description(issue)
    assert "MCP child processes" in description
    assert "crash" not in description.casefold()


def test_attention_markers_count_human_user_interactions():
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
    until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
    issue = {
        "number": 456,
        "title": "Agent context is exploding",
        "html_url": "https://github.com/openai/codex/issues/456",
        "state": "open",
        "created_at": "2026-04-25T01:00:00Z",
        "updated_at": "2026-04-25T12:00:00Z",
        "user": {"login": "alice"},
        "labels": [{"name": "bug"}, {"name": "agent"}],
    }
    comments = [
        {
            "id": idx,
            "created_at": "2026-04-25T02:00:00Z",
            "updated_at": "2026-04-25T02:00:00Z",
            "user": {"login": f"user-{idx}"},
            "body": "same here",
        }
        for idx in range(4)
    ]
    comments.append(
        {
            "id": 99,
            "created_at": "2026-04-25T02:00:00Z",
            "updated_at": "2026-04-25T02:00:00Z",
            "user": {"login": "github-actions[bot]"},
            "body": "duplicate bot note",
        }
    )

    summary = collect_issue_digest.summarize_issue(
        issue,
        comments,
        ["agent"],
        since,
        until,
        body_chars=100,
        comment_chars=100,
    )

    assert summary["user_interactions"] == 5
    assert summary["activity"]["new_human_comments"] == 4
    assert summary["attention"] is True
    assert summary["attention_level"] == 1
    assert summary["attention_marker"] == "🔥"

    issue["created_at"] = "2026-04-24T01:00:00Z"
    comments.extend(
        {
            "id": idx,
            "created_at": "2026-04-25T03:00:00Z",
            "updated_at": "2026-04-25T03:00:00Z",
            "user": {"login": f"extra-user-{idx}"},
            "body": "also seeing this",
        }
        for idx in range(100, 106)
    )

    summary = collect_issue_digest.summarize_issue(
        issue,
        comments,
        ["agent"],
        since,
        until,
        body_chars=100,
        comment_chars=100,
    )

    assert summary["user_interactions"] == 10
    assert summary["attention_level"] == 2
    assert summary["attention_marker"] == "🔥🔥"


def test_reactions_count_toward_attention_markers():
    since = collect_issue_digest.parse_timestamp("2026-04-25T00:00:00Z", "--since")
    until = collect_issue_digest.parse_timestamp("2026-04-26T00:00:00Z", "--until")
    issue = {
        "number": 789,
        "title": "Support 1M token context",
        "html_url": "https://github.com/openai/codex/issues/789",
        "state": "open",
        "created_at": "2026-04-24T01:00:00Z",
        "updated_at": "2026-04-25T12:00:00Z",
        "user": {"login": "alice"},
        "labels": [{"name": "enhancement"}, {"name": "context"}],
        "reactions": {"total_count": 20, "+1": 20},
    }
    comments = [
        {
            "id": 1,
            "created_at": "2026-04-25T02:00:00Z",
            "updated_at": "2026-04-25T02:00:00Z",
            "user": {"login": "commenter"},
            "body": "please",
            "reactions": {"total_count": 2, "+1": 2},
        }
    ]
    issue_reactions = [
        {
            "content": "+1",
            "created_at": "2026-04-25T03:00:00Z",
            "user": {"login": f"reactor-{idx}"},
        }
        for idx in range(18)
    ]
    comment_reactions_by_id = {
        1: [
            {
                "content": "heart",
                "created_at": "2026-04-25T04:00:00Z",
                "user": {"login": "human-reactor"},
            },
            {
                "content": "+1",
                "created_at": "2026-04-25T04:00:00Z",
                "user": {"login": "github-actions[bot]"},
            },
        ]
    }

    summary = collect_issue_digest.summarize_issue(
        issue,
        comments,
        ["context"],
        since,
        until,
        body_chars=100,
        comment_chars=100,
        issue_reaction_events=issue_reactions,
        comment_reactions_by_id=comment_reactions_by_id,
    )

    assert summary["new_reactions"] == 19
    assert summary["new_upvotes"] == 18
    assert summary["user_interactions"] == 20
    assert summary["attention_level"] == 2
    assert summary["attention_marker"] == "🔥🔥"
    assert summary["new_comments"][0]["new_reactions"] == 1
    assert summary["new_comments"][0]["new_upvotes"] == 0


def test_digest_rows_are_table_ready_with_concise_descriptions():
    rows = collect_issue_digest.digest_rows(
        [
            {
                "number": 1,
                "title": "Quiet bug",
                "description": "Quiet bug",
                "url": "https://github.com/openai/codex/issues/1",
                "owner_labels": ["context"],
                "kind_labels": ["bug"],
                "state": "open",
                "attention": False,
                "attention_level": 0,
                "attention_marker": "",
                "user_interactions": 1,
                "new_reactions": 0,
                "new_upvotes": 0,
                "engagement_score": 3,
                "issue_reaction_total": 0,
                "comment_reaction_total": 0,
                "updated_at": "2026-04-25T01:00:00Z",
                "activity": {
                    "new_issue": True,
                    "new_comments": 0,
                    "new_reactions": 0,
                    "updated_without_visible_new_post": False,
                },
            },
            {
                "number": 2,
                "title": "Busy bug",
                "description": "High-volume bug report",
                "url": "https://github.com/openai/codex/issues/2",
                "owner_labels": ["agent"],
                "kind_labels": ["bug"],
                "state": "open",
                "attention": True,
                "attention_level": 1,
                "attention_marker": "🔥",
                "user_interactions": 17,
                "new_reactions": 3,
                "new_upvotes": 2,
                "engagement_score": 20,
                "issue_reaction_total": 5,
                "comment_reaction_total": 2,
                "updated_at": "2026-04-25T02:00:00Z",
                "activity": {
                    "new_issue": False,
                    "new_comments": 16,
                    "new_reactions": 3,
                    "updated_without_visible_new_post": False,
                },
            },
        ]
    )

    assert rows[0] == {
        "ref": 1,
        "ref_markdown": "[1](https://github.com/openai/codex/issues/2)",
        "marker": "🔥",
        "attention_marker": "🔥",
        "number": 2,
        "description": "High-volume bug report",
        "title": "Busy bug",
        "url": "https://github.com/openai/codex/issues/2",
        "area": "agent",
        "kind": "bug",
        "state": "open",
        "interactions": 17,
        "user_interactions": 17,
        "new_reactions": 3,
        "new_upvotes": 2,
        "current_reactions": 7,
    }


def test_summary_inputs_are_model_ready_without_preclustering():
    issues = [
        {
            "number": 20,
            "title": "Windows app Browser Use external navigation fails",
            "description": "Browser Use navigation or app-server failure",
            "url": "https://github.com/openai/codex/issues/20",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 1,
            "engagement_score": 8,
            "updated_at": "2026-04-25T04:00:00Z",
            "activity": {"new_comments": 2},
        },
        {
            "number": 21,
            "title": "On Windows, cmake output waits until timeout",
            "description": "Windows command timeout/capture problem",
            "url": "https://github.com/openai/codex/issues/21",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 0,
            "engagement_score": 7,
            "updated_at": "2026-04-25T03:00:00Z",
            "activity": {"new_comments": 3},
        },
        {
            "number": 22,
            "title": "Windows computer use tool fails to click buttons",
            "description": "Computer-use workflow failure",
            "url": "https://github.com/openai/codex/issues/22",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "attention": False,
            "attention_level": 0,
            "attention_marker": "",
            "user_interactions": 3,
            "new_reactions": 0,
            "engagement_score": 6,
            "updated_at": "2026-04-25T02:00:00Z",
            "activity": {"new_comments": 3},
        },
    ]

    rows = collect_issue_digest.summary_inputs(issues, ref_map={20: 1, 21: 2, 22: 3})

    assert rows == [
        {
            "ref": 1,
            "ref_markdown": "[1](https://github.com/openai/codex/issues/20)",
            "number": 20,
            "title": "Windows app Browser Use external navigation fails",
            "description": "Browser Use navigation or app-server failure",
            "url": "https://github.com/openai/codex/issues/20",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 2,
            "new_reactions": 1,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
        {
            "ref": 2,
            "ref_markdown": "[2](https://github.com/openai/codex/issues/21)",
            "number": 21,
            "title": "On Windows, cmake output waits until timeout",
            "description": "Windows command timeout/capture problem",
            "url": "https://github.com/openai/codex/issues/21",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 3,
            "new_reactions": 0,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
        {
            "ref": 3,
            "ref_markdown": "[3](https://github.com/openai/codex/issues/22)",
            "number": 22,
            "title": "Windows computer use tool fails to click buttons",
            "description": "Computer-use workflow failure",
            "url": "https://github.com/openai/codex/issues/22",
            "labels": ["app", "bug"],
            "owner_labels": ["app"],
            "kind_labels": ["bug"],
            "state": "",
            "attention_marker": "",
            "interactions": 3,
            "new_comments": 3,
            "new_reactions": 0,
            "new_upvotes": 0,
            "current_reactions": 0,
        },
    ]
@@ -1,59 +0,0 @@
---
name: codex-pr-body
description: Update the title and body of one or more pull requests.
---

## Determining the PR(s)

When this skill is invoked, the PR(s) to update may be specified explicitly, but in the common case, the PR(s) to update will be inferred from the branch / commit that the user is currently working on. For ordinary Git usage (i.e., not Sapling as discussed below), you may have to use a combination of `git branch` and `gh pr view <branch> --repo openai/codex --json number --jq '.number'` to determine the PR associated with the current branch / commit.
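
For example, a minimal sketch of that lookup, assuming an ordinary Git checkout with the PR branch checked out:

```shell
# Resolve the current branch, then ask gh for the PR number tied to it.
branch="$(git branch --show-current)"
gh pr view "$branch" --repo openai/codex --json number --jq '.number'
```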

## PR Body Contents

When invoked, use `gh` to edit the pull request body and title to reflect the contents of the specified PR. Make sure to check the existing pull request body to see if there is key information that should be preserved. For example, NEVER remove an image in the existing pull request body, as the author may have no way to recover it if you remove it.

It is critically important to explain _why_ the change is being made. If the current conversation in which this skill is invoked has discussed the motivation, be sure to capture this in the pull request body.

The body should also explain _what_ changed, but this should appear after the _why_.

Limit discussion to the _net change_ of the commit. It is generally frowned upon to discuss changes that were attempted but later undone in the course of the development of the pull request. When rewriting the pull request body, you may need to eliminate details such as these when they are no longer appropriate / of interest to future readers.

Avoid references to absolute paths on my local disk. When talking about a path that is within the repository, simply use the repo-relative path.

It is generally helpful to discuss how the change was verified. That said, it is unnecessary to mention things that CI checks automatically, e.g., do not include "ran `just fmt`" as part of the test plan. However, identifying the new tests that were purposely introduced to verify the new behavior of the pull request is often appropriate.

Make use of Markdown to format the pull request professionally. Ensure "code things" appear in single backticks when referenced inline. Fenced code blocks are useful when referencing code or showing a shell transcript. Also, make use of GitHub permalinks when citing existing pieces of code that are relevant to the change.

Make sure to reference any relevant pull requests or issues, though there should be no need to reference the pull request in its own PR body.

If there is documentation that should be updated on https://developers.openai.com/codex as a result of this change, please note that in a separate section near the end of the pull request. Omit this section if there is no documentation that needs to be updated.

## Working with Stacks

Sometimes a pull request is composed of a stack of commits that build on one another. In these cases, the PR body should reflect the _net_ change introduced by the stack as a whole, rather than the individual commits that make up the stack.

Similarly, sometimes a user may be using a tool like Sapling to leverage _stacked pull requests_, in which case the `base` of the PR may be a branch that is the `head` of another PR in the stack rather than `main`. In this case, be sure to discuss only the net change between the `base` and `head` of the PR that is being opened against that stacked base, rather than the changes relative to `main`.

## Sapling

If `.git/sl/store` is present, then this Git repository is governed by Sapling SCM (https://sapling-scm.com).

In Sapling, run the following to see if there is a GitHub pull request associated with the current revision:

```shell
sl log --template '{github_pull_request_url}' -r .
```

Alternatively, you can run `sl sl` to see the current development branch and whether there is a GitHub pull request associated with the current commit. For example, if the output were:

```
@ cb032b31cf 72 minutes ago mbolin #11412
╭─╯ tui: show non-file layer content in /debug-config
│
o fdd0cd1de9 Today at 20:09 origin/main
│
~
```

- `@` indicates the current commit is `cb032b31cf`
- it is a development branch containing a single commit branched off of `origin/main`
- it is associated with GitHub pull request #11412
@@ -1,82 +0,0 @@
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04

ARG TZ
ARG DEBIAN_FRONTEND=noninteractive
ARG NODE_MAJOR=22
ARG RUST_TOOLCHAIN=1.92.0
# Keep this in sync with .devcontainer/codex-install/package.json and pnpm-lock.yaml.
ARG CODEX_NPM_VERSION=0.121.0

ENV TZ="$TZ"
ENV COREPACK_ENABLE_DOWNLOAD_PROMPT=0

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Devcontainers run as a non-root user, so enable bubblewrap's setuid mode.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        ca-certificates \
        pkg-config \
        clang \
        musl-tools \
        libssl-dev \
        libsqlite3-dev \
        just \
        python3 \
        python3-pip \
        jq \
        less \
        man-db \
        unzip \
        ripgrep \
        fzf \
        fd-find \
        zsh \
        dnsutils \
        iproute2 \
        ipset \
        iptables \
        aggregate \
        bubblewrap \
    && chmod u+s /usr/bin/bwrap \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY .devcontainer/codex-install/package.json \
    .devcontainer/codex-install/pnpm-lock.yaml \
    .devcontainer/codex-install/pnpm-workspace.yaml \
    /opt/codex-install/

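# Fail the build early if CODEX_NPM_VERSION drifts out of sync with the
# dependency pinned in package.json, before pnpm installs anything.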
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \
    && apt-get update \
    && apt-get install -y --no-install-recommends nodejs \
    && test "$(node -p "require('/opt/codex-install/package.json').dependencies['@openai/codex']")" = "${CODEX_NPM_VERSION}" \
    && cd /opt/codex-install \
    && corepack pnpm install --prod --frozen-lockfile \
    && ln -s /opt/codex-install/node_modules/.bin/codex /usr/local/bin/codex \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY .devcontainer/init-firewall.sh /usr/local/bin/init-firewall.sh
COPY .devcontainer/post_install.py /opt/post_install.py
COPY .devcontainer/post-start.sh /opt/post_start.sh

RUN chmod 500 /usr/local/bin/init-firewall.sh \
    && chmod 755 /opt/post_start.sh \
    && chmod 644 /opt/post_install.py \
    && chown vscode:vscode /opt/post_install.py

RUN install -d -m 0775 -o vscode -g vscode /commandhistory /workspace \
    && touch /commandhistory/.bash_history /commandhistory/.zsh_history \
    && chown vscode:vscode /commandhistory/.bash_history /commandhistory/.zsh_history

USER vscode
ENV PATH="/home/vscode/.cargo/bin:${PATH}"
WORKDIR /workspace

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain "${RUST_TOOLCHAIN}" \
    && rustup component add clippy rustfmt rust-src \
    && rustup target add x86_64-unknown-linux-musl aarch64-unknown-linux-musl
@@ -1,38 +1,10 @@
# Containerized Development

We provide two container paths:

- `devcontainer.json` keeps the existing Codex contributor setup for working on this repository.
- `devcontainer.secure.json` adds a customer-oriented profile with stricter outbound network controls.

## Codex contributor profile

Use `devcontainer.json` when you are developing Codex itself. This is the same lightweight arm64 container that already exists in the repo.

## Secure customer profile

Use `devcontainer.secure.json` when you want a stricter runtime profile for running Codex inside a project container:

- installs the Codex CLI plus common build tools
- installs bubblewrap in setuid mode for Codex's Linux sandbox
- disables Docker's outer seccomp and AppArmor profiles so bubblewrap can construct Codex's inner sandbox
- enables firewall startup with an allowlist-driven outbound policy
- blocks IPv6 by default so the allowlist cannot be bypassed over AAAA routes
- requires `NET_ADMIN` and `NET_RAW` so the firewall can be installed at startup

This profile keeps the stricter networking isolated to the customer path instead of changing the default Codex contributor container.

Start it from the CLI with:

```bash
devcontainer up --workspace-folder . --config .devcontainer/devcontainer.secure.json
```

In VS Code, choose **Dev Containers: Open Folder in Container...** and select `.devcontainer/devcontainer.secure.json`.
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.

## Docker

To build the contributor image locally for x64 and then run it with the repo mounted under `/workspace`:
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:

```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
@@ -42,8 +14,17 @@ docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-

Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
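
A sketch of the resulting invocation, assuming the image name set above (the bind-mount path is illustrative):

```shell
# Hypothetical run: mount the repo at /workspace and divert Cargo output so
# container-built binaries do not clobber host-built ones.
docker run --platform=linux/amd64 --rm -it \
  -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 \
  -v "$PWD":/workspace "$CODEX_DOCKER_IMAGE_NAME"
```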

For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
For arm64, specify `--platform=linux/amd64` instead for both `docker build` and `docker run`.

Currently, the contributor `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.

The secure profile's capability, seccomp, and AppArmor options are required when you want Codex's bubblewrap sandbox to run inside Docker as the non-root devcontainer user. Without them, Docker's default runtime profile can block bubblewrap's namespace setup before Codex's own seccomp filter is installed. This keeps the Docker relaxation explicit in the profile that is meant to run Codex inside a project container, while the default contributor profile stays lightweight.
## VS Code

VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.

From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):

```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

@@ -1,13 +0,0 @@
{
  "name": "codex-devcontainer-install",
  "private": true,
  "description": "Locked Codex CLI install boundary for the secure devcontainer.",
  "dependencies": {
    "@openai/codex": "0.121.0"
  },
  "engines": {
    "node": ">=22",
    "pnpm": ">=10.33.0"
  },
  "packageManager": "pnpm@10.33.0+sha512.10568bb4a6afb58c9eb3630da90cc9516417abebd3fabbe6739f0ae795728da1491e9db5a544c76ad8eb7570f5c4bb3d6c637b2cb41bfdcdb47fa823c8649319"
}
85
.devcontainer/codex-install/pnpm-lock.yaml
generated
85
.devcontainer/codex-install/pnpm-lock.yaml
generated
@@ -1,85 +0,0 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .:
    dependencies:
      '@openai/codex':
        specifier: 0.121.0
        version: 0.121.0

packages:

  '@openai/codex@0.121.0':
    resolution: {integrity: sha512-kCJ2NeATd4QBQRmqV04ymdN1ZU3MSwnJQDm/KzjpuzGvCuUVEn7no/T2mRyxQ2x77AACqriNOyPPoM/yufyvNg==}
    engines: {node: '>=16'}
    hasBin: true

  '@openai/codex@0.121.0-darwin-arm64':
    resolution: {integrity: sha512-ZyBqIB6Fb4I0hGb/h65Vu7ePYjHSmGiqqfm+/1djEuxDPkqjfi4wkxYxNYNY+6najyNGN4UijOSTTf19eDCrqw==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [darwin]

  '@openai/codex@0.121.0-darwin-x64':
    resolution: {integrity: sha512-1/OAtdkAZ5yPI3xqaEFlHuPziS1yCqL2gOZdswE7HTmmwpIxi6Z3FCo60JWDPluIp89z4tftdjq73/OCN0YVcw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [darwin]

  '@openai/codex@0.121.0-linux-arm64':
    resolution: {integrity: sha512-2UgMmdo237o7SCMsfb529cOSEM2HFUgN6OBkv5SBLwfNY1NO2Ex6JnUjlppEXlX6/4cXfZ5qjDghVz5j/+B9zw==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [linux]

  '@openai/codex@0.121.0-linux-x64':
    resolution: {integrity: sha512-vlpNJXIqss800J+32Vy7TUZzv31n61b45OLxmsVQGFkTNLJcjFrj9jDUC7I62eC4F16gLioilefNfv4CdJQOEw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [linux]

  '@openai/codex@0.121.0-win32-arm64':
    resolution: {integrity: sha512-m88q4f3XI5npn1t6OG0nWGHWWAjO5FgjRwxh4hdujbLO6t9CiCNfhfPZIOSsoATbrCNwLC+6S77m3cjbNToPNg==}
    engines: {node: '>=16'}
    cpu: [arm64]
    os: [win32]

  '@openai/codex@0.121.0-win32-x64':
    resolution: {integrity: sha512-Fp0ecVOyM+VcBi/y4HVvRzhifO9YqRiHzhV3rhtAppC7flh22WPguLC4kmvXYAR0p3RPzbo35M2CedWnkOT+cw==}
    engines: {node: '>=16'}
    cpu: [x64]
    os: [win32]

snapshots:

  '@openai/codex@0.121.0':
    optionalDependencies:
      '@openai/codex-darwin-arm64': '@openai/codex@0.121.0-darwin-arm64'
      '@openai/codex-darwin-x64': '@openai/codex@0.121.0-darwin-x64'
      '@openai/codex-linux-arm64': '@openai/codex@0.121.0-linux-arm64'
      '@openai/codex-linux-x64': '@openai/codex@0.121.0-linux-x64'
      '@openai/codex-win32-arm64': '@openai/codex@0.121.0-win32-arm64'
      '@openai/codex-win32-x64': '@openai/codex@0.121.0-win32-x64'

  '@openai/codex@0.121.0-darwin-arm64':
    optional: true

  '@openai/codex@0.121.0-darwin-x64':
    optional: true

  '@openai/codex@0.121.0-linux-arm64':
    optional: true

  '@openai/codex@0.121.0-linux-x64':
    optional: true

  '@openai/codex@0.121.0-win32-arm64':
    optional: true

  '@openai/codex@0.121.0-win32-x64':
    optional: true
@@ -1,12 +0,0 @@
packages:
  - "."

minimumReleaseAge: 10080
minimumReleaseAgeExclude: []

blockExoticSubdeps: true
strictDepBuilds: true
trustPolicy: no-downgrade
trustPolicyIgnoreAfter: 10080
trustPolicyExclude: []
allowBuilds: {}
@@ -1,83 +0,0 @@
{
  "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json",
  "name": "Codex (Secure)",
  "build": {
    "dockerfile": "Dockerfile.secure",
    "context": "..",
    "args": {
      "TZ": "${localEnv:TZ:UTC}",
      "NODE_MAJOR": "22",
      "RUST_TOOLCHAIN": "1.92.0",
      "CODEX_NPM_VERSION": "0.121.0"
    }
  },
  "runArgs": [
    "--cap-add=SYS_ADMIN",
    "--cap-add=SYS_CHROOT",
    "--cap-add=SETUID",
    "--cap-add=SETGID",
    "--cap-add=SYS_PTRACE",
    "--security-opt=seccomp=unconfined",
    "--security-opt=apparmor=unconfined",
    "--cap-add=NET_ADMIN",
    "--cap-add=NET_RAW"
  ],
  "init": true,
  "updateRemoteUserUID": true,
  "remoteUser": "vscode",
  "workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated",
  "workspaceFolder": "/workspace",
  "mounts": [
    "source=codex-commandhistory-${devcontainerId},target=/commandhistory,type=volume",
    "source=codex-home-${devcontainerId},target=/home/vscode/.codex,type=volume",
    "source=codex-gh-${devcontainerId},target=/home/vscode/.config/gh,type=volume",
    "source=codex-cargo-registry-${devcontainerId},target=/home/vscode/.cargo/registry,type=volume",
    "source=codex-cargo-git-${devcontainerId},target=/home/vscode/.cargo/git,type=volume",
    "source=codex-rustup-${devcontainerId},target=/home/vscode/.rustup,type=volume",
    "source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,readonly"
  ],
  "containerEnv": {
    "RUST_BACKTRACE": "1",
    "CODEX_UNSAFE_ALLOW_NO_SANDBOX": "1",
    "CODEX_ENABLE_FIREWALL": "1",
    "CODEX_INCLUDE_GITHUB_META_RANGES": "1",
    "OPENAI_ALLOWED_DOMAINS": "api.openai.com auth.openai.com github.com api.github.com codeload.github.com raw.githubusercontent.com objects.githubusercontent.com crates.io index.crates.io static.crates.io static.rust-lang.org registry.npmjs.org pypi.org files.pythonhosted.org",
    "CARGO_TARGET_DIR": "/workspace/.cache/cargo-target",
    "GIT_CONFIG_GLOBAL": "/home/vscode/.gitconfig.local",
    "COREPACK_ENABLE_DOWNLOAD_PROMPT": "0",
    "PYTHONDONTWRITEBYTECODE": "1",
    "PIP_DISABLE_PIP_VERSION_CHECK": "1"
  },
  "remoteEnv": {
    "OPENAI_API_KEY": "${localEnv:OPENAI_API_KEY}"
  },
  "postCreateCommand": "python3 /opt/post_install.py",
  "postStartCommand": "bash /opt/post_start.sh",
  "waitFor": "postStartCommand",
  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.defaultProfile.linux": "zsh",
        "terminal.integrated.profiles.linux": {
          "bash": {
            "path": "bash",
            "icon": "terminal-bash"
          },
          "zsh": {
            "path": "zsh"
          }
        },
        "files.trimTrailingWhitespace": true,
        "files.insertFinalNewline": true,
        "files.trimFinalNewlines": true
      },
      "extensions": [
        "openai.chatgpt",
        "rust-lang.rust-analyzer",
        "tamasfe.even-better-toml",
        "vadimcn.vscode-lldb",
        "ms-azuretools.vscode-docker"
      ]
    }
  }
}
@@ -1,170 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'

allowed_domains_file="/etc/codex/allowed_domains.txt"
include_github_meta_ranges="${CODEX_INCLUDE_GITHUB_META_RANGES:-1}"

if [ -f "$allowed_domains_file" ]; then
  mapfile -t allowed_domains < <(sed '/^\s*#/d;/^\s*$/d' "$allowed_domains_file")
else
  allowed_domains=("api.openai.com")
fi

if [ "${#allowed_domains[@]}" -eq 0 ]; then
  echo "ERROR: No allowed domains configured"
  exit 1
fi

add_ipv4_cidr_to_allowlist() {
  local source="$1"
  local cidr="$2"

  if [[ ! "$cidr" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}/[0-9]{1,2}$ ]]; then
    echo "ERROR: Invalid ${source} CIDR range: $cidr"
    exit 1
  fi

  ipset add allowed-domains "$cidr" -exist
}

configure_ipv6_default_deny() {
  if ! command -v ip6tables >/dev/null 2>&1; then
    echo "ERROR: ip6tables is required to enforce IPv6 default-deny policy"
    exit 1
  fi

  ip6tables -F
  ip6tables -X
  ip6tables -t mangle -F
  ip6tables -t mangle -X
  ip6tables -t nat -F 2>/dev/null || true
  ip6tables -t nat -X 2>/dev/null || true

  ip6tables -A INPUT -i lo -j ACCEPT
  ip6tables -A OUTPUT -o lo -j ACCEPT
  ip6tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
  ip6tables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT

  ip6tables -P INPUT DROP
  ip6tables -P FORWARD DROP
  ip6tables -P OUTPUT DROP

  echo "IPv6 firewall policy configured (default-deny)"
}

# Preserve docker-managed DNS NAT rules before clearing tables.
docker_dns_rules="$(iptables-save -t nat | grep "127\\.0\\.0\\.11" || true)"

iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
ipset destroy allowed-domains 2>/dev/null || true

if [ -n "$docker_dns_rules" ]; then
  echo "Restoring Docker DNS NAT rules"
  iptables -t nat -N DOCKER_OUTPUT 2>/dev/null || true
  iptables -t nat -N DOCKER_POSTROUTING 2>/dev/null || true
  while IFS= read -r rule; do
    [ -z "$rule" ] && continue
    iptables -t nat $rule
  done <<< "$docker_dns_rules"
fi

# Allow DNS resolution and localhost communication.
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -A INPUT -p udp --sport 53 -j ACCEPT
iptables -A INPUT -p tcp --sport 53 -j ACCEPT
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT

ipset create allowed-domains hash:net

for domain in "${allowed_domains[@]}"; do
  echo "Resolving $domain"
  ips="$(dig +short A "$domain" | sed '/^\s*$/d')"
  if [ -z "$ips" ]; then
    echo "ERROR: Failed to resolve $domain"
    exit 1
  fi

  while IFS= read -r ip; do
    if [[ ! "$ip" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; then
      echo "ERROR: Invalid IPv4 address from DNS for $domain: $ip"
      exit 1
    fi
    ipset add allowed-domains "$ip" -exist
  done <<< "$ips"
done

if [ "$include_github_meta_ranges" = "1" ]; then
  echo "Fetching GitHub meta ranges"
  github_meta="$(curl -fsSL --connect-timeout 10 https://api.github.com/meta)"

  if ! echo "$github_meta" | jq -e '.web and .api and .git' >/dev/null; then
    echo "ERROR: GitHub meta response missing expected fields"
    exit 1
  fi

  while IFS= read -r cidr; do
    [ -z "$cidr" ] && continue
    if [[ "$cidr" == *:* ]]; then
      # Current policy enforces IPv4-only ipset entries.
      continue
    fi
    add_ipv4_cidr_to_allowlist "GitHub" "$cidr"
  done < <(echo "$github_meta" | jq -r '((.web // []) + (.api // []) + (.git // []))[]' | sort -u)
fi

host_ip="$(ip route | awk '/default/ {print $3; exit}')"
if [ -z "$host_ip" ]; then
  echo "ERROR: Failed to detect host IP"
  exit 1
fi

host_network="$(echo "$host_ip" | sed 's/\.[0-9]*$/.0\/24/')"
iptables -A INPUT -s "$host_network" -j ACCEPT
iptables -A OUTPUT -d "$host_network" -j ACCEPT

iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP

iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT

# Reject rather than silently drop to make policy failures obvious.
iptables -A INPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A FORWARD -j REJECT --reject-with icmp-admin-prohibited

configure_ipv6_default_deny

echo "Firewall configuration complete"

if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then
  echo "ERROR: Firewall verification failed - was able to reach https://example.com"
  exit 1
fi

if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
  echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
  exit 1
fi

if [ "$include_github_meta_ranges" = "1" ] && ! curl --connect-timeout 5 https://api.github.com/zen >/dev/null 2>&1; then
  echo "ERROR: Firewall verification failed - unable to reach https://api.github.com"
  exit 1
fi

if curl --connect-timeout 5 -6 https://example.com >/dev/null 2>&1; then
  echo "ERROR: Firewall verification failed - was able to reach https://example.com over IPv6"
  exit 1
fi

echo "Firewall verification passed"
@@ -1,36 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

if [ "${CODEX_ENABLE_FIREWALL:-1}" != "1" ]; then
  echo "[devcontainer] Firewall mode: permissive (CODEX_ENABLE_FIREWALL=${CODEX_ENABLE_FIREWALL:-unset})."
  exit 0
fi

echo "[devcontainer] Firewall mode: strict"

domains_raw="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
mapfile -t domains < <(printf '%s\n' "$domains_raw" | tr ', ' '\n\n' | sed '/^$/d' | sort -u)

if [ "${#domains[@]}" -eq 0 ]; then
  echo "[devcontainer] No allowed domains configured."
  exit 1
fi

tmp_file="$(mktemp)"
for domain in "${domains[@]}"; do
  if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*\.[a-zA-Z]{2,}$ ]]; then
    echo "[devcontainer] Invalid domain in OPENAI_ALLOWED_DOMAINS: $domain"
    rm -f "$tmp_file"
    exit 1
  fi
  printf '%s\n' "$domain" >> "$tmp_file"
done

sudo install -d -m 0755 /etc/codex
sudo cp "$tmp_file" /etc/codex/allowed_domains.txt
sudo chown root:root /etc/codex/allowed_domains.txt
sudo chmod 0444 /etc/codex/allowed_domains.txt
rm -f "$tmp_file"

echo "[devcontainer] Applying firewall policy for domains: ${domains[*]}"
sudo --preserve-env=CODEX_INCLUDE_GITHUB_META_RANGES /usr/local/bin/init-firewall.sh
@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""Post-install configuration for the Codex devcontainer."""

from __future__ import annotations

import os
import subprocess
import sys
from pathlib import Path


def ensure_history_files() -> None:
    command_history_dir = Path("/commandhistory")
    command_history_dir.mkdir(parents=True, exist_ok=True)

    for filename in (".bash_history", ".zsh_history"):
        (command_history_dir / filename).touch(exist_ok=True)


def fix_directory_ownership() -> None:
    uid = os.getuid()
    gid = os.getgid()

    paths = [
        Path.home() / ".codex",
        Path.home() / ".config" / "gh",
        Path.home() / ".cargo",
        Path.home() / ".rustup",
        Path("/commandhistory"),
    ]

    for path in paths:
        if not path.exists():
            continue

        stat_info = path.stat()
        if stat_info.st_uid == uid and stat_info.st_gid == gid:
            continue

        try:
            subprocess.run(
                ["sudo", "chown", "-R", f"{uid}:{gid}", str(path)],
                check=True,
                capture_output=True,
                text=True,
            )
            print(f"[post_install] fixed ownership: {path}", file=sys.stderr)
        except subprocess.CalledProcessError as err:
            print(
                f"[post_install] warning: could not fix ownership of {path}: {err.stderr.strip()}",
                file=sys.stderr,
            )


def setup_git_config() -> None:
    home = Path.home()
    host_gitconfig = home / ".gitconfig"
    local_gitconfig = home / ".gitconfig.local"
    gitignore_global = home / ".gitignore_global"

    gitignore_global.write_text(
        """# Codex
.codex/

# Rust
/target/

# Node
node_modules/

# Python
__pycache__/
*.pyc

# Editors
.vscode/
.idea/

# macOS
.DS_Store
""",
        encoding="utf-8",
    )

    include_line = (
        f"[include]\n path = {host_gitconfig}\n\n" if host_gitconfig.exists() else ""
    )

    local_gitconfig.write_text(
        f"""# Container-local git configuration
{include_line}[core]
excludesfile = {gitignore_global}

[merge]
conflictstyle = diff3

[diff]
colorMoved = default
""",
        encoding="utf-8",
    )


def main() -> None:
    print("[post_install] configuring devcontainer...", file=sys.stderr)
    ensure_history_files()
    fix_directory_ownership()
    setup_git_config()
    print("[post_install] complete", file=sys.stderr)


if __name__ == "__main__":
    main()
2
.gitattributes
vendored
@@ -1,2 +0,0 @@
codex-rs/app-server-protocol/schema/** linguist-generated
codex-rs/hooks/schema/generated/** linguist-generated
5
.github/CODEOWNERS
vendored
@@ -1,5 +0,0 @@
# Core crate ownership.
/codex-rs/core/ @openai/codex-core-agent-team

# Keep ownership changes reviewed by the same team.
/.github/CODEOWNERS @openai/codex-core-agent-team
8
.github/actions/linux-code-sign/action.yml
vendored
@@ -7,21 +4,17 @@ inputs:
   artifacts-dir:
     description: Absolute path to the directory containing built binaries to sign.
     required: true
-  binaries:
-    description: Space-delimited binary basenames to sign.
-    default: "codex codex-responses-api-proxy"
 
 runs:
   using: composite
   steps:
     - name: Install cosign
-      uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0
+      uses: sigstore/cosign-installer@v3.7.0
 
     - name: Cosign Linux artifacts
       shell: bash
       env:
         ARTIFACTS_DIR: ${{ inputs.artifacts-dir }}
-        BINARIES: ${{ inputs.binaries }}
         COSIGN_EXPERIMENTAL: "1"
         COSIGN_YES: "true"
         COSIGN_OIDC_CLIENT_ID: "sigstore"
@@ -35,7 +31,7 @@ runs:
           exit 1
         fi
 
-        for binary in ${BINARIES}; do
+        for binary in codex codex-responses-api-proxy; do
           artifact="${dest}/${binary}"
           if [[ ! -f "$artifact" ]]; then
             echo "Binary $artifact not found"
12
.github/actions/macos-code-sign/action.yml
vendored
@@ -4,9 +4,6 @@ inputs:
   target:
     description: Rust compilation target triple (e.g. aarch64-apple-darwin).
     required: true
-  binaries:
-    description: Space-delimited binary basenames to sign and notarize.
-    default: "codex codex-responses-api-proxy"
   sign-binaries:
     description: Whether to sign and notarize the macOS binaries.
     required: false
@@ -122,7 +119,6 @@ runs:
       shell: bash
       env:
         TARGET: ${{ inputs.target }}
-        BINARIES: ${{ inputs.binaries }}
       run: |
         set -euo pipefail
 
@@ -138,7 +134,7 @@ runs:
 
         entitlements_path="$GITHUB_ACTION_PATH/codex.entitlements.plist"
 
-        for binary in ${BINARIES}; do
+        for binary in codex codex-responses-api-proxy; do
           path="codex-rs/target/${TARGET}/release/${binary}"
           codesign --force --options runtime --timestamp --entitlements "$entitlements_path" --sign "$APPLE_CODESIGN_IDENTITY" "${keychain_args[@]}" "$path"
         done
@@ -148,7 +144,6 @@ runs:
       shell: bash
       env:
         TARGET: ${{ inputs.target }}
-        BINARIES: ${{ inputs.binaries }}
         APPLE_NOTARIZATION_KEY_P8: ${{ inputs.apple-notarization-key-p8 }}
         APPLE_NOTARIZATION_KEY_ID: ${{ inputs.apple-notarization-key-id }}
         APPLE_NOTARIZATION_ISSUER_ID: ${{ inputs.apple-notarization-issuer-id }}
@@ -187,9 +182,8 @@ runs:
           notarize_submission "$binary" "$archive_path" "$notary_key_path"
         }
 
-        for binary in ${BINARIES}; do
-          notarize_binary "${binary}"
-        done
+        notarize_binary "codex"
+        notarize_binary "codex-responses-api-proxy"
 
     - name: Sign and notarize macOS dmg
       if: ${{ inputs.sign-dmg == 'true' }}
64
.github/actions/prepare-bazel-ci/action.yml
vendored
@@ -1,64 +0,0 @@
name: prepare-bazel-ci
description: Prepare a Bazel CI job with shared setup, repository cache restore, and execution logs.
inputs:
  target:
    description: Target triple used for setup and cache namespacing.
    required: true
  cache-scope:
    description: Logical namespace used to keep concurrent Bazel jobs from reserving the same repository cache key.
    required: true
  install-test-prereqs:
    description: Install DotSlash for Bazel-backed test jobs.
    required: false
    default: "false"
outputs:
  repository-cache-path:
    description: Filesystem path used for the Bazel repository cache.
    value: ${{ steps.setup_bazel.outputs.repository-cache-path }}
  repository-cache-key:
    description: Primary actions/cache key for the Bazel repository cache.
    value: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
  repository-cache-hit:
    description: Whether the Bazel repository cache restore found an exact key match.
    value: ${{ steps.cache_bazel_repository_restore.outputs.cache-hit }}

runs:
  using: composite
  steps:
    - name: Set up Bazel CI
      id: setup_bazel
      uses: ./.github/actions/setup-bazel-ci
      with:
        target: ${{ inputs.target }}
        install-test-prereqs: ${{ inputs.install-test-prereqs }}

    - name: Compute bazel repository cache key
      id: cache_bazel_repository_key
      shell: bash
      env:
        CACHE_SCOPE: ${{ inputs.cache-scope }}
        TARGET: ${{ inputs.target }}
        CACHE_HASH: ${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
      run: |
        echo "repository-cache-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-${CACHE_HASH}" >> "${GITHUB_OUTPUT}"
        echo "repository-cache-restore-key=bazel-cache-${CACHE_SCOPE}-${TARGET}-" >> "${GITHUB_OUTPUT}"

    # Restore the Bazel repository cache explicitly so external dependencies
    # do not need to be re-downloaded on every CI run. Keep restore failures
    # non-fatal so transient cache-service errors degrade to a cold build
    # instead of failing the job.
    - name: Restore bazel repository cache
      id: cache_bazel_repository_restore
      continue-on-error: true
      uses: actions/cache/restore@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
      with:
        path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
        key: ${{ steps.cache_bazel_repository_key.outputs.repository-cache-key }}
        restore-keys: |
          ${{ steps.cache_bazel_repository_key.outputs.repository-cache-restore-key }}

    - name: Set up Bazel execution logs
      shell: bash
      run: |
        mkdir -p "${RUNNER_TEMP}/bazel-execution-logs"
        echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}"
@@ -1,54 +0,0 @@
name: Run argument comment lint
description: Run argument-comment-lint on codex-rs via Bazel.

inputs:
  target:
    description: Runner target passed to setup-bazel-ci.
    required: true
  buildbuddy-api-key:
    description: BuildBuddy API key used by Bazel CI.
    required: false
    default: ""

runs:
  using: composite
  steps:
    - uses: ./.github/actions/setup-bazel-ci
      with:
        target: ${{ inputs.target }}
        install-test-prereqs: true

    - name: Install Linux sandbox build dependencies
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: |
        sudo DEBIAN_FRONTEND=noninteractive apt-get update
        sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev

    - name: Run argument comment lint on codex-rs via Bazel
      if: ${{ runner.os != 'Windows' }}
      env:
        BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
      shell: bash
      run: |
        bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
        ./.github/scripts/run-bazel-ci.sh \
          -- \
          build \
          --config=argument-comment-lint \
          --keep_going \
          --build_metadata=COMMIT_SHA=${GITHUB_SHA} \
          -- \
          ${bazel_targets}

    - name: Run argument comment lint on codex-rs via Bazel
      if: ${{ runner.os == 'Windows' }}
      env:
        BUILDBUDDY_API_KEY: ${{ inputs.buildbuddy-api-key }}
      shell: bash
      run: |
        ./.github/scripts/run-argument-comment-lint-bazel.sh \
          --config=argument-comment-lint \
          --platforms=//:local_windows \
          --keep_going \
          --build_metadata=COMMIT_SHA=${GITHUB_SHA}
17
.github/actions/setup-bazel-ci/action.yml
vendored
@@ -5,7 +5,7 @@ inputs:
     description: Target triple used for cache namespacing.
     required: true
   install-test-prereqs:
-    description: Install DotSlash for Bazel-backed test jobs.
+    description: Install Node.js and DotSlash for Bazel-backed test jobs.
     required: false
     default: "false"
 outputs:
@@ -16,11 +16,17 @@ outputs:
 runs:
   using: composite
   steps:
+    - name: Set up Node.js for js_repl tests
+      if: inputs.install-test-prereqs == 'true'
+      uses: actions/setup-node@v6
+      with:
+        node-version-file: codex-rs/node-version.txt
+
     # Some integration tests rely on DotSlash being installed.
     # See https://github.com/openai/codex/pull/7617.
     - name: Install DotSlash
       if: inputs.install-test-prereqs == 'true'
-      uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2
+      uses: facebook/install-dotslash@v2
 
     - name: Make DotSlash available in PATH (Unix)
       if: inputs.install-test-prereqs == 'true' && runner.os != 'Windows'
@@ -33,7 +39,7 @@ runs:
       run: Copy-Item (Get-Command dotslash).Source -Destination "$env:LOCALAPPDATA\Microsoft\WindowsApps\dotslash.exe"
 
     - name: Set up Bazel
-      uses: bazel-contrib/setup-bazel@c5acdfb288317d0b5c0bbd7a396a3dc868bb0f86 # 0.19.0
+      uses: bazelbuild/setup-bazelisk@v3
 
     - name: Configure Bazel repository cache
      id: configure_bazel_repository_cache
@@ -116,11 +122,6 @@ runs:
         }
       }
 
-    - name: Compute cache-stable Windows Bazel PATH
-      if: runner.os == 'Windows'
-      shell: pwsh
-      run: ./.github/scripts/compute-bazel-windows-path.ps1
-
     - name: Enable Git long paths (Windows)
       if: runner.os == 'Windows'
       shell: pwsh
49
.github/actions/setup-rusty-v8-musl/action.yml
vendored
@@ -1,49 +0,0 @@
name: setup-rusty-v8-musl
description: Download and verify musl rusty_v8 artifacts for Cargo builds.
inputs:
  target:
    description: Rust musl target triple.
    required: true

runs:
  using: composite
  steps:
    - name: Configure musl rusty_v8 artifact overrides and verify checksums
      shell: bash
      env:
        TARGET: ${{ inputs.target }}
      run: |
        set -euo pipefail

        case "${TARGET}" in
          x86_64-unknown-linux-musl|aarch64-unknown-linux-musl)
            ;;
          *)
            echo "Unsupported musl rusty_v8 target: ${TARGET}" >&2
            exit 1
            ;;
        esac

        version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
        release_tag="rusty-v8-v${version}"
        base_url="https://github.com/openai/codex/releases/download/${release_tag}"
        binding_dir="${RUNNER_TEMP}/rusty_v8"
        archive_path="${binding_dir}/librusty_v8_release_${TARGET}.a.gz"
        binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
        checksums_path="${binding_dir}/rusty_v8_release_${TARGET}.sha256"
        checksums_source="${GITHUB_WORKSPACE}/third_party/v8/rusty_v8_${version//./_}.sha256"

        mkdir -p "${binding_dir}"
        curl -fsSL "${base_url}/librusty_v8_release_${TARGET}.a.gz" -o "${archive_path}"
        curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
        grep -E " (librusty_v8_release_${TARGET}[.]a[.]gz|src_binding_release_${TARGET}[.]rs)$" \
          "${checksums_source}" > "${checksums_path}"

        if [[ "$(wc -l < "${checksums_path}")" -ne 2 ]]; then
          echo "Expected exactly two checksums for ${TARGET} in ${checksums_source}" >&2
          exit 1
        fi

        (cd "${binding_dir}" && sha256sum -c "${checksums_path}")
        echo "RUSTY_V8_ARCHIVE=${archive_path}" >> "${GITHUB_ENV}"
        echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "${GITHUB_ENV}"
30
.github/actions/windows-code-sign/action.yml
vendored
@@ -4,9 +4,6 @@ inputs:
   target:
     description: Target triple for the artifacts to sign.
     required: true
-  binaries:
-    description: Space-delimited binary basenames to sign.
-    default: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner"
   client-id:
     description: Azure Trusted Signing client ID.
     required: true
@@ -30,31 +27,14 @@ runs:
   using: composite
   steps:
     - name: Azure login for Trusted Signing (OIDC)
-      uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2
+      uses: azure/login@v2
       with:
         client-id: ${{ inputs.client-id }}
         tenant-id: ${{ inputs.tenant-id }}
         subscription-id: ${{ inputs.subscription-id }}
 
-    - name: Prepare file list
-      id: prepare
-      shell: bash
-      env:
-        TARGET: ${{ inputs.target }}
-        BINARIES: ${{ inputs.binaries }}
-      run: |
-        set -euo pipefail
-
-        {
-          echo "files<<EOF"
-          for binary in ${BINARIES}; do
-            echo "${GITHUB_WORKSPACE}/codex-rs/target/${TARGET}/release/${binary}.exe"
-          done
-          echo "EOF"
-        } >> "$GITHUB_OUTPUT"
-
     - name: Sign Windows binaries with Azure Trusted Signing
-      uses: azure/trusted-signing-action@1d365fec12862c4aa68fcac418143d73f0cea293 # v0
+      uses: azure/trusted-signing-action@v0
       with:
         endpoint: ${{ inputs.endpoint }}
         trusted-signing-account-name: ${{ inputs.account-name }}
@@ -70,4 +50,8 @@ runs:
         exclude-azure-developer-cli-credential: true
         exclude-interactive-browser-credential: true
         cache-dependencies: false
-        files: ${{ steps.prepare.outputs.files }}
+        files: |
+          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex.exe
+          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-responses-api-proxy.exe
+          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-windows-sandbox-setup.exe
+          ${{ github.workspace }}/codex-rs/target/${{ inputs.target }}/release/codex-command-runner.exe
1
.github/blob-size-allowlist.txt
vendored
@@ -7,4 +7,3 @@ codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
 codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json
 codex-rs/tui/tests/fixtures/oss-story.jsonl
 codex-rs/tui_app_server/tests/fixtures/oss-story.jsonl
-codex-rs/tui/src/app.rs
44
.github/dotslash-config.json
vendored
@@ -11,11 +11,11 @@
       "path": "codex"
     },
     "linux-x86_64": {
-      "regex": "^codex-x86_64-unknown-linux-musl-bundle\\.tar\\.zst$",
+      "regex": "^codex-x86_64-unknown-linux-musl\\.zst$",
       "path": "codex"
     },
     "linux-aarch64": {
-      "regex": "^codex-aarch64-unknown-linux-musl-bundle\\.tar\\.zst$",
+      "regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
       "path": "codex"
     },
     "windows-x86_64": {
@@ -28,34 +28,6 @@
       }
     }
   },
-  "codex-app-server": {
-    "platforms": {
-      "macos-aarch64": {
-        "regex": "^codex-app-server-aarch64-apple-darwin\\.zst$",
-        "path": "codex-app-server"
-      },
-      "macos-x86_64": {
-        "regex": "^codex-app-server-x86_64-apple-darwin\\.zst$",
-        "path": "codex-app-server"
-      },
-      "linux-x86_64": {
-        "regex": "^codex-app-server-x86_64-unknown-linux-musl\\.zst$",
-        "path": "codex-app-server"
-      },
-      "linux-aarch64": {
-        "regex": "^codex-app-server-aarch64-unknown-linux-musl\\.zst$",
-        "path": "codex-app-server"
-      },
-      "windows-x86_64": {
-        "regex": "^codex-app-server-x86_64-pc-windows-msvc\\.exe\\.zst$",
-        "path": "codex-app-server.exe"
-      },
-      "windows-aarch64": {
-        "regex": "^codex-app-server-aarch64-pc-windows-msvc\\.exe\\.zst$",
-        "path": "codex-app-server.exe"
-      }
-    }
-  },
   "codex-responses-api-proxy": {
     "platforms": {
       "macos-aarch64": {
@@ -84,18 +56,6 @@
       }
     }
   },
-  "bwrap": {
-    "platforms": {
-      "linux-x86_64": {
-        "regex": "^bwrap-x86_64-unknown-linux-musl\\.zst$",
-        "path": "bwrap"
-      },
-      "linux-aarch64": {
-        "regex": "^bwrap-aarch64-unknown-linux-musl\\.zst$",
-        "path": "bwrap"
-      }
-    }
-  },
   "codex-command-runner": {
     "platforms": {
       "windows-x86_64": {
2
.github/pull_request_template.md
vendored
@@ -1,6 +1,6 @@
 # External (non-OpenAI) Pull Request Requirements
 
-External code contributions are by invitation only. Please read the dedicated "Contributing" markdown file for details:
+Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
 https://github.com/openai/codex/blob/main/docs/contributing.md
 
 If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
113
.github/scripts/compute-bazel-windows-path.ps1
vendored
@@ -1,113 +0,0 @@
<#
BuildBuddy cache keys include the action and test environment, so Bazel should
not inherit the full hosted-runner PATH on Windows. That PATH includes volatile
tool entries, such as Maven, that can change independently of this repo and
cause avoidable cache misses.

This script derives a smaller, cache-stable PATH that keeps the Windows
toolchain entries Bazel-backed CI tasks need: MSVC and Windows SDK paths,
MinGW runtime DLL paths for gnullvm-built tests, Git, PowerShell, Node, Python,
DotSlash, and the standard Windows system directories.
`setup-bazel-ci` runs this after exporting the MSVC environment, and the script
publishes the result via `GITHUB_ENV` as `CODEX_BAZEL_WINDOWS_PATH` so later
steps can pass that explicit PATH to Bazel.
#>

$stablePathEntries = New-Object System.Collections.Generic.List[string]
$seenEntries = [System.Collections.Generic.HashSet[string]]::new([System.StringComparer]::OrdinalIgnoreCase)
$windowsAppsPath = if ([string]::IsNullOrWhiteSpace($env:LOCALAPPDATA)) {
    $null
} else {
    "$($env:LOCALAPPDATA)\Microsoft\WindowsApps"
}
$windowsDir = if ($env:WINDIR) {
    $env:WINDIR
} elseif ($env:SystemRoot) {
    $env:SystemRoot
} else {
    $null
}

function Add-StablePathEntry {
    param([string]$PathEntry)

    if ([string]::IsNullOrWhiteSpace($PathEntry)) {
        return
    }

    if ($seenEntries.Add($PathEntry)) {
        [void]$stablePathEntries.Add($PathEntry)
    }
}

foreach ($pathEntry in ($env:PATH -split ';')) {
    if ([string]::IsNullOrWhiteSpace($pathEntry)) {
        continue
    }

    if (
        $pathEntry -like '*Microsoft Visual Studio*' -or
        $pathEntry -like '*Windows Kits*' -or
        $pathEntry -like '*Microsoft SDKs*' -or
        $pathEntry -eq 'C:\mingw64\bin' -or
        $pathEntry -like 'C:\msys64\*\bin' -or
        $pathEntry -like 'C:\Program Files\Git\*' -or
        $pathEntry -like 'C:\Program Files\PowerShell\*' -or
        $pathEntry -like 'C:\hostedtoolcache\windows\node\*' -or
        $pathEntry -like 'C:\hostedtoolcache\windows\Python\*' -or
        $pathEntry -eq 'D:\a\_temp\install-dotslash\bin' -or
        ($windowsDir -and ($pathEntry -eq $windowsDir -or $pathEntry -like "${windowsDir}\*"))
    ) {
        Add-StablePathEntry $pathEntry
    }
}

$gitCommand = Get-Command git -ErrorAction SilentlyContinue
if ($gitCommand) {
    Add-StablePathEntry (Split-Path $gitCommand.Source -Parent)
}

$nodeCommand = Get-Command node -ErrorAction SilentlyContinue
if ($nodeCommand) {
    Add-StablePathEntry (Split-Path $nodeCommand.Source -Parent)
}

$python3Command = Get-Command python3 -ErrorAction SilentlyContinue
if ($python3Command) {
    Add-StablePathEntry (Split-Path $python3Command.Source -Parent)
}

$pythonCommand = Get-Command python -ErrorAction SilentlyContinue
if ($pythonCommand) {
    Add-StablePathEntry (Split-Path $pythonCommand.Source -Parent)
}

$pwshCommand = Get-Command pwsh -ErrorAction SilentlyContinue
if ($pwshCommand) {
    Add-StablePathEntry (Split-Path $pwshCommand.Source -Parent)
}

foreach ($mingwPath in @('C:\mingw64\bin', 'C:\msys64\mingw64\bin', 'C:\msys64\ucrt64\bin')) {
    if (Test-Path $mingwPath) {
        Add-StablePathEntry $mingwPath
    }
}

if ($windowsAppsPath) {
    Add-StablePathEntry $windowsAppsPath
}

if ($stablePathEntries.Count -eq 0) {
    throw 'Failed to derive cache-stable Windows PATH.'
}

if ([string]::IsNullOrWhiteSpace($env:GITHUB_ENV)) {
    throw 'GITHUB_ENV must be set.'
}

$stablePath = $stablePathEntries -join ';'
Write-Host 'Derived CODEX_BAZEL_WINDOWS_PATH entries:'
foreach ($pathEntry in $stablePathEntries) {
    Write-Host "  $pathEntry"
}
"CODEX_BAZEL_WINDOWS_PATH=$stablePath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
@@ -2,6 +2,16 @@
 
 set -euo pipefail
 
+ci_config=ci-linux
+case "${RUNNER_OS:-}" in
+  macOS)
+    ci_config=ci-macos
+    ;;
+  Windows)
+    ci_config=ci-windows
+    ;;
+esac
+
 bazel_lint_args=("$@")
 if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
   has_host_platform_override=0
@@ -34,6 +44,29 @@ if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
   bazel_lint_args+=("--skip_incompatible_explicit_targets")
 fi
 
+bazel_startup_args=()
+if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
+  bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
+fi
+
+run_bazel() {
+  if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
+    MSYS2_ARG_CONV_EXCL='*' bazel "$@"
+    return
+  fi
+
+  bazel "$@"
+}
+
+run_bazel_with_startup_args() {
+  if [[ ${#bazel_startup_args[@]} -gt 0 ]]; then
+    run_bazel "${bazel_startup_args[@]}" "$@"
+    return
+  fi
+
+  run_bazel "$@"
+}
+
 read_query_labels() {
   local query="$1"
   local query_stdout
@@ -41,10 +74,12 @@ read_query_labels() {
   query_stdout="$(mktemp)"
   query_stderr="$(mktemp)"
 
-  if ! ./.github/scripts/run-bazel-query-ci.sh \
+  if ! run_bazel_with_startup_args \
+    --noexperimental_remote_repo_contents_cache \
+    query \
     --keep_going \
     --output=label \
-    -- "$query" >"$query_stdout" 2>"$query_stderr"; then
+    "$query" >"$query_stdout" 2>"$query_stderr"; then
     cat "$query_stderr" >&2
     rm -f "$query_stdout" "$query_stderr"
     exit 1
257
.github/scripts/run-bazel-ci.sh
vendored
@@ -3,10 +3,9 @@
 set -euo pipefail
 
 print_failed_bazel_test_logs=0
-print_failed_bazel_action_summary=0
+use_node_test_env=0
 remote_download_toplevel=0
 windows_msvc_host_platform=0
-windows_cross_compile=0
 
 while [[ $# -gt 0 ]]; do
   case "$1" in
@@ -14,8 +13,8 @@ while [[ $# -gt 0 ]]; do
       print_failed_bazel_test_logs=1
       shift
       ;;
-    --print-failed-action-summary)
-      print_failed_bazel_action_summary=1
+    --use-node-test-env)
+      use_node_test_env=1
       shift
       ;;
     --remote-download-toplevel)
@@ -26,10 +25,6 @@ while [[ $# -gt 0 ]]; do
       windows_msvc_host_platform=1
      shift
       ;;
-    --windows-cross-compile)
-      windows_cross_compile=1
-      shift
-      ;;
     --)
       shift
       break
@@ -42,7 +37,7 @@ while [[ $# -gt 0 ]]; do
 done
 
 if [[ $# -eq 0 ]]; then
-  echo "Usage: $0 [--print-failed-test-logs] [--print-failed-action-summary] [--remote-download-toplevel] [--windows-msvc-host-platform] [--windows-cross-compile] -- <bazel args> -- <targets>" >&2
+  echo "Usage: $0 [--print-failed-test-logs] [--use-node-test-env] [--remote-download-toplevel] [--windows-msvc-host-platform] -- <bazel args> -- <targets>" >&2
   exit 1
 fi
 
@@ -66,11 +61,7 @@ case "${RUNNER_OS:-}" in
     ci_config=ci-macos
     ;;
   Windows)
-    if [[ $windows_cross_compile -eq 1 ]]; then
-      ci_config=ci-windows-cross
-    else
-      ci_config=ci-windows
-    fi
+    ci_config=ci-windows
     ;;
 esac
 
@@ -78,44 +69,19 @@ print_bazel_test_log_tails() {
   local console_log="$1"
   local testlogs_dir
   local -a bazel_info_cmd=(bazel)
-  local -a bazel_info_args=(info)
 
   if (( ${#bazel_startup_args[@]} > 0 )); then
     bazel_info_cmd+=("${bazel_startup_args[@]}")
   fi
 
-  # `bazel info` needs the same CI config as the failed test invocation so
-  # platform-specific output roots match. On Windows, omitting `ci-windows`
-  # would point at `local_windows-fastbuild` even when the test ran with the
-  # MSVC host platform under `local_windows_msvc-fastbuild`.
-  if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
-    bazel_info_args+=(
-      "--config=${ci_config}"
-      "--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
-    )
-  fi
-  # Only pass flags that affect Bazel's output-root selection or repository
-  # lookup. Test/build-only flags such as execution logs or remote download
-  # mode can make `bazel info` fail, which would hide the real test log path.
-  for arg in "${post_config_bazel_args[@]}"; do
-    case "$arg" in
-      --host_platform=* | --repo_contents_cache=* | --repository_cache=*)
-        bazel_info_args+=("$arg")
-        ;;
-    esac
-  done
-
-  testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" \
-    --noexperimental_remote_repo_contents_cache \
-    "${bazel_info_args[@]}" \
-    bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
+  testlogs_dir="$(run_bazel "${bazel_info_cmd[@]:1}" info bazel-testlogs 2>/dev/null || echo bazel-testlogs)"
 
   local failed_targets=()
   while IFS= read -r target; do
     failed_targets+=("$target")
   done < <(
-    grep -E '^(FAIL: //|ERROR: .* Testing //)' "$console_log" \
-      | sed -E 's#^FAIL: (//[^ ]+).*#\1#; s#^ERROR: .* Testing (//[^ ]+) failed:.*#\1#' \
+    grep -E '^FAIL: //' "$console_log" \
+      | sed -E 's#^FAIL: (//[^ ]+).*#\1#' \
      | sort -u
   )
 
@@ -126,14 +92,8 @@
 
   for target in "${failed_targets[@]}"; do
     local rel_path="${target#//}"
-    rel_path="${rel_path/://}"
+    rel_path="${rel_path/:/\/}"
     local test_log="${testlogs_dir}/${rel_path}/test.log"
-    local reported_test_log
-    reported_test_log="$(grep -F "FAIL: ${target} " "$console_log" | sed -nE 's#.* \(see (.*[\\/]test\.log)\).*#\1#p' | head -n 1 || true)"
-    if [[ -n "$reported_test_log" ]]; then
-      reported_test_log="${reported_test_log//\\//}"
-      test_log="$reported_test_log"
-    fi
 
     echo "::group::Bazel test log tail for ${target}"
     if [[ -f "$test_log" ]]; then
@@ -145,93 +105,6 @@
   done
 }
 
-print_bazel_action_failure_summary() {
-  local console_log="$1"
-  local escaped_summary
-  local summary
-
-  summary="$(
-    awk '
-      function clean(line) {
-        gsub(sprintf("%c", 27) "\\[[0-9;]*m", "", line)
-        sub(/^.*\t[^\t]*\t[0-9TZ:._-]+ /, "", line)
-        return line
-      }
-
-      function is_diagnostic(line) {
-        return line ~ /^(error(\[[^]]+\])?:|warning:|note:|help:)/ ||
-          line ~ /^[[:space:]]+-->/ ||
-          line ~ /^[[:space:]]*[0-9]+[[:space:]]+\|/ ||
-          line ~ /^[[:space:]]*\|/ ||
-          line ~ /^[[:space:]]+= (note|help):/ ||
-          line ~ /^[[:space:]]*\^[[:space:]^~-]*$/ ||
-          line ~ /^For more information/ ||
-          line ~ /^error: aborting/
-      }
-
-      {
-        line = clean($0)
-      }
-
-      line ~ /^ERROR: .* failed:/ {
-        if (printed) {
-          print ""
-        }
-        print line
-        in_failure = 1
-        seen_diagnostic = 0
-        printed = 1
-        next
-      }
-
-      in_failure && is_diagnostic(line) {
-        print line
-        seen_diagnostic = 1
-        next
-      }
-
-      in_failure && seen_diagnostic && line == "" {
-        print ""
-        next
-      }
-
-      in_failure && seen_diagnostic {
-        in_failure = 0
-        seen_diagnostic = 0
-        next
-      }
-    ' "$console_log"
-  )"
-
-  if [[ -z "$summary" ]]; then
-    summary="$(grep -E '^ERROR: |^FAILED: ' "$console_log" | tail -n 50 || true)"
-  fi
-
-  if [[ -z "$summary" ]]; then
-    echo "No Bazel action failures were found in the captured console output."
-    return
-  fi
-
-  if [[ "${GITHUB_ACTIONS:-}" == "true" ]]; then
-    escaped_summary="$(
-      printf '%s' "$summary" \
-        | awk 'BEGIN { ORS = "" } {
-            gsub(/%/, "%25")
-            gsub(/\r/, "%0D")
-            print sep $0
-            sep = "%0A"
-          }'
-    )"
-    echo "::error title=Bazel failed action diagnostics::${escaped_summary}"
-  fi
-
-  echo
-  echo "Bazel failed action diagnostics:"
-  echo "--------------------------------"
-  printf '%s\n' "$summary"
-  echo "--------------------------------"
-}
-
 bazel_args=()
 bazel_targets=()
 found_target_separator=0
@@ -253,10 +126,14 @@ if [[ ${#bazel_args[@]} -eq 0 || ${#bazel_targets[@]} -eq 0 ]]; then
   exit 1
 fi
 
-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # Fork PRs do not receive the BuildBuddy secret needed for the remote
-  # cross-compile config. Preserve the previous local Windows build shape.
-  windows_msvc_host_platform=1
+if [[ $use_node_test_env -eq 1 ]]; then
+  # Bazel test sandboxes on macOS may resolve an older Homebrew `node`
+  # before the `actions/setup-node` runtime on PATH.
+  node_bin="$(which node)"
+  if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
+    node_bin="$(cygpath -w "${node_bin}")"
+  fi
+  bazel_args+=("--test_env=CODEX_JS_REPL_NODE_PATH=${node_bin}")
 fi
 
 post_config_bazel_args=()
@@ -270,10 +147,10 @@ if [[ "${RUNNER_OS:-}" == "Windows" && $windows_msvc_host_platform -eq 1 ]]; then
   done
 
   if [[ $has_host_platform_override -eq 0 ]]; then
-    # Use the MSVC Windows platform for jobs that need helper binaries like
-    # Rust test wrappers and V8 generators to resolve a compatible toolchain.
-    # Callers that need a different Windows target platform should pass an
-    # explicit `--platforms=...` flag.
+    # Keep Windows Bazel targets on `windows-gnullvm` for cfg coverage, but opt
+    # specific jobs into an MSVC exec platform when they need helper binaries
+    # like Rust test wrappers and V8 generators to resolve a compatible host
+    # toolchain.
     post_config_bazel_args+=("--host_platform=//:local_windows_msvc")
   fi
 fi
@@ -284,25 +161,6 @@ if [[ $remote_download_toplevel -eq 1 ]]; then
   post_config_bazel_args+=(--remote_download_toplevel)
 fi
 
-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # `--enable_platform_specific_config` expands `common:windows` on Windows
-  # hosts after ordinary rc configs, which can override `ci-windows-cross`'s
-  # RBE host platform. Repeat the host platform on the command line so V8 and
-  # other genrules execute on Linux RBE workers instead of Git Bash locally.
-  #
-  # Bazel also derives the default genrule shell from the client host. Without
-  # an explicit shell executable, remote Linux actions can be asked to run
-  # `C:\Program Files\Git\usr\bin\bash.exe`.
-  post_config_bazel_args+=(--host_platform=//:rbe --shell_executable=/bin/bash)
-fi
-
-if [[ "${RUNNER_OS:-}" == "Windows" && $windows_cross_compile -eq 1 && -z "${BUILDBUDDY_API_KEY:-}" ]]; then
-  # The Windows cross-compile config depends on remote execution. Fork PRs do
-  # not receive the BuildBuddy secret, so fall back to the existing local build
-  # shape and keep its lower concurrency cap.
-  post_config_bazel_args+=(--jobs=8)
-fi
-
 if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
   # Windows self-hosted runners can run multiple Bazel jobs concurrently. Give
   # each job its own repo contents cache so they do not fight over the shared
@@ -321,57 +179,27 @@ if [[ -n "${CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR:-}" ]]; then
 fi
 
 if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
-  pass_windows_build_env=1
-  if [[ $windows_cross_compile -eq 1 && -n "${BUILDBUDDY_API_KEY:-}" ]]; then
-    # Remote build actions execute on Linux RBE workers. Passing the Windows
-    # runner's build environment there makes Bazel genrules try to execute
-    # C:\Program Files\Git\usr\bin\bash.exe on Linux.
-    pass_windows_build_env=0
-  fi
+  windows_action_env_vars=(
+    INCLUDE
+    LIB
+    LIBPATH
+    PATH
+    UCRTVersion
+    UniversalCRTSdkDir
+    VCINSTALLDIR
+    VCToolsInstallDir
+    WindowsLibPath
+    WindowsSdkBinPath
+    WindowsSdkDir
+    WindowsSDKLibVersion
+    WindowsSDKVersion
+  )
 
-  if [[ $pass_windows_build_env -eq 1 ]]; then
-    windows_action_env_vars=(
-      INCLUDE
-      LIB
-      LIBPATH
-      UCRTVersion
-      UniversalCRTSdkDir
-      VCINSTALLDIR
-      VCToolsInstallDir
-      WindowsLibPath
-      WindowsSdkBinPath
-      WindowsSdkDir
-      WindowsSDKLibVersion
-      WindowsSDKVersion
-    )
-
-    for env_var in "${windows_action_env_vars[@]}"; do
-      if [[ -n "${!env_var:-}" ]]; then
-        post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
-      fi
-    done
-  fi
-
-  if [[ -z "${CODEX_BAZEL_WINDOWS_PATH:-}" ]]; then
-    echo "CODEX_BAZEL_WINDOWS_PATH must be set for Windows Bazel CI." >&2
-    exit 1
-  fi
-
-  if [[ $pass_windows_build_env -eq 1 ]]; then
-    post_config_bazel_args+=(
-      "--action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
-      "--host_action_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}"
-    )
-  elif [[ $windows_cross_compile -eq 1 ]]; then
-    # Remote build actions run on Linux RBE workers. Give their shell snippets
-    # a Linux PATH while preserving CODEX_BAZEL_WINDOWS_PATH below for local
-    # Windows test execution.
-    post_config_bazel_args+=(
-      "--action_env=PATH=/usr/bin:/bin"
-      "--host_action_env=PATH=/usr/bin:/bin"
-    )
-  fi
-  post_config_bazel_args+=("--test_env=PATH=${CODEX_BAZEL_WINDOWS_PATH}")
+  for env_var in "${windows_action_env_vars[@]}"; do
+    if [[ -n "${!env_var:-}" ]]; then
+      post_config_bazel_args+=("--action_env=${env_var}" "--host_action_env=${env_var}")
    fi
+  done
 fi
 
 bazel_console_log="$(mktemp)"
@@ -443,9 +271,6 @@ else
 fi
 
 if [[ ${bazel_status:-0} -ne 0 ]]; then
-  if [[ $print_failed_bazel_action_summary -eq 1 ]]; then
-    print_bazel_action_failure_summary "$bazel_console_log"
-  fi
   if [[ $print_failed_bazel_test_logs -eq 1 ]]; then
     print_bazel_test_log_tails "$bazel_console_log"
   fi
84
.github/scripts/run-bazel-query-ci.sh
vendored
@@ -1,84 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

# Run Bazel queries with the same CI startup settings as the main build/test
# invocation so target-discovery queries can reuse the same Bazel server.

query_args=()
windows_cross_compile=0
while [[ $# -gt 0 ]]; do
  case "$1" in
    --windows-cross-compile)
      windows_cross_compile=1
      shift
      ;;
    --)
      shift
      break
      ;;
    *)
      query_args+=("$1")
      shift
      ;;
  esac
done

if [[ $# -ne 1 ]]; then
  echo "Usage: $0 [--windows-cross-compile] [<bazel query args>...] -- <query expression>" >&2
  exit 1
fi

query_expression="$1"

ci_config=ci-linux
case "${RUNNER_OS:-}" in
  macOS)
    ci_config=ci-macos
    ;;
  Windows)
    if [[ $windows_cross_compile -eq 1 ]]; then
      ci_config=ci-windows-cross
    else
      ci_config=ci-windows
    fi
    ;;
esac

bazel_startup_args=()
if [[ -n "${BAZEL_OUTPUT_USER_ROOT:-}" ]]; then
  bazel_startup_args+=("--output_user_root=${BAZEL_OUTPUT_USER_ROOT}")
fi

run_bazel() {
  if [[ "${RUNNER_OS:-}" == "Windows" ]]; then
    MSYS2_ARG_CONV_EXCL='*' bazel "$@"
    return
  fi

  bazel "$@"
}

bazel_query_args=(--noexperimental_remote_repo_contents_cache query)
if [[ -n "${BUILDBUDDY_API_KEY:-}" ]]; then
  bazel_query_args+=(
    "--config=${ci_config}"
    "--remote_header=x-buildbuddy-api-key=${BUILDBUDDY_API_KEY}"
  )
fi

if [[ -n "${BAZEL_REPO_CONTENTS_CACHE:-}" ]]; then
  bazel_query_args+=("--repo_contents_cache=${BAZEL_REPO_CONTENTS_CACHE}")
fi

if [[ -n "${BAZEL_REPOSITORY_CACHE:-}" ]]; then
  bazel_query_args+=("--repository_cache=${BAZEL_REPOSITORY_CACHE}")
fi

bazel_query_args+=("${query_args[@]}" "$query_expression")

if (( ${#bazel_startup_args[@]} > 0 )); then
  run_bazel "${bazel_startup_args[@]}" "${bazel_query_args[@]}"
else
  run_bazel "${bazel_query_args[@]}"
fi
110
.github/scripts/rusty_v8_bazel.py
vendored
@@ -4,7 +4,6 @@ from __future__ import annotations

import argparse
import gzip
import hashlib
import re
import shutil
import subprocess
@@ -13,16 +12,8 @@ import tempfile
import tomllib
from pathlib import Path

from rusty_v8_module_bazel import (
    RustyV8ChecksumError,
    check_module_bazel,
    update_module_bazel,
)


ROOT = Path(__file__).resolve().parents[2]
MODULE_BAZEL = ROOT / "MODULE.bazel"
RUSTY_V8_CHECKSUMS_DIR = ROOT / "third_party" / "v8"
MUSL_RUNTIME_ARCHIVE_LABELS = [
    "@llvm//runtimes/libcxx:libcxx.static",
    "@llvm//runtimes/libcxx:libcxxabi.static",
@@ -63,10 +54,8 @@ def bazel_output_files(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> list[Path]:
    expression = "set(" + " ".join(labels) + ")"
    bazel_configs = bazel_configs or []
    result = subprocess.run(
        [
            "bazel",
@@ -74,7 +63,6 @@ def bazel_output_files(
            "-c",
            compilation_mode,
            f"--platforms=@llvm//platforms:{platform}",
            *[f"--config={config}" for config in bazel_configs],
            "--output=files",
            expression,
        ],
@@ -90,9 +78,7 @@ def bazel_build(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> None:
    bazel_configs = bazel_configs or []
    subprocess.run(
        [
            "bazel",
@@ -100,7 +86,6 @@ def bazel_build(
            "-c",
            compilation_mode,
            f"--platforms=@llvm//platforms:{platform}",
            *[f"--config={config}" for config in bazel_configs],
            *labels,
        ],
        cwd=ROOT,
@@ -112,14 +97,13 @@ def ensure_bazel_output_files(
    platform: str,
    labels: list[str],
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> list[Path]:
    outputs = bazel_output_files(platform, labels, compilation_mode, bazel_configs)
    outputs = bazel_output_files(platform, labels, compilation_mode)
    if all(path.exists() for path in outputs):
        return outputs

    bazel_build(platform, labels, compilation_mode, bazel_configs)
    outputs = bazel_output_files(platform, labels, compilation_mode, bazel_configs)
    bazel_build(platform, labels, compilation_mode)
    outputs = bazel_output_files(platform, labels, compilation_mode)
    missing = [str(path) for path in outputs if not path.exists()]
    if missing:
        raise SystemExit(f"missing built outputs for {labels}: {missing}")
@@ -162,24 +146,6 @@ def resolved_v8_crate_version() -> str:
    return matches[0]


def rusty_v8_checksum_manifest_path(version: str) -> Path:
    return RUSTY_V8_CHECKSUMS_DIR / f"rusty_v8_{version.replace('.', '_')}.sha256"


def command_version(version: str | None) -> str:
    if version is not None:
        return version
    return resolved_v8_crate_version()


def command_manifest_path(manifest: Path | None, version: str) -> Path:
    if manifest is None:
        return rusty_v8_checksum_manifest_path(version)
    if manifest.is_absolute():
        return manifest
    return ROOT / manifest


def staged_archive_name(target: str, source_path: Path) -> str:
    if source_path.suffix == ".lib":
        return f"rusty_v8_release_{target}.lib.gz"
@@ -194,9 +160,8 @@ def single_bazel_output_file(
    platform: str,
    label: str,
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> Path:
    outputs = ensure_bazel_output_files(platform, [label], compilation_mode, bazel_configs)
    outputs = ensure_bazel_output_files(platform, [label], compilation_mode)
    if len(outputs) != 1:
        raise SystemExit(f"expected exactly one output for {label}, found {outputs}")
    return outputs[0]
@@ -206,17 +171,11 @@ def merged_musl_archive(
    platform: str,
    lib_path: Path,
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> Path:
    llvm_ar = single_bazel_output_file(platform, LLVM_AR_LABEL, compilation_mode, bazel_configs)
    llvm_ranlib = single_bazel_output_file(
        platform,
        LLVM_RANLIB_LABEL,
        compilation_mode,
        bazel_configs,
    )
    llvm_ar = single_bazel_output_file(platform, LLVM_AR_LABEL, compilation_mode)
    llvm_ranlib = single_bazel_output_file(platform, LLVM_RANLIB_LABEL, compilation_mode)
    runtime_archives = [
        single_bazel_output_file(platform, label, compilation_mode, bazel_configs)
        single_bazel_output_file(platform, label, compilation_mode)
        for label in MUSL_RUNTIME_ARCHIVE_LABELS
    ]

@@ -247,13 +206,11 @@ def stage_release_pair(
    target: str,
    output_dir: Path,
    compilation_mode: str = "fastbuild",
    bazel_configs: list[str] | None = None,
) -> None:
    outputs = ensure_bazel_output_files(
        platform,
        [release_pair_label(target)],
        compilation_mode,
        bazel_configs,
    )

    try:
@@ -270,7 +227,7 @@ def stage_release_pair(
    staged_library = output_dir / staged_archive_name(target, lib_path)
    staged_binding = output_dir / f"src_binding_release_{target}.rs"
    source_archive = (
        merged_musl_archive(platform, lib_path, compilation_mode, bazel_configs)
        merged_musl_archive(platform, lib_path, compilation_mode)
        if is_musl_archive_target(target, lib_path)
        else lib_path
    )
@@ -287,18 +244,8 @@ def stage_release_pair(

    shutil.copyfile(binding_path, staged_binding)

    staged_checksums = output_dir / f"rusty_v8_release_{target}.sha256"
    with staged_checksums.open("w", encoding="utf-8") as checksums:
        for path in [staged_library, staged_binding]:
            digest = hashlib.sha256()
            with path.open("rb") as artifact:
                for chunk in iter(lambda: artifact.read(1024 * 1024), b""):
                    digest.update(chunk)
            checksums.write(f"{digest.hexdigest()} {path.name}\n")

    print(staged_library)
    print(staged_binding)
    print(staged_checksums)


def parse_args() -> argparse.Namespace:
@@ -309,12 +256,6 @@ def parse_args() -> argparse.Namespace:
    stage_release_pair_parser.add_argument("--platform", required=True)
    stage_release_pair_parser.add_argument("--target", required=True)
    stage_release_pair_parser.add_argument("--output-dir", required=True)
    stage_release_pair_parser.add_argument(
        "--bazel-config",
        action="append",
        default=[],
        dest="bazel_configs",
    )
    stage_release_pair_parser.add_argument(
        "--compilation-mode",
        default="fastbuild",
@@ -323,24 +264,6 @@ def parse_args() -> argparse.Namespace:

    subparsers.add_parser("resolved-v8-crate-version")

    check_module_bazel_parser = subparsers.add_parser("check-module-bazel")
    check_module_bazel_parser.add_argument("--version")
    check_module_bazel_parser.add_argument("--manifest", type=Path)
    check_module_bazel_parser.add_argument(
        "--module-bazel",
        type=Path,
        default=MODULE_BAZEL,
    )

    update_module_bazel_parser = subparsers.add_parser("update-module-bazel")
    update_module_bazel_parser.add_argument("--version")
    update_module_bazel_parser.add_argument("--manifest", type=Path)
    update_module_bazel_parser.add_argument(
        "--module-bazel",
        type=Path,
        default=MODULE_BAZEL,
    )

    return parser.parse_args()


@@ -352,28 +275,11 @@ def main() -> int:
            target=args.target,
            output_dir=Path(args.output_dir),
            compilation_mode=args.compilation_mode,
            bazel_configs=args.bazel_configs,
        )
        return 0
    if args.command == "resolved-v8-crate-version":
        print(resolved_v8_crate_version())
        return 0
    if args.command == "check-module-bazel":
        version = command_version(args.version)
        manifest_path = command_manifest_path(args.manifest, version)
        try:
            check_module_bazel(args.module_bazel, manifest_path, version)
        except RustyV8ChecksumError as exc:
            raise SystemExit(str(exc)) from exc
        return 0
    if args.command == "update-module-bazel":
        version = command_version(args.version)
        manifest_path = command_manifest_path(args.manifest, version)
        try:
            update_module_bazel(args.module_bazel, manifest_path, version)
        except RustyV8ChecksumError as exc:
            raise SystemExit(str(exc)) from exc
        return 0
    raise SystemExit(f"unsupported command: {args.command}")
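The staging step above writes a rusty_v8_release_<target>.sha256 manifest of plain "<digest> <filename>" lines. A minimal sketch of recomputing one digest for verification (the artifact name is illustrative, taken from the test fixtures further down):

import hashlib
from pathlib import Path

def sha256_of(path: Path) -> str:
    # Stream in 1 MiB chunks, matching the staging loop above.
    digest = hashlib.sha256()
    with path.open("rb") as artifact:
        for chunk in iter(lambda: artifact.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of(Path("librusty_v8_release_x86_64-unknown-linux-gnu.a.gz")))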
230
.github/scripts/rusty_v8_module_bazel.py
vendored
@@ -1,230 +0,0 @@
#!/usr/bin/env python3

from __future__ import annotations

import re
from dataclasses import dataclass
from pathlib import Path


SHA256_RE = re.compile(r"[0-9a-f]{64}")
HTTP_FILE_BLOCK_RE = re.compile(r"(?ms)^http_file\(\n.*?^\)\n?")


class RustyV8ChecksumError(ValueError):
    pass


@dataclass(frozen=True)
class RustyV8HttpFile:
    start: int
    end: int
    block: str
    name: str
    downloaded_file_path: str
    sha256: str | None


def parse_checksum_manifest(path: Path) -> dict[str, str]:
    try:
        lines = path.read_text(encoding="utf-8").splitlines()
    except FileNotFoundError as exc:
        raise RustyV8ChecksumError(f"missing checksum manifest: {path}") from exc

    checksums: dict[str, str] = {}
    for line_number, line in enumerate(lines, 1):
        if not line.strip():
            continue
        parts = line.split()
        if len(parts) != 2:
            raise RustyV8ChecksumError(
                f"{path}:{line_number}: expected '<sha256> <filename>'"
            )
        checksum, filename = parts
        if not SHA256_RE.fullmatch(checksum):
            raise RustyV8ChecksumError(
                f"{path}:{line_number}: invalid SHA-256 digest for {filename}"
            )
        if not filename or filename in {".", ".."} or "/" in filename:
            raise RustyV8ChecksumError(
                f"{path}:{line_number}: expected a bare artifact filename"
            )
        if filename in checksums:
            raise RustyV8ChecksumError(
                f"{path}:{line_number}: duplicate checksum for {filename}"
            )
        checksums[filename] = checksum

    if not checksums:
        raise RustyV8ChecksumError(f"empty checksum manifest: {path}")
    return checksums


def string_field(block: str, field: str) -> str | None:
    # Matches one-line string fields inside http_file blocks, e.g. `sha256 = "...",`.
    match = re.search(rf'^\s*{re.escape(field)}\s*=\s*"([^"]+)",\s*$', block, re.M)
    if match:
        return match.group(1)
    return None


def rusty_v8_http_files(module_bazel: str, version: str) -> list[RustyV8HttpFile]:
    version_slug = version.replace(".", "_")
    name_prefix = f"rusty_v8_{version_slug}_"
    entries = []
    for match in HTTP_FILE_BLOCK_RE.finditer(module_bazel):
        block = match.group(0)
        name = string_field(block, "name")
        if not name or not name.startswith(name_prefix):
            continue
        downloaded_file_path = string_field(block, "downloaded_file_path")
        if not downloaded_file_path:
            raise RustyV8ChecksumError(
                f"MODULE.bazel {name} is missing downloaded_file_path"
            )
        entries.append(
            RustyV8HttpFile(
                start=match.start(),
                end=match.end(),
                block=block,
                name=name,
                downloaded_file_path=downloaded_file_path,
                sha256=string_field(block, "sha256"),
            )
        )
    return entries


def module_entry_set_errors(
    entries: list[RustyV8HttpFile],
    checksums: dict[str, str],
    version: str,
) -> list[str]:
    errors = []
    if not entries:
        errors.append(f"MODULE.bazel has no rusty_v8 http_file entries for {version}")
        return errors

    module_files: dict[str, RustyV8HttpFile] = {}
    duplicate_files = set()
    for entry in entries:
        if entry.downloaded_file_path in module_files:
            duplicate_files.add(entry.downloaded_file_path)
        module_files[entry.downloaded_file_path] = entry

    for filename in sorted(duplicate_files):
        errors.append(f"MODULE.bazel has duplicate http_file entries for {filename}")

    for filename in sorted(set(module_files) - set(checksums)):
        entry = module_files[filename]
        errors.append(f"MODULE.bazel {entry.name} has no checksum in the manifest")

    for filename in sorted(set(checksums) - set(module_files)):
        errors.append(f"manifest has {filename}, but MODULE.bazel has no http_file")

    return errors


def module_checksum_errors(
    entries: list[RustyV8HttpFile],
    checksums: dict[str, str],
) -> list[str]:
    errors = []
    for entry in entries:
        expected = checksums.get(entry.downloaded_file_path)
        if expected is None:
            continue
        if entry.sha256 is None:
            errors.append(f"MODULE.bazel {entry.name} is missing sha256")
        elif entry.sha256 != expected:
            errors.append(
                f"MODULE.bazel {entry.name} has sha256 {entry.sha256}, "
                f"expected {expected}"
            )
    return errors


def raise_checksum_errors(message: str, errors: list[str]) -> None:
    if errors:
        formatted_errors = "\n".join(f"- {error}" for error in errors)
        raise RustyV8ChecksumError(f"{message}:\n{formatted_errors}")


def check_module_bazel_text(
    module_bazel: str,
    checksums: dict[str, str],
    version: str,
) -> None:
    entries = rusty_v8_http_files(module_bazel, version)
    errors = [
        *module_entry_set_errors(entries, checksums, version),
        *module_checksum_errors(entries, checksums),
    ]
    raise_checksum_errors("rusty_v8 MODULE.bazel checksum drift", errors)


def block_with_sha256(block: str, checksum: str) -> str:
    sha256_line_re = re.compile(r'(?m)^(\s*)sha256\s*=\s*"[0-9a-f]+",\s*$')
    if sha256_line_re.search(block):
        return sha256_line_re.sub(
            lambda match: f'{match.group(1)}sha256 = "{checksum}",',
            block,
            count=1,
        )

    downloaded_file_path_match = re.search(
        r'(?m)^(\s*)downloaded_file_path\s*=\s*"[^"]+",\n',
        block,
    )
    if not downloaded_file_path_match:
        raise RustyV8ChecksumError("http_file block is missing downloaded_file_path")
    insert_at = downloaded_file_path_match.end()
    indent = downloaded_file_path_match.group(1)
    return f'{block[:insert_at]}{indent}sha256 = "{checksum}",\n{block[insert_at:]}'


def update_module_bazel_text(
    module_bazel: str,
    checksums: dict[str, str],
    version: str,
) -> str:
    entries = rusty_v8_http_files(module_bazel, version)
    errors = module_entry_set_errors(entries, checksums, version)
    raise_checksum_errors("cannot update rusty_v8 MODULE.bazel checksums", errors)

    updated = []
    previous_end = 0
    for entry in entries:
        updated.append(module_bazel[previous_end : entry.start])
        updated.append(
            block_with_sha256(entry.block, checksums[entry.downloaded_file_path])
        )
        previous_end = entry.end
    updated.append(module_bazel[previous_end:])
    return "".join(updated)


def check_module_bazel(
    module_bazel_path: Path,
    manifest_path: Path,
    version: str,
) -> None:
    checksums = parse_checksum_manifest(manifest_path)
    module_bazel = module_bazel_path.read_text(encoding="utf-8")
    check_module_bazel_text(module_bazel, checksums, version)
    print(f"{module_bazel_path} rusty_v8 {version} checksums match {manifest_path}")


def update_module_bazel(
    module_bazel_path: Path,
    manifest_path: Path,
    version: str,
) -> None:
    checksums = parse_checksum_manifest(manifest_path)
    module_bazel = module_bazel_path.read_text(encoding="utf-8")
    updated_module_bazel = update_module_bazel_text(module_bazel, checksums, version)
    if updated_module_bazel == module_bazel:
        print(f"{module_bazel_path} rusty_v8 {version} checksums are already current")
        return
    module_bazel_path.write_text(updated_module_bazel, encoding="utf-8")
    print(f"updated {module_bazel_path} rusty_v8 {version} checksums")
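Taken together, the deleted module supports a read-then-check flow: parse the manifest, scan MODULE.bazel for matching http_file blocks, and raise RustyV8ChecksumError on any drift. A minimal usage sketch under assumed paths (the manifest name follows the rusty_v8_<version-slug>.sha256 convention from rusty_v8_bazel.py above):

from pathlib import Path

import rusty_v8_module_bazel

checksums = rusty_v8_module_bazel.parse_checksum_manifest(
    Path("third_party/v8/rusty_v8_146_4_0.sha256")
)
module_bazel = Path("MODULE.bazel").read_text(encoding="utf-8")
# Returns None on success; raises RustyV8ChecksumError listing every mismatch.
rusty_v8_module_bazel.check_module_bazel_text(module_bazel, checksums, "146.4.0")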
126
.github/scripts/test_rusty_v8_bazel.py
vendored
@@ -1,126 +0,0 @@
#!/usr/bin/env python3

from __future__ import annotations

import textwrap
import unittest

import rusty_v8_module_bazel


class RustyV8BazelTest(unittest.TestCase):
    def test_update_module_bazel_replaces_and_inserts_sha256(self) -> None:
        module_bazel = textwrap.dedent(
            """\
            http_file(
                name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
                downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
                urls = [
                    "https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                ],
            )

            http_file(
                name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
                downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
                urls = [
                    "https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
                ],
            )

            http_file(
                name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
                downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                urls = [
                    "https://example.test/old.gz",
                ],
            )
            """
        )
        checksums = {
            "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
                "1111111111111111111111111111111111111111111111111111111111111111"
            ),
            "src_binding_release_x86_64-unknown-linux-musl.rs": (
                "2222222222222222222222222222222222222222222222222222222222222222"
            ),
        }

        updated = rusty_v8_module_bazel.update_module_bazel_text(
            module_bazel,
            checksums,
            "146.4.0",
        )

        self.assertEqual(
            textwrap.dedent(
                """\
                http_file(
                    name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
                    downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                    sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
                    urls = [
                        "https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                    ],
                )

                http_file(
                    name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
                    downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
                    sha256 = "2222222222222222222222222222222222222222222222222222222222222222",
                    urls = [
                        "https://example.test/src_binding_release_x86_64-unknown-linux-musl.rs",
                    ],
                )

                http_file(
                    name = "rusty_v8_145_0_0_x86_64_unknown_linux_gnu_archive",
                    downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                    sha256 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    urls = [
                        "https://example.test/old.gz",
                    ],
                )
                """
            ),
            updated,
        )
        rusty_v8_module_bazel.check_module_bazel_text(updated, checksums, "146.4.0")

    def test_check_module_bazel_rejects_manifest_drift(self) -> None:
        module_bazel = textwrap.dedent(
            """\
            http_file(
                name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
                downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                sha256 = "1111111111111111111111111111111111111111111111111111111111111111",
                urls = [
                    "https://example.test/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
                ],
            )
            """
        )
        checksums = {
            "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz": (
                "1111111111111111111111111111111111111111111111111111111111111111"
            ),
            "orphan.gz": (
                "2222222222222222222222222222222222222222222222222222222222222222"
            ),
        }

        with self.assertRaisesRegex(
            rusty_v8_module_bazel.RustyV8ChecksumError,
            "manifest has orphan.gz",
        ):
            rusty_v8_module_bazel.check_module_bazel_text(
                module_bazel,
                checksums,
                "146.4.0",
            )


if __name__ == "__main__":
    unittest.main()
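These tests are what the Bazel workflow below runs through unittest discovery; the equivalent in-process invocation, using the same start directory and pattern as CI, would be roughly:

import unittest

suite = unittest.defaultTestLoader.discover(
    start_dir=".github/scripts", pattern="test_rusty_v8_bazel.py"
)
unittest.TextTestRunner().run(suite)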
@@ -25,10 +25,7 @@ TOP_LEVEL_NAME_EXCEPTIONS = {
UTILITY_NAME_EXCEPTIONS = {
    "path-utils": "codex-utils-path",
}
MANIFEST_FEATURE_EXCEPTIONS = {
    "codex-rs/code-mode/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
    "codex-rs/v8-poc/Cargo.toml": {"sandbox": ("v8/v8_enable_sandbox",)},
}
MANIFEST_FEATURE_EXCEPTIONS = {}
OPTIONAL_DEPENDENCY_EXCEPTIONS = set()
INTERNAL_DEPENDENCY_FEATURE_EXCEPTIONS = {}
89
.github/scripts/verify_tui_core_boundary.py
vendored
@@ -1,89 +0,0 @@
#!/usr/bin/env python3

"""Verify codex-tui does not depend on or import codex-core directly."""

from __future__ import annotations

import re
import sys
import tomllib
from pathlib import Path


ROOT = Path(__file__).resolve().parents[2]
TUI_ROOT = ROOT / "codex-rs" / "tui"
TUI_MANIFEST = TUI_ROOT / "Cargo.toml"
FORBIDDEN_PACKAGE = "codex-core"
FORBIDDEN_SOURCE_PATTERNS = (
    re.compile(r"\bcodex_core::"),
    re.compile(r"\buse\s+codex_core\b"),
    re.compile(r"\bextern\s+crate\s+codex_core\b"),
)


def main() -> int:
    failures = []
    failures.extend(manifest_failures())
    failures.extend(source_failures())

    if not failures:
        return 0

    print("codex-tui must not depend on or import codex-core directly.")
    print(
        "Use the app-server protocol/client boundary instead; temporary embedded "
        "startup gaps belong behind codex_app_server_client::legacy_core."
    )
    print()
    for failure in failures:
        print(f"- {failure}")

    return 1


def manifest_failures() -> list[str]:
    manifest = tomllib.loads(TUI_MANIFEST.read_text())
    failures = []
    for section_name, dependencies in dependency_sections(manifest):
        if FORBIDDEN_PACKAGE in dependencies:
            failures.append(
                f"{relative_path(TUI_MANIFEST)} declares `{FORBIDDEN_PACKAGE}` "
                f"in `[{section_name}]`"
            )
    return failures


def dependency_sections(manifest: dict) -> list[tuple[str, dict]]:
    sections: list[tuple[str, dict]] = []
    for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
        dependencies = manifest.get(section_name)
        if isinstance(dependencies, dict):
            sections.append((section_name, dependencies))

    for target_name, target in manifest.get("target", {}).items():
        if not isinstance(target, dict):
            continue
        for section_name in ("dependencies", "dev-dependencies", "build-dependencies"):
            dependencies = target.get(section_name)
            if isinstance(dependencies, dict):
                sections.append((f'target.{target_name}.{section_name}', dependencies))

    return sections


def source_failures() -> list[str]:
    failures = []
    for path in sorted(TUI_ROOT.glob("**/*.rs")):
        text = path.read_text()
        for line_number, line in enumerate(text.splitlines(), start=1):
            if any(pattern.search(line) for pattern in FORBIDDEN_SOURCE_PATTERNS):
                failures.append(f"{relative_path(path)}:{line_number} imports `codex_core`")
    return failures


def relative_path(path: Path) -> str:
    return str(path.relative_to(ROOT))


if __name__ == "__main__":
    sys.exit(main())
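The source scan in this deleted checker is a plain line-by-line regex match; a self-contained sketch of how one forbidden import trips it (the Rust line is illustrative):

import re

FORBIDDEN_SOURCE_PATTERNS = (
    re.compile(r"\bcodex_core::"),
    re.compile(r"\buse\s+codex_core\b"),
    re.compile(r"\bextern\s+crate\s+codex_core\b"),
)

line = "use codex_core::config::Config;"  # illustrative forbidden import
assert any(pattern.search(line) for pattern in FORBIDDEN_SOURCE_PATTERNS)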
18
.github/workflows/Dockerfile.bazel
vendored
@@ -8,9 +8,25 @@ FROM ubuntu:24.04

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        curl git python3 ca-certificates && \
        curl git python3 ca-certificates xz-utils && \
    rm -rf /var/lib/apt/lists/*

COPY codex-rs/node-version.txt /tmp/node-version.txt

RUN set -eux; \
    node_arch="$(dpkg --print-architecture)"; \
    case "${node_arch}" in \
        amd64) node_dist_arch="x64" ;; \
        arm64) node_dist_arch="arm64" ;; \
        *) echo "unsupported architecture: ${node_arch}"; exit 1 ;; \
    esac; \
    node_version="$(tr -d '[:space:]' </tmp/node-version.txt)"; \
    curl -fsSLO "https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-${node_dist_arch}.tar.xz"; \
    tar -xJf "node-v${node_version}-linux-${node_dist_arch}.tar.xz" -C /usr/local --strip-components=1; \
    rm "node-v${node_version}-linux-${node_dist_arch}.tar.xz" /tmp/node-version.txt; \
    node --version; \
    npm --version

# Install dotslash.
RUN curl -LSfs "https://github.com/facebook/dotslash/releases/download/v0.5.8/dotslash-ubuntu-22.04.$(uname -m).tar.gz" | tar fxz - -C /usr/local/bin
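The Node install step maps Debian architecture names onto Node's dist names before building the download URL; the same mapping, sketched in Python (the version value is illustrative, since the real one comes from codex-rs/node-version.txt):

NODE_DIST_ARCH = {"amd64": "x64", "arm64": "arm64"}

def node_tarball_url(node_arch: str, node_version: str) -> str:
    # Unsupported architectures raise KeyError, mirroring the fail-fast `case` above.
    dist_arch = NODE_DIST_ARCH[node_arch]
    return (
        f"https://nodejs.org/dist/v{node_version}/"
        f"node-v{node_version}-linux-{dist_arch}.tar.xz"
    )

print(node_tarball_url("amd64", "22.0.0"))  # version illustrative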
314
.github/workflows/bazel.yml
vendored
@@ -17,10 +17,6 @@ concurrency:
  cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
  test:
    # PRs use a fast Windows cross-compiled test leg for pre-merge signal.
    # Post-merge pushes to main also run the native Windows test job below for
    # broader Windows signal without putting PR latency back on the critical
    # path. Cargo CI owns V8/code-mode test coverage for now.
    timeout-minutes: 30
    strategy:
      fail-fast: false
@@ -44,39 +40,49 @@ jobs:
        # - os: ubuntu-24.04-arm
        #   target: aarch64-unknown-linux-gnu

        # Windows fast path: build the windows-gnullvm binaries with Linux
        # RBE, then run the resulting Windows tests on the Windows runner.
        # Cargo CI preserves V8/code-mode coverage while Bazel CI keeps broad
        # non-code-mode signal.
        # Windows
        - os: windows-latest
          target: x86_64-pc-windows-gnullvm
    runs-on: ${{ matrix.os }}

    # Configure a human readable name for each job
    name: Bazel test on ${{ matrix.os }} for ${{ matrix.target }}
    name: Local Bazel build on ${{ matrix.os }} for ${{ matrix.target }}

    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Check rusty_v8 MODULE.bazel checksums
        if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
        shell: bash
        run: |
          python3 .github/scripts/rusty_v8_bazel.py check-module-bazel
          python3 -m unittest discover -s .github/scripts -p test_rusty_v8_bazel.py

      - name: Prepare Bazel CI
        id: prepare_bazel
        uses: ./.github/actions/prepare-bazel-ci
      - name: Set up Bazel CI
        id: setup_bazel
        uses: ./.github/actions/setup-bazel-ci
        with:
          target: ${{ matrix.target }}
          cache-scope: bazel-${{ github.job }}
          install-test-prereqs: "true"

      # Restore the Bazel repository cache explicitly so external dependencies
      # do not need to be re-downloaded on every CI run. Keep restore failures
      # non-fatal so transient cache-service errors degrade to a cold build
      # instead of failing the job.
      - name: Restore bazel repository cache
        id: cache_bazel_repository_restore
        continue-on-error: true
        uses: actions/cache/restore@v5
        with:
          path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
          key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
          restore-keys: |
            bazel-cache-${{ matrix.target }}

      - name: Check MODULE.bazel.lock is up to date
        if: matrix.os == 'ubuntu-24.04' && matrix.target == 'x86_64-unknown-linux-gnu'
        shell: bash
        run: ./scripts/check-module-bazel-lock.sh

      - name: Set up Bazel execution logs
        shell: bash
        run: |
          mkdir -p "${RUNNER_TEMP}/bazel-execution-logs"
          echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}"

      - name: bazel test //...
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
@@ -88,34 +94,23 @@ jobs:
            # path. V8 consumers under `//codex-rs/...` still participate
            # transitively through `//...`.
            -//third_party/v8:all
            # V8-backed code-mode tests are covered by Cargo CI. Bazel CI
            # cross-compiles in several legs, and those tests are not stable in
            # that setup yet.
            -//codex-rs/code-mode:code-mode-unit-tests
            -//codex-rs/v8-poc:v8-poc-unit-tests
          )

          bazel_wrapper_args=(
            --print-failed-action-summary
            --print-failed-test-logs
          )
          bazel_test_args=(
            test
            --test_tag_filters=-argument-comment-lint
            --test_verbose_timeout_warnings
            --build_metadata=COMMIT_SHA=${GITHUB_SHA}
            --use-node-test-env
          )
          if [[ "${RUNNER_OS}" == "Windows" ]]; then
            bazel_wrapper_args+=(
              --windows-cross-compile
              --remote-download-toplevel
            )
            bazel_wrapper_args+=(--windows-msvc-host-platform)
          fi

          ./.github/scripts/run-bazel-ci.sh \
            "${bazel_wrapper_args[@]}" \
            -- \
            "${bazel_test_args[@]}" \
            test \
            --test_tag_filters=-argument-comment-lint \
            --test_verbose_timeout_warnings \
            --build_metadata=COMMIT_SHA=${GITHUB_SHA} \
            -- \
            "${bazel_targets[@]}"

@@ -128,88 +123,15 @@ jobs:
          path: ${{ runner.temp }}/bazel-execution-logs
          if-no-files-found: ignore

      # Save the job-scoped Bazel repository cache after cache misses. Keep the
      # upload non-fatal so cache service issues never fail the job itself.
      # Save bazel repository cache explicitly; make non-fatal so cache uploading
      # never fails the overall job. Only save when key wasn't hit.
      - name: Save bazel repository cache
        if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
        if: always() && !cancelled() && steps.cache_bazel_repository_restore.outputs.cache-hit != 'true'
        continue-on-error: true
        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
        with:
          path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
          key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}

  test-windows-native-main:
    # Native Windows Bazel tests are slower and frequently approach the
    # 30-minute PR budget. Run this only for post-merge commits to main and give
    # it a larger timeout.
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    timeout-minutes: 40
    runs-on: windows-latest
    name: Bazel test on windows-latest for x86_64-pc-windows-gnullvm (native main)

    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Prepare Bazel CI
        id: prepare_bazel
        uses: ./.github/actions/prepare-bazel-ci
        with:
          target: x86_64-pc-windows-gnullvm
          cache-scope: bazel-${{ github.job }}
          install-test-prereqs: "true"

      - name: bazel test //...
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          bazel_targets=(
            //...
            # Keep standalone V8 library targets out of the ordinary Bazel CI
            # path. V8 consumers under `//codex-rs/...` still participate
            # transitively through `//...`.
            -//third_party/v8:all
            # Keep this aligned with the main Bazel job. The native Windows
            # job preserves broad post-merge coverage, but code-mode/V8 tests
            # are covered by Cargo CI rather than Bazel for now.
            -//codex-rs/code-mode:code-mode-unit-tests
            -//codex-rs/v8-poc:v8-poc-unit-tests
          )

          bazel_test_args=(
            test
            --test_tag_filters=-argument-comment-lint
            --test_verbose_timeout_warnings
            --build_metadata=COMMIT_SHA=${GITHUB_SHA}
            --build_metadata=TAG_windows_native_main=true
          )

          ./.github/scripts/run-bazel-ci.sh \
            --print-failed-action-summary \
            --print-failed-test-logs \
            -- \
            "${bazel_test_args[@]}" \
            -- \
            "${bazel_targets[@]}"

      - name: Upload Bazel execution logs
        if: always() && !cancelled()
        continue-on-error: true
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: bazel-execution-logs-test-windows-native-x86_64-pc-windows-gnullvm
          path: ${{ runner.temp }}/bazel-execution-logs
          if-no-files-found: ignore

      # Save the job-scoped Bazel repository cache after cache misses. Keep the
      # upload non-fatal so cache service issues never fail the job itself.
      - name: Save bazel repository cache
        if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
        continue-on-error: true
        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
        with:
          path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
          key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
          path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
          key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}

  clippy:
    timeout-minutes: 30
@@ -233,12 +155,31 @@ jobs:
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Prepare Bazel CI
        id: prepare_bazel
        uses: ./.github/actions/prepare-bazel-ci
      - name: Set up Bazel CI
        id: setup_bazel
        uses: ./.github/actions/setup-bazel-ci
        with:
          target: ${{ matrix.target }}
          cache-scope: bazel-${{ github.job }}

      # Restore the Bazel repository cache explicitly so external dependencies
      # do not need to be re-downloaded on every CI run. Keep restore failures
      # non-fatal so transient cache-service errors degrade to a cold build
      # instead of failing the job.
      - name: Restore bazel repository cache
        id: cache_bazel_repository_restore
        continue-on-error: true
        uses: actions/cache/restore@v5
        with:
          path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
          key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
          restore-keys: |
            bazel-cache-${{ matrix.target }}

      - name: Set up Bazel execution logs
        shell: bash
        run: |
          mkdir -p "${RUNNER_TEMP}/bazel-execution-logs"
          echo "CODEX_BAZEL_EXECUTION_LOG_COMPACT_DIR=${RUNNER_TEMP}/bazel-execution-logs" >> "${GITHUB_ENV}"

      - name: bazel build --config=clippy lint targets
        env:
@@ -250,33 +191,20 @@ jobs:
            --build_metadata=COMMIT_SHA=${GITHUB_SHA}
            --build_metadata=TAG_job=clippy
          )
          bazel_wrapper_args=()
          bazel_target_list_args=()
          if [[ "${RUNNER_OS}" == "Windows" ]]; then
            # Keep this aligned with the fast Windows Bazel test job: use
            # Linux RBE for clippy build actions while targeting Windows
            # gnullvm. Fork/community PRs without the BuildBuddy secret fall
            # back inside `run-bazel-ci.sh` to the previous local Windows MSVC
            # host-platform shape.
            bazel_wrapper_args+=(--windows-cross-compile)
            bazel_target_list_args+=(--windows-cross-compile)
            if [[ -z "${BUILDBUDDY_API_KEY:-}" ]]; then
              # The fork fallback can see incompatible explicit Windows-cross
              # internal test binaries in the generated target list. Preserve
              # the old local-fallback behavior there.
              bazel_clippy_args+=(--skip_incompatible_explicit_targets)
            fi
            # Some explicit targets pulled in through //codex-rs/... are
            # intentionally incompatible with `//:local_windows`, but the lint
            # aspect still traverses their compatible Rust deps.
            bazel_clippy_args+=(--skip_incompatible_explicit_targets)
          fi

          bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh "${bazel_target_list_args[@]}")"
          bazel_target_lines="$(./scripts/list-bazel-clippy-targets.sh)"
          bazel_targets=()
          while IFS= read -r target; do
            bazel_targets+=("${target}")
          done <<< "${bazel_target_lines}"

          ./.github/scripts/run-bazel-ci.sh \
            --print-failed-action-summary \
            "${bazel_wrapper_args[@]}" \
            -- \
            build \
            "${bazel_clippy_args[@]}" \
@@ -292,116 +220,12 @@ jobs:
          path: ${{ runner.temp }}/bazel-execution-logs
          if-no-files-found: ignore

      # Save the job-scoped Bazel repository cache after cache misses. Keep the
      # upload non-fatal so cache service issues never fail the job itself.
      # Save bazel repository cache explicitly; make non-fatal so cache uploading
      # never fails the overall job. Only save when key wasn't hit.
      - name: Save bazel repository cache
        if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
        continue-on-error: true
        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
        with:
          path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
          key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}

  verify-release-build:
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-24.04
            target: x86_64-unknown-linux-gnu
          - os: macos-15-xlarge
            target: aarch64-apple-darwin
          - os: windows-latest
            target: x86_64-pc-windows-gnullvm
    runs-on: ${{ matrix.os }}
    name: Verify release build on ${{ matrix.os }} for ${{ matrix.target }}

    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Prepare Bazel CI
        id: prepare_bazel
        uses: ./.github/actions/prepare-bazel-ci
        with:
          target: ${{ matrix.target }}
          cache-scope: bazel-${{ github.job }}

      - name: bazel build verify-release-build targets
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          # This job exists to compile Rust code behind
          # `cfg(not(debug_assertions))` so PR CI catches failures that would
          # otherwise show up only in a release build. We do not need the full
          # optimizer and debug-info work that normally comes with a release
          # build to get that signal, so keep Bazel in `fastbuild` and disable
          # Rust debug assertions explicitly.
          bazel_wrapper_args=()
          if [[ "${RUNNER_OS}" == "Windows" ]]; then
            # This is build-only signal, so use the same Linux-RBE
            # cross-compile path as the fast Windows test and clippy jobs.
            # Fork/community PRs without the BuildBuddy secret fall back
            # inside `run-bazel-ci.sh` to the previous local Windows MSVC
            # host-platform shape.
            bazel_wrapper_args+=(--windows-cross-compile)
          fi

          bazel_build_args=(
            --compilation_mode=fastbuild
            --@rules_rust//rust/settings:extra_rustc_flag=-Cdebug-assertions=no
            --@rules_rust//rust/settings:extra_exec_rustc_flag=-Cdebug-assertions=no
            --build_metadata=COMMIT_SHA=${GITHUB_SHA}
            --build_metadata=TAG_job=verify-release-build
            --build_metadata=TAG_rust_debug_assertions=off
          )

          bazel_target_lines="$(bash ./scripts/list-bazel-release-targets.sh)"
          bazel_targets=()
          while IFS= read -r target; do
            bazel_targets+=("${target}")
          done <<< "${bazel_target_lines}"

          ./.github/scripts/run-bazel-ci.sh \
            "${bazel_wrapper_args[@]}" \
            -- \
            build \
            "${bazel_build_args[@]}" \
            -- \
            "${bazel_targets[@]}"

      - name: Verify Bazel builds bwrap
        if: runner.os == 'Linux'
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          ./.github/scripts/run-bazel-ci.sh \
            --remote-download-toplevel \
            --print-failed-action-summary \
            -- \
            build \
            --build_metadata=COMMIT_SHA=${GITHUB_SHA} \
            --build_metadata=TAG_job=verify-bwrap \
            -- \
            //codex-rs/bwrap:bwrap

      - name: Upload Bazel execution logs
        if: always() && !cancelled()
        continue-on-error: true
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
        with:
          name: bazel-execution-logs-verify-release-build-${{ matrix.target }}
          path: ${{ runner.temp }}/bazel-execution-logs
          if-no-files-found: ignore

      # Save the job-scoped Bazel repository cache after cache misses. Keep the
      # upload non-fatal so cache service issues never fail the job itself.
      - name: Save bazel repository cache
        if: always() && !cancelled() && steps.prepare_bazel.outputs.repository-cache-hit != 'true'
        continue-on-error: true
        uses: actions/cache/save@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5
        with:
          path: ${{ steps.prepare_bazel.outputs.repository-cache-path }}
          key: ${{ steps.prepare_bazel.outputs.repository-cache-key }}
          path: ${{ steps.setup_bazel.outputs.repository-cache-path }}
          key: bazel-cache-${{ matrix.target }}-${{ hashFiles('MODULE.bazel', 'codex-rs/Cargo.lock', 'codex-rs/Cargo.toml') }}
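The cache keys above combine the target with a hash over MODULE.bazel and the Cargo manifests, so any dependency change invalidates the entry. A rough Python analogue of that hashFiles(...) key, for intuition only (this is not how GitHub computes it internally):

import hashlib
from pathlib import Path

def repository_cache_key(target: str, files: list[str]) -> str:
    # Fold every keyed file's bytes into one digest; any edit changes the key.
    digest = hashlib.sha256()
    for name in files:
        digest.update(Path(name).read_bytes())
    return f"bazel-cache-{target}-{digest.hexdigest()}"

print(
    repository_cache_key(
        "x86_64-unknown-linux-gnu",
        ["MODULE.bazel", "codex-rs/Cargo.lock", "codex-rs/Cargo.toml"],
    )
)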
4
.github/workflows/cargo-deny.yml
vendored
@@ -17,10 +17,10 @@ jobs:
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
        uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable

      - name: Run cargo-deny
        uses: EmbarkStudios/cargo-deny-action@82eb9f621fbc699dd0918f3ea06864c14cc84246 # v2
        with:
          rust-version: 1.93.0
          rust-version: stable
          manifest-path: ./codex-rs/Cargo.toml
14
.github/workflows/ci.yml
vendored
@@ -17,9 +17,6 @@ jobs:
      - name: Verify codex-rs Cargo manifests inherit workspace settings
        run: python3 .github/scripts/verify_cargo_workspace_manifests.py

      - name: Verify codex-tui does not import codex-core directly
        run: python3 .github/scripts/verify_tui_core_boundary.py

      - name: Verify Bazel clippy flags match Cargo workspace lints
        run: python3 .github/scripts/verify_bazel_clippy_lints.py

@@ -45,19 +42,12 @@ jobs:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          # Use a recent successful rust-release run that published the full
          # cross-platform native payload required by the npm package layout.
          # Passing the workflow URL directly avoids relying on old rust-v*
          # branches remaining discoverable via `gh run list --branch ...`.
          CODEX_VERSION=0.125.0
          WORKFLOW_URL="https://github.com/openai/codex/actions/runs/24901475298"
          # Use a rust-release version that includes all native binaries.
          CODEX_VERSION=0.115.0
          OUTPUT_DIR="${RUNNER_TEMP}"
          # This reused workflow predates the standalone bwrap artifact.
          python3 ./scripts/stage_npm_packages.py \
            --release-version "$CODEX_VERSION" \
            --workflow-url "$WORKFLOW_URL" \
            --package codex \
            --allow-missing-native-component bwrap \
            --output-dir "$OUTPUT_DIR"
          PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
          echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"
4
.github/workflows/issue-deduplicator.yml
vendored
@@ -61,7 +61,7 @@ jobs:
      # .github/prompts/issue-deduplicator.txt file is obsolete and removed.
      - id: codex-all
        name: Find duplicates (pass 1, all issues)
        uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
        uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
        with:
          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          allow-users: "*"
@@ -195,7 +195,7 @@ jobs:

      - id: codex-open
        name: Find duplicates (pass 2, open issues)
        uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
        uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
        with:
          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          allow-users: "*"
18
.github/workflows/issue-labeler.yml
vendored
@@ -20,7 +20,7 @@ jobs:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - id: codex
        uses: openai/codex-action@5c3f4ccdb2b8790f73d6b21751ac00e602aa0c02 # v1.7
        uses: openai/codex-action@0b91f4a2703c23df3102c3f0967d3c6db34eedef # v1
        with:
          openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
          allow-users: "*"
@@ -44,7 +44,6 @@ jobs:
            6. iOS — Issues with the Codex iOS app.

          - Additionally add zero or more of the following labels that are relevant to the issue content. Prefer a small set of precise labels over many broad ones.
          - For agent-area issues, prefer the most specific applicable label. Use "agent" only as a fallback for agent-related issues that do not fit a more specific agent-area label. Prefer "app-server" over "session" or "config" when the issue is about app-server protocol, API, RPC, schema, launch, or bridge behavior. Use "memory" for agentic memory storage/retrieval and "performance" for high process memory utilization or memory leaks.
            1. windows-os — Bugs or friction specific to Windows environments (always when PowerShell is mentioned, path handling, copy/paste, OS-specific auth or tooling failures).
            2. mcp — Topics involving Model Context Protocol servers/clients.
            3. mcp-server — Problems related to the codex mcp-server command, where codex runs as an MCP server.
@@ -62,21 +61,6 @@ jobs:
            15. sandbox - Issues related to local sandbox environments or tool call approvals to override sandbox restrictions.
            16. tool-calls - Problems related to specific tool call invocations including unexpected errors, failures, or hangs.
            17. TUI - Problems with the terminal user interface (TUI) including keyboard shortcuts, copy & pasting, menus, or screen update issues.
            18. app-server - Issues involving the app-server protocol or interfaces, including SDK/API payloads, thread/* and turn/* RPCs, app-server launch behavior, external app/controller bridges, and app-server protocol/schema behavior.
            19. connectivity - Network connectivity or endpoint issues, including reconnecting messages, stream dropped/disconnected errors, websocket/SSE/transport failures, timeout/network/VPN/proxy/API endpoint failures, and related retry behavior.
            20. subagent - Issues involving subagents, sub-agents, or multi-agent behavior, including spawn_agent, wait_agent, close_agent, worker/explorer roles, delegation, agent teams, lifecycle, model/config inheritance, quotas, and orchestration.
            21. session - Issues involving session or thread management, including resume, fork, archive, rename/title, thread history, rollout persistence, compaction, checkpoints, retention, and cross-session state.
            22. config - Issues involving config.toml, config keys, config key merging, config updates, profiles, hooks config, project config, agent role TOMLs, instruction/personality config, and config schema behavior.
            23. plan - Issues involving plan mode, planning workflows, or plan-specific tools/behavior.
            24. computer-use - Issues involving agentic computer use or SkyComputerUseService.
            25. browser - Issues involving agentic browser use, IAB, or the built-in browser within the Codex app.
            26. memory - Issues involving agentic memory storage and retrieval.
            27. imagen - Issues involving image generation.
            28. remote - Issues involving remote access, remote control, or SSH.
            29. performance - Issues involving slow, laggy performance, high memory utilization, or memory leaks.
            30. automations - Issues involving scheduled automation tasks or heartbeats.
            31. pets - Issues involving pets avatars and animations.
            32. agent - Fallback only for core agent loop or agent-related issues that do not fit app-server, connectivity, subagent, session, config, plan, computer-use, browser, memory, imagen, remote, performance, automations, or pets.

          Issue number: ${{ github.event.issue.number }}
38
.github/workflows/rust-ci-full.yml
vendored
@@ -43,9 +43,6 @@ jobs:
  argument_comment_lint_package:
    name: Argument comment lint package
    runs-on: ubuntu-24.04
    env:
      CARGO_DYLINT_VERSION: 5.0.0
      DYLINT_LINK_VERSION: 5.0.0
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
@@ -62,13 +59,10 @@ jobs:
            ~/.cargo/registry/index
            ~/.cargo/registry/cache
            ~/.cargo/git/db
          key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
          key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
      - name: Install cargo-dylint tooling
        if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
        shell: bash
        run: |
          cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
          cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
        run: cargo install --locked cargo-dylint dylint-link
      - name: Check Python wrapper syntax
        run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
      - name: Test Python wrapper helpers
@@ -76,8 +70,6 @@ jobs:
      - name: Test argument comment lint package
        working-directory: tools/argument-comment-lint
        run: cargo test
        env:
          RUST_MIN_STACK: "8388608" # 8 MiB

  argument_comment_lint_prebuilt:
    name: Argument comment lint - ${{ matrix.name }}
@@ -423,10 +415,22 @@ jobs:
          echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"

      - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
        name: Configure musl rusty_v8 artifact overrides and verify checksums
        uses: ./.github/actions/setup-rusty-v8-musl
        with:
          target: ${{ matrix.target }}
        name: Configure musl rusty_v8 artifact overrides
        env:
          TARGET: ${{ matrix.target }}
        shell: bash
        run: |
          set -euo pipefail
          version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
          release_tag="rusty-v8-v${version}"
          base_url="https://github.com/openai/codex/releases/download/${release_tag}"
          archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
          binding_dir="${RUNNER_TEMP}/rusty_v8"
          binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
          mkdir -p "${binding_dir}"
          curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
          echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
          echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"

      - name: Install cargo-chef
        if: ${{ matrix.profile == 'release' }}
@@ -560,6 +564,10 @@ jobs:

    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - name: Set up Node.js for js_repl tests
        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
        with:
          node-version-file: codex-rs/node-version.txt
      - name: Install Linux build dependencies
        if: ${{ runner.os == 'Linux' }}
        shell: bash
@@ -662,14 +670,12 @@ jobs:
          export CODEX_TEST_REMOTE_ENV_CONTAINER_NAME=codex-remote-test-env
          source "${GITHUB_WORKSPACE}/scripts/test-remote-env.sh"
          echo "CODEX_TEST_REMOTE_ENV=${CODEX_TEST_REMOTE_ENV}" >> "$GITHUB_ENV"
          echo "CODEX_TEST_REMOTE_EXEC_SERVER_URL=${CODEX_TEST_REMOTE_EXEC_SERVER_URL}" >> "$GITHUB_ENV"

      - name: tests
        id: test
        run: cargo nextest run --no-fail-fast --target ${{ matrix.target }} --cargo-profile ci-test --timings
        env:
          RUST_BACKTRACE: 1
          RUST_MIN_STACK: "8388608" # 8 MiB
          NEXTEST_STATUS_LEVEL: leak

      - name: Upload Cargo timings (nextest)
67
.github/workflows/rust-ci.yml
vendored
@@ -41,7 +41,6 @@ jobs:
          for f in "${files[@]}"; do
            [[ $f == codex-rs/* ]] && codex=true
            [[ $f == codex-rs/* || $f == tools/argument-comment-lint/* || $f == justfile ]] && argument_comment_lint=true
            [[ $f == defs.bzl || $f == workspace_root_test_launcher.sh.tpl || $f == workspace_root_test_launcher.bat.tpl ]] && argument_comment_lint=true
            [[ $f == tools/argument-comment-lint/* || $f == .github/workflows/rust-ci.yml || $f == .github/workflows/rust-ci-full.yml ]] && argument_comment_lint_package=true
            [[ $f == .github/* ]] && workflows=true
          done
@@ -91,9 +90,6 @@ jobs:
    runs-on: ubuntu-24.04
    needs: changed
    if: ${{ needs.changed.outputs.argument_comment_lint_package == 'true' }}
    env:
      CARGO_DYLINT_VERSION: 5.0.0
      DYLINT_LINK_VERSION: 5.0.0
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
@@ -117,13 +113,10 @@ jobs:
            ~/.cargo/registry/index
            ~/.cargo/registry/cache
            ~/.cargo/git/db
          key: argument-comment-lint-${{ runner.os }}-${{ env.CARGO_DYLINT_VERSION }}-${{ env.DYLINT_LINK_VERSION }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
          key: argument-comment-lint-${{ runner.os }}-${{ hashFiles('tools/argument-comment-lint/Cargo.lock', 'tools/argument-comment-lint/rust-toolchain', '.github/workflows/rust-ci.yml', '.github/workflows/rust-ci-full.yml') }}
      - name: Install cargo-dylint tooling
        if: ${{ steps.cargo_dylint_cache.outputs.cache-hit != 'true' }}
        shell: bash
        run: |
          cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION"
          cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
        run: cargo install --locked cargo-dylint dylint-link
      - name: Check Python wrapper syntax
        run: python3 -m py_compile tools/argument-comment-lint/wrapper_common.py tools/argument-comment-lint/run.py tools/argument-comment-lint/run-prebuilt-linter.py tools/argument-comment-lint/test_wrapper_common.py
      - name: Test Python wrapper helpers
@@ -131,14 +124,13 @@ jobs:
      - name: Test argument comment lint package
        working-directory: tools/argument-comment-lint
        run: cargo test
        env:
          RUST_MIN_STACK: "8388608" # 8 MiB

  argument_comment_lint_prebuilt:
    name: Argument comment lint - ${{ matrix.name }}
    runs-on: ${{ matrix.runs_on || matrix.runner }}
    timeout-minutes: ${{ matrix.timeout_minutes }}
    needs: changed
    if: ${{ needs.changed.outputs.argument_comment_lint == 'true' || needs.changed.outputs.workflows == 'true' }}
    strategy:
      fail-fast: false
      matrix:
@@ -156,28 +148,43 @@ jobs:
          group: codex-runners
          labels: codex-windows-x64
    steps:
      - name: Check whether argument comment lint should run
        id: argument_comment_lint_gate
        shell: bash
        env:
          ARGUMENT_COMMENT_LINT: ${{ needs.changed.outputs.argument_comment_lint }}
          WORKFLOWS: ${{ needs.changed.outputs.workflows }}
        run: |
          if [[ "$ARGUMENT_COMMENT_LINT" == "true" || "$WORKFLOWS" == "true" ]]; then
            echo "run=true" >> "$GITHUB_OUTPUT"
            exit 0
          fi

          echo "No argument-comment-lint relevant changes."
          echo "run=false" >> "$GITHUB_OUTPUT"
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
      - name: Run argument comment lint on codex-rs via Bazel
        if: ${{ steps.argument_comment_lint_gate.outputs.run == 'true' }}
        uses: ./.github/actions/run-argument-comment-lint
      - uses: ./.github/actions/setup-bazel-ci
        with:
          target: ${{ runner.os }}
          buildbuddy-api-key: ${{ secrets.BUILDBUDDY_API_KEY }}
          install-test-prereqs: true
      - name: Install Linux sandbox build dependencies
        if: ${{ runner.os == 'Linux' }}
        shell: bash
        run: |
          sudo DEBIAN_FRONTEND=noninteractive apt-get update
          sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
      - name: Run argument comment lint on codex-rs via Bazel
        if: ${{ runner.os != 'Windows' }}
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          bazel_targets="$(./tools/argument-comment-lint/list-bazel-targets.sh)"
          ./.github/scripts/run-bazel-ci.sh \
            -- \
            build \
            --config=argument-comment-lint \
            --keep_going \
            --build_metadata=COMMIT_SHA=${GITHUB_SHA} \
            -- \
            ${bazel_targets}
      - name: Run argument comment lint on codex-rs via Bazel
        if: ${{ runner.os == 'Windows' }}
        env:
          BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
        shell: bash
        run: |
          ./.github/scripts/run-argument-comment-lint-bazel.sh \
            --config=argument-comment-lint \
            --platforms=//:local_windows \
            --keep_going \
            --build_metadata=COMMIT_SHA=${GITHUB_SHA}

  # --- Gatherer job that you mark as the ONLY required status -----------------
  results:

@@ -19,9 +19,6 @@ jobs:
    name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
    runs-on: ${{ matrix.runs_on || matrix.runner }}
    timeout-minutes: 60
    env:
      CARGO_DYLINT_VERSION: 5.0.0
      DYLINT_LINK_VERSION: 5.0.0

    strategy:
      fail-fast: false
@@ -68,8 +65,8 @@ jobs:
        shell: bash
        run: |
          install_root="${RUNNER_TEMP}/argument-comment-lint-tools"
          cargo install --locked cargo-dylint --version "$CARGO_DYLINT_VERSION" --root "$install_root"
          cargo install --locked dylint-link --version "$DYLINT_LINK_VERSION"
          cargo install --locked cargo-dylint --root "$install_root"
          cargo install --locked dylint-link
          echo "INSTALL_ROOT=$install_root" >> "$GITHUB_ENV"

      - name: Cargo build
2 .github/workflows/rust-release-prepare.yml vendored
@@ -40,7 +40,7 @@ jobs:
)

url="${base_url%/}/models?client_version=${client_version}"
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/models-manager/models.json
curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json

- name: Open pull request (if changed)
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8

68 .github/workflows/rust-release-windows.yml vendored
@@ -24,9 +24,7 @@ jobs:
build-windows-binaries:
name: Build Windows binaries - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
runs-on: ${{ matrix.runs_on }}
# Windows release builds can exceed an hour on fat-LTO mainline releases,
# so keep the timeout aligned with the top-level release build headroom.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
defaults:
@@ -42,42 +40,28 @@ jobs:
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: primary
binaries: "codex codex-responses-api-proxy"
build_args: --bin codex --bin codex-responses-api-proxy
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: helpers
binaries: "codex-windows-sandbox-setup codex-command-runner"
runs_on:
group: codex-runners
labels: codex-windows-arm64
- runner: windows-x64
target: x86_64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
runs_on:
group: codex-runners
labels: codex-windows-x64
- runner: windows-arm64
target: aarch64-pc-windows-msvc
bundle: app-server
binaries: "codex-app-server"
build_args: --bin codex-windows-sandbox-setup --bin codex-command-runner
runs_on:
group: codex-runners
labels: codex-windows-arm64
@@ -105,11 +89,7 @@ jobs:
- name: Cargo build (Windows binaries)
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings ${{ matrix.build_args }}

- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
@@ -123,9 +103,13 @@ jobs:
run: |
output_dir="target/${{ matrix.target }}/release/staged-${{ matrix.bundle }}"
mkdir -p "$output_dir"
for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" "$output_dir/${binary}.exe"
done
if [[ "${{ matrix.bundle }}" == "primary" ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$output_dir/codex.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$output_dir/codex-responses-api-proxy.exe"
else
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$output_dir/codex-windows-sandbox-setup.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$output_dir/codex-command-runner.exe"
fi

- name: Upload Windows binaries
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
@@ -139,15 +123,13 @@ jobs:
- build-windows-binaries
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on }}
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
defaults:
run:
working-directory: codex-rs
env:
WINDOWS_BINARIES: "codex codex-responses-api-proxy codex-windows-sandbox-setup codex-command-runner codex-app-server"

strategy:
fail-fast: false
@@ -179,25 +161,19 @@ jobs:
name: windows-binaries-${{ matrix.target }}-helpers
path: codex-rs/target/${{ matrix.target }}/release

- name: Download prebuilt Windows app-server binary
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8
with:
name: windows-binaries-${{ matrix.target }}-app-server
path: codex-rs/target/${{ matrix.target }}/release

- name: Verify binaries
shell: bash
run: |
set -euo pipefail
for binary in ${WINDOWS_BINARIES}; do
ls -lh "target/${{ matrix.target }}/release/${binary}.exe"
done
ls -lh target/${{ matrix.target }}/release/codex.exe
ls -lh target/${{ matrix.target }}/release/codex-responses-api-proxy.exe
ls -lh target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe
ls -lh target/${{ matrix.target }}/release/codex-command-runner.exe

- name: Sign Windows binaries with Azure Trusted Signing
uses: ./.github/actions/windows-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ env.WINDOWS_BINARIES }}
client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_TRUSTED_SIGNING_SUBSCRIPTION_ID }}
@@ -211,10 +187,10 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"

for binary in ${WINDOWS_BINARIES}; do
cp "target/${{ matrix.target }}/release/${binary}.exe" \
"$dest/${binary}-${{ matrix.target }}.exe"
done
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.exe "$dest/codex-responses-api-proxy-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-windows-sandbox-setup.exe "$dest/codex-windows-sandbox-setup-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/codex-command-runner.exe "$dest/codex-command-runner-${{ matrix.target }}.exe"

- name: Install DotSlash
uses: facebook/install-dotslash@1e4e7b3e07eaca387acb98f1d4720e0bee8dbb6a # v2

204 .github/workflows/rust-release.yml vendored
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@c2b55edffaf41a251c410bb32bed22afefa800f1 # 1.92
- name: Validate tag matches Cargo.toml version
shell: bash
run: |
@@ -47,11 +47,9 @@ jobs:

build:
needs: tag-check
name: Build - ${{ matrix.runner }} - ${{ matrix.target }} - ${{ matrix.bundle }}
name: Build - ${{ matrix.runner }} - ${{ matrix.target }}
runs-on: ${{ matrix.runs_on || matrix.runner }}
# Release builds can take a long time, so leave some headroom to avoid
# having to restart the full workflow due to a timeout.
timeout-minutes: 90
timeout-minutes: 60
permissions:
contents: read
id-token: write
@@ -69,53 +67,16 @@ jobs:
include:
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: primary
artifact_name: aarch64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: aarch64-apple-darwin
bundle: app-server
artifact_name: aarch64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: primary
artifact_name: x86_64-apple-darwin
binaries: "codex codex-responses-api-proxy"
build_dmg: "true"
- runner: macos-15-xlarge
target: x86_64-apple-darwin
bundle: app-server
artifact_name: x86_64-apple-darwin-app-server
binaries: "codex-app-server"
build_dmg: "false"
# Release artifacts intentionally ship MUSL-linked Linux binaries.
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: primary
artifact_name: x86_64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl
bundle: app-server
artifact_name: x86_64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: x86_64-unknown-linux-gnu
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: primary
artifact_name: aarch64-unknown-linux-musl
binaries: "codex codex-responses-api-proxy bwrap"
build_dmg: "false"
- runner: ubuntu-24.04-arm
target: aarch64-unknown-linux-musl
bundle: app-server
artifact_name: aarch64-unknown-linux-musl-app-server
binaries: "codex-app-server"
build_dmg: "false"
target: aarch64-unknown-linux-gnu

steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
@@ -250,43 +211,33 @@ jobs:
echo "CXXFLAGS=${cxxflags}" >> "$GITHUB_ENV"

- if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
name: Configure musl rusty_v8 artifact overrides and verify checksums
uses: ./.github/actions/setup-rusty-v8-musl
with:
target: ${{ matrix.target }}

- if: ${{ contains(matrix.target, 'linux') && matrix.bundle == 'primary' }}
name: Build bwrap and export digest
name: Configure musl rusty_v8 artifact overrides
env:
TARGET: ${{ matrix.target }}
shell: bash
run: |
set -euo pipefail
target="${{ matrix.target }}"
cargo build --target "$target" --release --timings --bin bwrap

bwrap_path="target/${target}/release/bwrap"
if [[ ! -f "$bwrap_path" ]]; then
echo "bwrap binary ${bwrap_path} not found"
exit 1
fi

digest="$(sha256sum "$bwrap_path" | awk '{print $1}')"
echo "CODEX_BWRAP_SHA256=${digest}" >> "$GITHUB_ENV"
echo "Built bwrap ${bwrap_path} with sha256:${digest}"
version="$(python3 "${GITHUB_WORKSPACE}/.github/scripts/rusty_v8_bazel.py" resolved-v8-crate-version)"
release_tag="rusty-v8-v${version}"
base_url="https://github.com/openai/codex/releases/download/${release_tag}"
archive="https://github.com/openai/codex/releases/download/rusty-v8-v${version}/librusty_v8_release_${TARGET}.a.gz"
binding_dir="${RUNNER_TEMP}/rusty_v8"
binding_path="${binding_dir}/src_binding_release_${TARGET}.rs"
mkdir -p "${binding_dir}"
curl -fsSL "${base_url}/src_binding_release_${TARGET}.rs" -o "${binding_path}"
echo "RUSTY_V8_ARCHIVE=${archive}" >> "$GITHUB_ENV"
echo "RUSTY_V8_SRC_BINDING_PATH=${binding_path}" >> "$GITHUB_ENV"

- name: Cargo build
shell: bash
run: |
build_args=()
for binary in ${{ matrix.binaries }}; do
build_args+=(--bin "$binary")
done
echo "CARGO_PROFILE_RELEASE_LTO: ${CARGO_PROFILE_RELEASE_LTO}"
cargo build --target ${{ matrix.target }} --release --timings "${build_args[@]}"
cargo build --target ${{ matrix.target }} --release --timings --bin codex --bin codex-responses-api-proxy

- name: Upload Cargo timings
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: cargo-timings-rust-release-${{ matrix.target }}-${{ matrix.bundle }}
name: cargo-timings-rust-release-${{ matrix.target }}
path: codex-rs/target/**/cargo-timings/cargo-timing.html
if-no-files-found: warn

@@ -296,14 +247,12 @@ jobs:
with:
target: ${{ matrix.target }}
artifacts-dir: ${{ github.workspace }}/codex-rs/target/${{ matrix.target }}/release
binaries: ${{ matrix.binaries }}

- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (binaries)
uses: ./.github/actions/macos-code-sign
with:
target: ${{ matrix.target }}
binaries: ${{ matrix.binaries }}
sign-binaries: "true"
sign-dmg: "false"
apple-certificate: ${{ secrets.APPLE_CERTIFICATE_P12 }}
@@ -312,7 +261,7 @@ jobs:
apple-notarization-key-id: ${{ secrets.APPLE_NOTARIZATION_KEY_ID }}
apple-notarization-issuer-id: ${{ secrets.APPLE_NOTARIZATION_ISSUER_ID }}

- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: Build macOS dmg
shell: bash
run: |
@@ -327,17 +276,23 @@ jobs:
# The previous "MacOS code signing (binaries)" step signs + notarizes the
# built artifacts in `${release_dir}`. This step packages *those same*
# signed binaries into a dmg.
codex_binary_path="${release_dir}/codex"
proxy_binary_path="${release_dir}/codex-responses-api-proxy"

rm -rf "$dmg_root"
mkdir -p "$dmg_root"

for binary in ${{ matrix.binaries }}; do
binary_path="${release_dir}/${binary}"
if [[ ! -f "${binary_path}" ]]; then
echo "Binary ${binary_path} not found"
exit 1
fi
ditto "${binary_path}" "${dmg_root}/${binary}"
done
if [[ ! -f "$codex_binary_path" ]]; then
echo "Binary $codex_binary_path not found"
exit 1
fi
if [[ ! -f "$proxy_binary_path" ]]; then
echo "Binary $proxy_binary_path not found"
exit 1
fi

ditto "$codex_binary_path" "${dmg_root}/codex"
ditto "$proxy_binary_path" "${dmg_root}/codex-responses-api-proxy"

rm -f "$dmg_path"
hdiutil create \
@@ -352,7 +307,7 @@ jobs:
exit 1
fi

- if: ${{ runner.os == 'macOS' && matrix.build_dmg == 'true' }}
- if: ${{ runner.os == 'macOS' }}
name: MacOS code signing (dmg)
uses: ./.github/actions/macos-code-sign
with:
@@ -371,26 +326,15 @@ jobs:
dest="dist/${{ matrix.target }}"
mkdir -p "$dest"

for binary in ${{ matrix.binaries }}; do
cp "target/${{ matrix.target }}/release/${binary}" "$dest/${binary}-${{ matrix.target }}"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp "target/${{ matrix.target }}/release/${binary}.sigstore" \
"$dest/${binary}-${{ matrix.target }}.sigstore"
fi
done
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy "$dest/codex-responses-api-proxy-${{ matrix.target }}"

if [[ "${{ matrix.target }}" == *linux* && "${{ matrix.bundle }}" == "primary" ]]; then
bundle_root="${RUNNER_TEMP}/codex-${{ matrix.target }}-bundle"
rm -rf "$bundle_root"
mkdir -p "$bundle_root/codex-resources"
cp "$dest/codex-${{ matrix.target }}" "$bundle_root/codex"
cp "$dest/bwrap-${{ matrix.target }}" "$bundle_root/codex-resources/bwrap"
chmod 0755 "$bundle_root/codex" "$bundle_root/codex-resources/bwrap"
tar -C "$bundle_root" -cf - codex codex-resources/bwrap |
zstd -T0 -19 -o "$dest/codex-${{ matrix.target }}-bundle.tar.zst"
if [[ "${{ matrix.target }}" == *linux* ]]; then
cp target/${{ matrix.target }}/release/codex.sigstore "$dest/codex-${{ matrix.target }}.sigstore"
cp target/${{ matrix.target }}/release/codex-responses-api-proxy.sigstore "$dest/codex-responses-api-proxy-${{ matrix.target }}.sigstore"
fi

if [[ "${{ matrix.build_dmg }}" == "true" ]]; then
if [[ "${{ matrix.target }}" == *apple-darwin ]]; then
cp target/${{ matrix.target }}/release/codex-${{ matrix.target }}.dmg "$dest/codex-${{ matrix.target }}.dmg"
fi

@@ -413,7 +357,7 @@ jobs:
base="$(basename "$f")"
# Skip files that are already archives (shouldn't happen, but be
# safe).
if [[ "$base" == *.tar.gz || "$base" == *.tar.zst || "$base" == *.zip || "$base" == *.dmg ]]; then
if [[ "$base" == *.tar.gz || "$base" == *.zip || "$base" == *.dmg ]]; then
continue
fi

@@ -432,9 +376,9 @@ jobs:

- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
with:
name: ${{ matrix.artifact_name }}
# Upload the per-binary .zst files, .tar.gz equivalents, and any
# prebuilt archives staged above.
name: ${{ matrix.target }}
# Upload the per-binary .zst files as well as the new .tar.gz
# equivalents we generated in the previous step.
path: |
codex-rs/dist/${{ matrix.target }}/*

@@ -682,59 +626,11 @@ jobs:
prefix="${NPM_TAG}-"
fi

root_tarball="dist/npm/codex-npm-${VERSION}.tgz"
sdk_tarball="dist/npm/codex-sdk-npm-${VERSION}.tgz"
# Keep this list in sync with CODEX_PLATFORM_PACKAGES in
# codex-cli/scripts/build_npm_package.py. The root wrapper advances
# @openai/codex@latest as soon as it publishes, so every platform
# package it aliases must already exist in the registry first.
platform_tarballs=(
"dist/npm/codex-npm-linux-x64-${VERSION}.tgz"
"dist/npm/codex-npm-linux-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-x64-${VERSION}.tgz"
"dist/npm/codex-npm-darwin-arm64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-x64-${VERSION}.tgz"
"dist/npm/codex-npm-win32-arm64-${VERSION}.tgz"
)

for required_tarball in "${platform_tarballs[@]}" "${root_tarball}"; do
if [[ ! -f "${required_tarball}" ]]; then
echo "Missing npm tarball: ${required_tarball}"
exit 1
fi
done

shopt -s nullglob
other_tarballs=()
for tarball in dist/npm/*-"${VERSION}".tgz; do
if [[ "${tarball}" == "${root_tarball}" || "${tarball}" == "${sdk_tarball}" ]]; then
continue
fi

is_platform_tarball=false
for platform_tarball in "${platform_tarballs[@]}"; do
if [[ "${tarball}" == "${platform_tarball}" ]]; then
is_platform_tarball=true
break
fi
done
if [[ "${is_platform_tarball}" == true ]]; then
continue
fi

other_tarballs+=("${tarball}")
done

# Publish the platform packages before the root CLI wrapper. The root
# wrapper advances @openai/codex@latest, so it should only publish
# after the optional dependency versions it references exist.
tarballs=(
"${platform_tarballs[@]}"
"${other_tarballs[@]}"
"${root_tarball}"
)
if [[ -f "${sdk_tarball}" ]]; then
tarballs+=("${sdk_tarball}")
tarballs=(dist/npm/*-"${VERSION}".tgz)
if [[ ${#tarballs[@]} -eq 0 ]]; then
echo "No npm tarballs found in dist/npm for version ${VERSION}"
exit 1
fi

for tarball in "${tarballs[@]}"; do

43 .github/workflows/rusty-v8-release.yml vendored
@@ -1,12 +1,20 @@
name: rusty-v8-release

on:
push:
tags:
- "rusty-v8-v*.*.*"
workflow_dispatch:
inputs:
release_tag:
description: Optional release tag. Defaults to rusty-v8-v<resolved_v8_version>.
required: false
type: string
publish:
description: Publish the staged musl artifacts to a GitHub release.
required: false
default: true
type: boolean

concurrency:
group: ${{ github.workflow }}::${{ github.ref_name }}
group: ${{ github.workflow }}::${{ inputs.release_tag || github.run_id }}
cancel-in-progress: false

jobs:
@@ -35,17 +43,15 @@ jobs:
- name: Resolve release tag
id: release_tag
env:
GITHUB_REF_NAME: ${{ github.ref_name }}
RELEASE_TAG_INPUT: ${{ inputs.release_tag }}
V8_VERSION: ${{ steps.v8_version.outputs.version }}
shell: bash
run: |
set -euo pipefail

expected_release_tag="rusty-v8-v${V8_VERSION}"
release_tag="${GITHUB_REF_NAME}"
if [[ "${release_tag}" != "${expected_release_tag}" ]]; then
echo "Tag ${release_tag} does not match resolved v8 crate version ${V8_VERSION}." >&2
exit 1
release_tag="${RELEASE_TAG_INPUT}"
if [[ -z "${release_tag}" ]]; then
release_tag="rusty-v8-v${V8_VERSION}"
fi

echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT"
@@ -72,9 +78,7 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3

- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6
@@ -105,7 +109,6 @@ jobs:
-c
opt
"--platforms=@llvm//platforms:${PLATFORM}"
--config=v8-release-compat
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
@@ -129,7 +132,6 @@ jobs:
--platform "${PLATFORM}" \
--target "${TARGET}" \
--compilation-mode opt \
--bazel-config v8-release-compat \
--output-dir "dist/${TARGET}"

- name: Upload staged musl artifacts
@@ -139,6 +141,7 @@ jobs:
path: dist/${{ matrix.target }}/*

publish-release:
if: ${{ inputs.publish }}
needs:
- metadata
- build
@@ -148,6 +151,16 @@ jobs:
actions: read

steps:
- name: Ensure publishing from default branch
if: ${{ github.ref_name != github.event.repository.default_branch }}
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
shell: bash
run: |
set -euo pipefail
echo "Publishing is only allowed from ${DEFAULT_BRANCH}; current ref is ${GITHUB_REF_NAME}." >&2
exit 1

- name: Ensure release tag is new
env:
GH_TOKEN: ${{ github.token }}

8 .github/workflows/v8-canary.yml vendored
@@ -3,7 +3,6 @@ name: v8-canary
on:
pull_request:
paths:
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
@@ -17,7 +16,6 @@ on:
branches:
- main
paths:
- ".github/actions/setup-bazel-ci/**"
- ".github/scripts/rusty_v8_bazel.py"
- ".github/workflows/rusty-v8-release.yml"
- ".github/workflows/v8-canary.yml"
@@ -77,9 +75,7 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

- name: Set up Bazel
uses: ./.github/actions/setup-bazel-ci
with:
target: ${{ matrix.target }}
uses: bazelbuild/setup-bazelisk@6ecf4fd8b7d1f9721785f1dd656a689acf9add47 # v3

- name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6
@@ -105,7 +101,6 @@ jobs:
bazel_args=(
build
"--platforms=@llvm//platforms:${PLATFORM}"
--config=v8-release-compat
"${pair_target}"
"${extra_targets[@]}"
--build_metadata=COMMIT_SHA=$(git rev-parse HEAD)
@@ -128,7 +123,6 @@ jobs:
python3 .github/scripts/rusty_v8_bazel.py stage-release-pair \
--platform "${PLATFORM}" \
--target "${TARGET}" \
--bazel-config v8-release-compat \
--output-dir "dist/${TARGET}"

- name: Upload staged musl artifacts

2 .gitignore vendored
@@ -52,7 +52,6 @@ yarn-error.log*
# env
.env*
!.env.example
.venv/

# package
*.tgz
@@ -92,3 +91,4 @@ CHANGELOG.ignore.md
# Python bytecode files
__pycache__/
*.pyc

@@ -19,12 +19,6 @@ In the codex-rs folder where the rust code lives:
- You can run `just argument-comment-lint` to run the lint check locally. This is powered by Bazel, so running it the first time can be slow if Bazel is not warmed up, though incremental invocations should take <15s. Most of the time, it is best to update the PR and let CI take responsibility for checking this (or run it asynchronously in the background after submitting the PR). Note CI checks all three platforms, which the local run does not.
- When possible, make `match` statements exhaustive and avoid wildcard arms.
- Newly added traits should include doc comments that explain their role and how implementations are expected to use them.
- Discourage both `#[async_trait]` and `#[allow(async_fn_in_trait)]` in Rust traits.
- Prefer native RPITIT trait methods with explicit `Send` bounds on the returned future, as in `3c7f013f9735` / `#16630`.
- Preferred trait shape (a sketch follows this list):
  `fn foo(&self, ...) -> impl std::future::Future<Output = T> + Send;`
- Implementations may still use `async fn foo(&self, ...) -> T` when they satisfy that contract.
- Do not use `#[allow(async_fn_in_trait)]` as a shortcut around spelling the future contract explicitly.
- When writing tests, prefer comparing the equality of entire objects over fields one by one.
- When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
- Prefer private modules and explicitly exported public crate API.
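A minimal sketch of that preferred RPITIT shape, with a hypothetical `ConfigStore` trait (illustrative names only, not code from this repository):

```rust
use std::future::Future;

/// Hypothetical trait illustrating the guidance above: the trait method
/// returns `impl Future<...> + Send` instead of using `#[async_trait]`
/// or `#[allow(async_fn_in_trait)]`.
trait ConfigStore {
    fn load(&self, key: &str) -> impl Future<Output = Option<String>> + Send;
}

struct InMemoryStore;

impl ConfigStore for InMemoryStore {
    // Implementations may still use `async fn` so long as the resulting
    // future satisfies the `Send` contract spelled out on the trait.
    async fn load(&self, key: &str) -> Option<String> {
        Some(key.to_owned())
    }
}
```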
@@ -50,8 +44,6 @@ In the codex-rs folder where the rust code lives:
  `codex-rs/tui/src/bottom_pane/mod.rs`, and similarly central orchestration modules.
- When extracting code from a large module, move the related tests and module/type docs toward
  the new implementation so the invariants stay close to the code that owns them.
- Avoid adding new standalone methods to `codex-rs/tui/src/chatwidget.rs` unless the change is
  trivial; prefer new modules/files and keep `chatwidget.rs` focused on orchestration.
- When running Rust commands (e.g. `just fix` or `cargo test`) be patient with the command and never try to kill them using the PID. Rust lock can make the execution slow, this is expected.

Run `just fmt` (in `codex-rs` directory) automatically after you have finished making Rust code changes; do not ask for approval to run it. Additionally, run the tests:

34 BUILD.bazel
@@ -30,40 +30,6 @@ platform(
parents = ["@platforms//host"],
)

platform(
name = "windows_x86_64_gnullvm",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
)

platform(
name = "windows_x86_64_msvc",
constraint_values = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
)

toolchain(
name = "windows_gnullvm_tests_on_msvc_host_toolchain",
exec_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_msvc",
],
target_compatible_with = [
"@platforms//cpu:x86_64",
"@platforms//os:windows",
"@rules_rs//rs/experimental/platforms/constraints:windows_gnullvm",
],
toolchain = "@bazel_tools//tools/test:empty_toolchain",
toolchain_type = "@bazel_tools//tools/test:default_test_toolchain_type",
)

alias(
name = "rbe",
actual = "@rbe_platform",

34 MODULE.bazel
@@ -1,8 +1,8 @@
module(name = "codex")

bazel_dep(name = "bazel_skylib", version = "1.9.0")
bazel_dep(name = "bazel_skylib", version = "1.8.2")
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "llvm", version = "0.7.1")
bazel_dep(name = "llvm", version = "0.6.8")
# The upstream LLVM archive contains a few unix-only symlink entries and is
# missing a couple of MinGW compatibility archives that windows-gnullvm needs
# during extraction and linking, so patch it until upstream grows native support.
@@ -78,8 +78,8 @@ use_repo(osx, "macos_sdk")
bazel_dep(name = "apple_support", version = "2.1.0")
bazel_dep(name = "rules_cc", version = "0.2.16")
bazel_dep(name = "rules_platform", version = "0.1.0")
bazel_dep(name = "rules_rs", version = "0.0.58")
# `rules_rs` still does not model `windows-gnullvm` as a distinct Windows exec
bazel_dep(name = "rules_rs", version = "0.0.43")
# `rules_rs` 0.0.43 does not model `windows-gnullvm` as a distinct Windows exec
# platform, so patch it until upstream grows that support for both x86_64 and
# aarch64.
single_version_override(
@@ -87,9 +87,10 @@ single_version_override(
patch_strip = 1,
patches = [
"//patches:rules_rs_windows_gnullvm_exec.patch",
"//patches:rules_rs_delete_git_worktree_pointer.patch",
"//patches:rules_rs_windows_exec_linker.patch",
],
version = "0.0.58",
version = "0.0.43",
)

rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
@@ -107,6 +108,7 @@ rules_rust.patch(
"//patches:rules_rust_windows_exec_bin_target.patch",
"//patches:rules_rust_windows_exec_std.patch",
"//patches:rules_rust_windows_exec_rustc_dev_rlib.patch",
"//patches:rules_rust_repository_set_exec_constraints.patch",
],
strip = 1,
)
@@ -327,18 +329,6 @@ crate.annotation(
"RUSTY_V8_SRC_BINDING_PATH": "$(execpath @v8_targets//:rusty_v8_binding_for_target)",
},
crate = "v8",
# Keep the Rust feature aligned with the source-built Bazel artifacts.
# Windows MSVC still consumes upstream non-sandboxed prebuilts.
crate_features_select = {
"aarch64-apple-darwin": ["v8_enable_sandbox"],
"aarch64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"aarch64-unknown-linux-gnu": ["v8_enable_sandbox"],
"aarch64-unknown-linux-musl": ["v8_enable_sandbox"],
"x86_64-apple-darwin": ["v8_enable_sandbox"],
"x86_64-pc-windows-gnullvm": ["v8_enable_sandbox"],
"x86_64-unknown-linux-gnu": ["v8_enable_sandbox"],
"x86_64-unknown-linux-musl": ["v8_enable_sandbox"],
},
gen_build_script = "on",
patch_args = ["-p1"],
patches = [
@@ -433,7 +423,6 @@ http_archive(
http_file(
name = "rusty_v8_146_4_0_aarch64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_aarch64-apple-darwin.a.gz",
sha256 = "bfe2c9be32a56c28546f0f965825ee68fbf606405f310cc4e17b448a568cf98a",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-apple-darwin.a.gz",
],
@@ -442,7 +431,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
sha256 = "dbf165b07c81bdb054bc046b43d23e69fcf7bcc1a4c1b5b4776983a71062ecd8",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_aarch64-unknown-linux-gnu.a.gz",
],
@@ -451,7 +439,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
sha256 = "ed13363659c6d08583ac8fdc40493445c5767d8b94955a4d5d7bb8d5a81f6bf8",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_aarch64-pc-windows-msvc.lib.gz",
],
@@ -460,7 +447,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_apple_darwin_archive",
downloaded_file_path = "librusty_v8_release_x86_64-apple-darwin.a.gz",
sha256 = "630cd240f1bbecdb071417dc18387ab81cf67c549c1c515a0b4fcf9eba647bb7",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-apple-darwin.a.gz",
],
@@ -469,7 +455,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_gnu_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
sha256 = "e64b4d99e4ae293a2e846244a89b80178ba10382c13fb591c1fa6968f5291153",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/librusty_v8_release_x86_64-unknown-linux-gnu.a.gz",
],
@@ -478,7 +463,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_pc_windows_msvc_archive",
downloaded_file_path = "rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
sha256 = "90a9a2346acd3685a355e98df85c24dbe406cb124367d16259a4b5d522621862",
urls = [
"https://github.com/denoland/rusty_v8/releases/download/v146.4.0/rusty_v8_release_x86_64-pc-windows-msvc.lib.gz",
],
@@ -487,7 +471,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
sha256 = "27a08ed26c34297bfd93e514692ccc44b85f8b15c6aa39cf34e784f84fb37e8e",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_aarch64-unknown-linux-musl.a.gz",
],
@@ -496,7 +479,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_aarch64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_aarch64-unknown-linux-musl.rs",
sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_aarch64-unknown-linux-musl.rs",
],
@@ -505,7 +487,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_archive",
downloaded_file_path = "librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
sha256 = "20d8271ad712323d352c1383c36e3c4b755abc41ece35819c49c75ec7134d2f8",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/librusty_v8_release_x86_64-unknown-linux-musl.a.gz",
],
@@ -514,7 +495,6 @@ http_file(
http_file(
name = "rusty_v8_146_4_0_x86_64_unknown_linux_musl_binding",
downloaded_file_path = "src_binding_release_x86_64-unknown-linux-musl.rs",
sha256 = "09f8900ced8297c229246c7a50b2e0ec23c54d0a554f369619cc29863f38dd1a",
urls = [
"https://github.com/openai/codex/releases/download/rusty-v8-v146.4.0/src_binding_release_x86_64-unknown-linux-musl.rs",
],

231 MODULE.bazel.lock generated
File diff suppressed because one or more lines are too long
3 NOTICE
@@ -4,3 +4,6 @@ Copyright 2025 OpenAI
This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license.
Copyright (c) 2016-2022 Florian Dehau
Copyright (c) 2023-2025 The Ratatui Developers

This project includes Meriyah parser assets from [meriyah](https://github.com/meriyah/meriyah), licensed under the ISC license.
Copyright (c) 2019 and later, KFlash and others.

@@ -11,7 +11,3 @@ Our security program is managed through Bugcrowd, and we ask that any validated
## Vulnerability Disclosure Program

Our Vulnerability Program Guidelines are defined on our [Bugcrowd program page](https://bugcrowd.com/engagements/openai).

## How to operate CODEX safely

For details on Codex security boundaries, including sandboxing, approvals, and network controls, see [Agent approvals & security](https://developers.openai.com/codex/agent-approvals-security).

@@ -4,14 +4,20 @@
# version_regex matches against the CLI version (env!("CARGO_PKG_VERSION")); omit to apply to all versions.
# target_app specify which app should display the announcement (cli, vsce, ...).

# Test announcement only for local build version until 2027-05-10 excluded
[[announcements]]
content = "Welcome to Codex! Check out the new onboarding flow."
from_date = "2024-10-01"
to_date = "2024-10-15"
target_app = "cli"

# Test announcement only for local build version until 2026-01-10 excluded (past)
[[announcements]]
content = "This is a test announcement"
version_regex = "^0\\.0\\.0$"
to_date = "2027-05-10"
to_date = "2026-05-10"

[[announcements]]
content = "Update Required - This version will no longer be supported starting May 8th. Please upgrade to the latest version (https://github.com/openai/codex/releases/latest) using your preferred package manager."
# Matches 0.x.y versions from 0.0.y through 0.119.y; excludes 0.120.0 and newer.
version_regex = "^0\\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\\."
to_date = "2026-05-08"
content = "**BREAKING NEWS**: `gpt-5.3-codex` is out! Upgrade to `0.98.0` for a faster, smarter, more steerable agent."
from_date = "2026-02-01"
to_date = "2026-02-16"
version_regex = "^0\\.(?:[0-9]|[1-8][0-9]|9[0-7])\\."

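As a quick sanity check of the commented `version_regex` above, a hypothetical standalone snippet using the `regex` crate (note the TOML string double-escapes the dots; in Rust source the pattern uses single backslashes):

```rust
use regex::Regex;

fn main() {
    // The "0.0.y through 0.119.y" gating pattern from the announcement above.
    let gate = Regex::new(r"^0\.(?:[0-9]|[1-9][0-9]|1[01][0-9])\.").unwrap();
    for version in ["0.9.1", "0.119.3", "0.120.0", "1.0.0"] {
        // Prints: true, true, false, false — 0.120.0 and newer (and 1.x)
        // fall outside the alternation and are excluded.
        println!("{version}: {}", gate.is_match(version));
    }
}
```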
1 codex-cli/.dockerignore Normal file
@@ -0,0 +1 @@
node_modules/

59 codex-cli/Dockerfile Normal file
@@ -0,0 +1,59 @@
FROM node:24-slim

ARG TZ
ENV TZ="$TZ"

# Install basic development tools, ca-certificates, and iptables/ipset, then clean up apt cache to reduce image size
RUN apt-get update && apt-get install -y --no-install-recommends \
aggregate \
ca-certificates \
curl \
dnsutils \
fzf \
gh \
git \
gnupg2 \
iproute2 \
ipset \
iptables \
jq \
less \
man-db \
procps \
unzip \
ripgrep \
zsh \
&& rm -rf /var/lib/apt/lists/*

# Ensure default node user has access to /usr/local/share
RUN mkdir -p /usr/local/share/npm-global && \
chown -R node:node /usr/local/share

ARG USERNAME=node

# Set up non-root user
USER node

# Install global packages
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# Install codex
COPY dist/codex.tgz codex.tgz
RUN npm install -g codex.tgz \
&& npm cache clean --force \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/node_modules/.cache \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/tests \
&& rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs

# Inside the container we consider the environment already sufficiently locked
# down, therefore instruct Codex CLI to allow running without sandboxing.
ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1

# Copy and set up firewall script as root.
USER root
COPY scripts/init_firewall.sh /usr/local/bin/
RUN chmod 500 /usr/local/bin/init_firewall.sh

# Drop back to non-root.
USER node
@@ -18,5 +18,5 @@
"url": "git+https://github.com/openai/codex.git",
"directory": "codex-cli"
},
"packageManager": "pnpm@10.33.0+sha512.10568bb4a6afb58c9eb3630da90cc9516417abebd3fabbe6739f0ae795728da1491e9db5a544c76ad8eb7570f5c4bb3d6c637b2cb41bfdcdb47fa823c8649319"
"packageManager": "pnpm@10.29.3+sha512.498e1fb4cca5aa06c1dcf2611e6fafc50972ffe7189998c409e90de74566444298ffe43e6cd2acdc775ba1aa7cc5e092a8b7054c811ba8c5770f84693d33d2dc"
}

16 codex-cli/scripts/build_container.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/bash

set -euo pipefail

SCRIPT_DIR=$(realpath "$(dirname "$0")")
trap "popd >> /dev/null" EXIT
pushd "$SCRIPT_DIR/.." >> /dev/null || {
echo "Error: Failed to change directory to $SCRIPT_DIR/.."
exit 1
}
pnpm install
pnpm run build
rm -rf ./dist/openai-codex-*.tgz
pnpm pack --pack-destination ./dist
mv ./dist/openai-codex-*.tgz ./dist/codex.tgz
docker build -t codex -f "./Dockerfile" .
@@ -69,8 +69,8 @@ PACKAGE_EXPANSIONS: dict[str, list[str]] = {

PACKAGE_NATIVE_COMPONENTS: dict[str, list[str]] = {
"codex": [],
"codex-linux-x64": ["bwrap", "codex", "rg"],
"codex-linux-arm64": ["bwrap", "codex", "rg"],
"codex-linux-x64": ["codex", "rg"],
"codex-linux-arm64": ["codex", "rg"],
"codex-darwin-x64": ["codex", "rg"],
"codex-darwin-arm64": ["codex", "rg"],
"codex-win32-x64": ["codex", "rg", "codex-windows-sandbox-setup", "codex-command-runner"],
@@ -87,7 +87,6 @@ PACKAGE_TARGET_FILTERS: dict[str, str] = {
PACKAGE_CHOICES = tuple(PACKAGE_NATIVE_COMPONENTS)

COMPONENT_DEST_DIR: dict[str, str] = {
"bwrap": "codex-resources",
"codex": "codex",
"codex-responses-api-proxy": "codex-responses-api-proxy",
"codex-windows-sandbox-setup": "codex",
@@ -138,16 +137,6 @@ def parse_args() -> argparse.Namespace:
type=Path,
help="Directory containing pre-installed native binaries to bundle (vendor root).",
)
parser.add_argument(
"--allow-missing-native-component",
dest="allow_missing_native_components",
action="append",
default=[],
help=(
"Native component that may be absent from --vendor-src. Intended for CI "
"compatibility with older artifact workflows; releases should not use this."
),
)
return parser.parse_args()

@@ -188,7 +177,6 @@ def main() -> int:
staging_dir,
native_components,
target_filter={target_filter} if target_filter else None,
allow_missing_components=set(args.allow_missing_native_components),
)

if release_version:
@@ -377,14 +365,12 @@ def copy_native_binaries(
staging_dir: Path,
components: list[str],
target_filter: set[str] | None = None,
allow_missing_components: set[str] | None = None,
) -> None:
vendor_src = vendor_src.resolve()
if not vendor_src.exists():
raise RuntimeError(f"Vendor source directory not found: {vendor_src}")

components_set = {component for component in components if component in COMPONENT_DEST_DIR}
allow_missing_components = allow_missing_components or set()
if not components_set:
return

@@ -413,8 +399,6 @@ def copy_native_binaries(

src_component_dir = target_dir / dest_dir_name
if not src_component_dir.exists():
if component in allow_missing_components:
continue
raise RuntimeError(
f"Missing native component '{component}' in vendor source: {src_component_dir}"
)

@@ -1,5 +1,5 @@
#!/usr/bin/env python3
"""Install Codex native binaries (Rust CLI, bwrap, and ripgrep helpers)."""
"""Install Codex native binaries (Rust CLI plus ripgrep helpers)."""

import argparse
from contextlib import contextmanager
@@ -42,15 +42,8 @@ class BinaryComponent:

WINDOWS_TARGETS = tuple(target for target in BINARY_TARGETS if "windows" in target)
LINUX_TARGETS = tuple(target for target in BINARY_TARGETS if "linux" in target)

BINARY_COMPONENTS = {
"bwrap": BinaryComponent(
artifact_prefix="bwrap",
dest_dir="codex-resources",
binary_basename="bwrap",
targets=LINUX_TARGETS,
),
"codex": BinaryComponent(
artifact_prefix="codex",
dest_dir="codex",
@@ -142,7 +135,7 @@ def parse_args() -> argparse.Namespace:
choices=tuple(list(BINARY_COMPONENTS) + ["rg"]),
help=(
"Limit installation to the specified components."
" May be repeated. Defaults to bwrap, codex, codex-windows-sandbox-setup,"
" May be repeated. Defaults to codex, codex-windows-sandbox-setup,"
" codex-command-runner, and rg."
),
)
@@ -166,7 +159,6 @@ def main() -> int:
vendor_dir.mkdir(parents=True, exist_ok=True)

components = args.components or [
"bwrap",
"codex",
"codex-windows-sandbox-setup",
"codex-command-runner",

@@ -92,4 +92,4 @@ quoted_args=""
for arg in "$@"; do
quoted_args+=" $(printf '%q' "$arg")"
done
docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --sandbox workspace-write --ask-for-approval on-request ${quoted_args}"
docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --full-auto ${quoted_args}"

@@ -1,11 +1,6 @@
[advisories]
# Reviewed 2026-04-15. Keep this list in sync with ../deny.toml.
ignore = [
"RUSTSEC-2024-0388", # derivative 2.2.0 via starlark; upstream crate is unmaintained
"RUSTSEC-2025-0057", # fxhash 0.2.1 via starlark_map; upstream crate is unmaintained
"RUSTSEC-2024-0436", # paste 1.0.15 via starlark/ratatui; upstream crate is unmaintained
"RUSTSEC-2024-0320", # yaml-rust via syntect; remove when syntect drops or updates it
"RUSTSEC-2025-0141", # bincode via syntect; remove when syntect drops or updates it
"RUSTSEC-2026-0118", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
"RUSTSEC-2026-0119", # hickory-proto via rama-dns/rama-tcp; remove when rama updates to hickory 0.26.1 or hickory-net
]

||||
@@ -8,11 +8,6 @@ max-threads = 1
|
||||
[test-groups.app_server_integration]
|
||||
max-threads = 1
|
||||
|
||||
[test-groups.core_apply_patch_cli_integration]
|
||||
max-threads = 1
|
||||
|
||||
[test-groups.windows_sandbox_legacy_sessions]
|
||||
max-threads = 1
|
||||
|
||||
[[profile.default.overrides]]
|
||||
# Do not add new tests here
|
||||
@@ -32,15 +27,3 @@ test-group = 'app_server_protocol_codegen'
|
||||
# Keep the library unit tests parallel.
|
||||
filter = 'package(codex-app-server) & kind(test)'
|
||||
test-group = 'app_server_integration'
|
||||
|
||||
[[profile.default.overrides]]
|
||||
# These tests exercise full Codex turns and apply_patch execution, and they are
|
||||
# sensitive to Windows runner process-startup stalls when many cases launch at once.
|
||||
filter = 'package(codex-core) & kind(test) & test(apply_patch_cli)'
|
||||
test-group = 'core_apply_patch_cli_integration'
|
||||
|
||||
[[profile.default.overrides]]
|
||||
# These tests create restricted-token Windows child processes and private desktops.
|
||||
# Serialize them to avoid exhausting Windows session/global desktop resources in CI.
|
||||
filter = 'package(codex-windows-sandbox) & test(legacy_)'
|
||||
test-group = 'windows_sandbox_legacy_sessions'
|
||||
|
||||
2 codex-rs/.github/workflows/cargo-audit.yml vendored
@@ -17,7 +17,7 @@ jobs:
working-directory: codex-rs
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@a0b273b48ed29de4470960879e8381ff45632f26 # 1.93.0
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo-audit
uses: taiki-e/install-action@v2
with:

@@ -1,5 +1,6 @@
exports_files([
"clippy.toml",
"node-version.txt",
])

filegroup(

2467 codex-rs/Cargo.lock generated
File diff suppressed because it is too large
@@ -1,16 +1,10 @@
[workspace]
members = [
"aws-auth",
"analytics",
"agent-graph-store",
"agent-identity",
"backend-client",
"builtin-mcps",
"bwrap",
"ansi-escape",
"async-utils",
"app-server",
"app-server-transport",
"app-server-client",
"app-server-protocol",
"app-server-test-client",
@@ -19,7 +13,6 @@ members = [
"arg0",
"feedback",
"features",
"install-context",
"codex-backend-openapi-models",
"code-mode",
"cloud-requirements",
@@ -34,18 +27,14 @@ members = [
"shell-escalation",
"skills",
"core",
"core-api",
"core-plugins",
"core-skills",
"hooks",
"instructions",
"secrets",
"exec",
"file-system",
"exec-server",
"execpolicy",
"execpolicy-legacy",
"external-agent-migration",
"external-agent-sessions",
"keyring-store",
"file-search",
"linux-sandbox",
@@ -53,9 +42,6 @@ members = [
"login",
"codex-mcp",
"mcp-server",
"memories/mcp",
"memories/read",
"memories/write",
"model-provider-info",
"models-manager",
"network-proxy",
@@ -64,7 +50,6 @@ members = [
"protocol",
"realtime-webrtc",
"rollout",
"rollout-trace",
"rmcp-client",
"responses-api-proxy",
"response-debug-context",
@@ -101,13 +86,8 @@ members = [
"codex-api",
"state",
"terminal-detection",
"test-binary-support",
"thread-manager-sample",
"thread-store",
"uds",
"codex-experimental-api-macros",
"plugin",
"model-provider",
]
resolver = "2"

@@ -124,13 +104,9 @@ license = "Apache-2.0"
# Internal
app_test_support = { path = "app-server/tests/common" }
codex-analytics = { path = "analytics" }
codex-agent-graph-store = { path = "agent-graph-store" }
codex-agent-identity = { path = "agent-identity" }
codex-ansi-escape = { path = "ansi-escape" }
codex-api = { path = "codex-api" }
codex-aws-auth = { path = "aws-auth" }
codex-app-server = { path = "app-server" }
codex-app-server-transport = { path = "app-server-transport" }
codex-app-server-client = { path = "app-server-client" }
codex-app-server-protocol = { path = "app-server-protocol" }
codex-app-server-test-client = { path = "app-server-test-client" }
@@ -138,7 +114,6 @@ codex-apply-patch = { path = "apply-patch" }
codex-arg0 = { path = "arg0" }
codex-async-utils = { path = "async-utils" }
codex-backend-client = { path = "backend-client" }
codex-builtin-mcps = { path = "builtin-mcps" }
codex-chatgpt = { path = "chatgpt" }
codex-cli = { path = "cli" }
codex-client = { path = "codex-client" }
@@ -150,30 +125,21 @@ codex-code-mode = { path = "code-mode" }
codex-config = { path = "config" }
codex-connectors = { path = "connectors" }
codex-core = { path = "core" }
codex-core-api = { path = "core-api" }
codex-core-plugins = { path = "core-plugins" }
codex-core-skills = { path = "core-skills" }
codex-exec = { path = "exec" }
codex-file-system = { path = "file-system" }
codex-exec-server = { path = "exec-server" }
codex-execpolicy = { path = "execpolicy" }
codex-external-agent-migration = { path = "external-agent-migration" }
codex-external-agent-sessions = { path = "external-agent-sessions" }
codex-experimental-api-macros = { path = "codex-experimental-api-macros" }
codex-features = { path = "features" }
codex-feedback = { path = "feedback" }
codex-install-context = { path = "install-context" }
codex-file-search = { path = "file-search" }
codex-git-utils = { path = "git-utils" }
codex-hooks = { path = "hooks" }
codex-instructions = { path = "instructions" }
codex-keyring-store = { path = "keyring-store" }
codex-linux-sandbox = { path = "linux-sandbox" }
codex-lmstudio = { path = "lmstudio" }
codex-login = { path = "login" }
codex-message-history = { path = "message-history" }
codex-memories-mcp = { path = "memories/mcp" }
codex-memories-read = { path = "memories/read" }
codex-memories-write = { path = "memories/write" }
codex-mcp = { path = "codex-mcp" }
codex-mcp-server = { path = "mcp-server" }
codex-model-provider-info = { path = "model-provider-info" }
@@ -182,7 +148,6 @@ codex-network-proxy = { path = "network-proxy" }
codex-ollama = { path = "ollama" }
codex-otel = { path = "otel" }
codex-plugin = { path = "plugin" }
codex-model-provider = { path = "model-provider" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
codex-realtime-webrtc = { path = "realtime-webrtc" }
@@ -190,7 +155,6 @@ codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-response-debug-context = { path = "response-debug-context" }
codex-rmcp-client = { path = "rmcp-client" }
codex-rollout = { path = "rollout" }
codex-rollout-trace = { path = "rollout-trace" }
codex-sandboxing = { path = "sandboxing" }
codex-secrets = { path = "secrets" }
codex-shell-command = { path = "shell-command" }
@@ -199,11 +163,8 @@ codex-skills = { path = "skills" }
codex-state = { path = "state" }
codex-stdio-to-uds = { path = "stdio-to-uds" }
codex-terminal-detection = { path = "terminal-detection" }
codex-test-binary-support = { path = "test-binary-support" }
codex-thread-store = { path = "thread-store" }
codex-tools = { path = "tools" }
codex-tui = { path = "tui" }
codex-uds = { path = "uds" }
codex-utils-absolute-path = { path = "utils/absolute-path" }
codex-utils-approval-presets = { path = "utils/approval-presets" }
codex-utils-cache = { path = "utils/cache" }
@@ -241,13 +202,8 @@ arc-swap = "1.9.0"
assert_cmd = "2"
assert_matches = "1.5.0"
async-channel = "2.3.1"
async-io = "2.6.0"
async-stream = "0.3.6"
async-trait = "0.1.89"
aws-config = "1"
aws-credential-types = "1"
aws-sigv4 = "1"
aws-types = "1"
axum = { version = "0.8", default-features = false }
base64 = "0.22.1"
bm25 = "2.3.2"
@@ -259,27 +215,20 @@ clap_complete = "4"
color-eyre = "0.6.3"
constant_time_eq = "0.3.1"
crossbeam-channel = "0.5.15"
crypto_box = { version = "0.9.1", features = ["seal"] }
crossterm = "0.28.1"
csv = "1.3.1"
ctor = "0.6.3"
deno_core_icudata = "0.77.0"
derive_more = "2"
diffy = "0.4.2"
dirs = "6"
dns-lookup = "3.0.1"
dotenvy = "0.15.7"
dunce = "1.0.4"
ed25519-dalek = { version = "2.2.0", features = ["pkcs8"] }
encoding_rs = "0.8.35"
env-flags = "0.1.1"
env_logger = "0.11.9"
eventsource-stream = "0.2.3"
flate2 = "1.1.8"
futures = { version = "0.3", default-features = false }
gethostname = "1.1.0"
gix = { version = "0.81.0", default-features = false, features = ["sha1"] }
glob = "0.3"
globset = "0.4"
hmac = "0.12.1"
http = "1.3.1"
@@ -325,16 +274,11 @@ quick-xml = "0.38.4"
rand = "0.9"
ratatui = "0.29.0"
ratatui-macros = "0.6.0"
rcgen = { version = "0.14.7", default-features = false, features = [
"aws_lc_rs",
"pem",
] }
regex = "1.12.3"
regex-lite = "0.1.8"
reqwest = { version = "0.12", features = ["cookies"] }
reqwest = "0.12"
rmcp = { version = "0.15.0", default-features = false }
runfiles = { git = "https://github.com/dzbarsky/rules_rust", rev = "b56cbaa8465e74127f1ea216f813cd377295ad81" }
russh-sftp = "2.1.2"
rustls = { version = "0.23", default-features = false, features = [
"ring",
"std",
@@ -372,7 +316,6 @@ strum_macros = "0.28.0"
supports-color = "3.0.2"
syntect = "5"
sys-locale = "0.3.2"
tar = { version = "=0.4.45", default-features = false }
tempfile = "3.23.0"
test-log = "0.2.19"
textwrap = "0.16.2"
@@ -394,8 +337,6 @@ tracing-appender = "0.2.3"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = "0.3.22"
tracing-test = "0.2.5"
tonic = { version = "0.14.3", default-features = false, features = ["channel", "codegen"] }
tonic-prost = "0.14.3"
tree-sitter = "0.25.10"
tree-sitter-bash = "0.25"
|
||||
ts-rs = "11"
|
||||
@@ -413,7 +354,6 @@ webbrowser = "1.0"
|
||||
which = "8"
|
||||
whoami = "1.6.1"
|
||||
wildmatch = "2.6.1"
|
||||
winapi-util = "0.1.11"
|
||||
zip = "2.4.2"
|
||||
zstd = "0.13"
|
||||
|
||||
@@ -424,8 +364,6 @@ zeroize = "1.8.2"
|
||||
rust = {}
|
||||
|
||||
[workspace.lints.clippy]
|
||||
await_holding_invalid_type = "deny"
|
||||
await_holding_lock = "deny"
|
||||
expect_used = "deny"
|
||||
identity_op = "deny"
|
||||
manual_clamp = "deny"
|
||||
@@ -464,8 +402,6 @@ unwrap_used = "deny"
|
||||
# silence the false positive here instead of deleting a real dependency.
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = [
|
||||
"codex-agent-graph-store",
|
||||
"codex-memories-mcp",
|
||||
"icu_provider",
|
||||
"openssl-sys",
|
||||
"codex-utils-readiness",
|
||||
@@ -473,17 +409,6 @@ ignored = [
|
||||
"codex-v8-poc",
|
||||
]
|
||||
|
||||
[profile.dev]
|
||||
# Keep line tables/backtraces while avoiding expensive full variable debug info
|
||||
# across local dev builds.
|
||||
debug = 1
|
||||
|
||||
[profile.dev-small]
|
||||
inherits = "dev"
|
||||
opt-level = 0
|
||||
debug = 0
|
||||
strip = true
|
||||
|
||||
[profile.release]
|
||||
lto = "fat"
|
||||
split-debuginfo = "off"
|
||||
@@ -502,8 +427,8 @@ opt-level = 0
|
||||
[patch.crates-io]
|
||||
# Uncomment to debug local changes.
|
||||
# ratatui = { path = "../../ratatui" }
|
||||
crossterm = { git = "https://github.com/nornagon/crossterm", rev = "87db8bfa6dc99427fd3b071681b07fc31c6ce995" }
|
||||
ratatui = { git = "https://github.com/nornagon/ratatui", rev = "9b2ad1298408c45918ee9f8241a6f95498cdbed2" }
|
||||
crossterm = { git = "https://github.com/nornagon/crossterm", branch = "nornagon/color-query" }
|
||||
ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" }
|
||||
tokio-tungstenite = { git = "https://github.com/openai-oss-forks/tokio-tungstenite", rev = "132f5b39c862e3a970f731d709608b3e6276d5f6" }
|
||||
tungstenite = { git = "https://github.com/openai-oss-forks/tungstenite-rs", rev = "9200079d3b54a1ff51072e24d81fd354f085156f" }
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
# Codex CLI (Rust Implementation)

We provide Codex CLI as a standalone executable to ensure a zero-dependency install.
We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install.

## Installing Codex

@@ -59,22 +59,19 @@ To test to see what happens when a command is run under the sandbox provided by

```
# macOS
codex sandbox macos [--log-denials] [COMMAND]...
codex sandbox macos [--full-auto] [--log-denials] [COMMAND]...

# Linux
codex sandbox linux [COMMAND]...
codex sandbox linux [--full-auto] [COMMAND]...

# Windows
codex sandbox windows [COMMAND]...
codex sandbox windows [--full-auto] [COMMAND]...

# Legacy aliases
codex debug seatbelt [--log-denials] [COMMAND]...
codex debug landlock [COMMAND]...
codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]...
codex debug landlock [--full-auto] [COMMAND]...
```

To try a writable legacy sandbox mode with these commands, pass an explicit config override such
as `-c 'sandbox_mode="workspace-write"'`.
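For instance, a minimal sketch of such an invocation (hypothetical command and path; assumes a macOS machine and that the override is accepted alongside the sandbox subcommand):

```
# Illustrative only: run `touch` under the macOS sandbox with a
# workspace-write policy supplied as an explicit config override.
codex sandbox macos -c 'sandbox_mode="workspace-write"' touch /tmp/sandbox-probe
```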

### Selecting a sandbox policy via `--sandbox`

The Rust CLI exposes a dedicated `--sandbox` (`-s`) flag that lets you pick the sandbox policy **without** having to reach for the generic `-c/--config` option:
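A minimal sketch, assuming `workspace-write` (the `sandbox_mode` value shown above) is among the policy names this build accepts:

```
# Hypothetical invocation; picks the policy by name instead of a -c override.
codex --sandbox workspace-write
```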
@@ -97,7 +94,7 @@ In `workspace-write`, Codex also includes `~/.codex/memories` in its writable ro

This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates:

- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this becomes a library crate that is generally useful for building other Rust/native applications that use Codex.
- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex.
- [`exec/`](./exec) "headless" CLI for use in automation.
- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/).
- [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands.

@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "agent-graph-store",
    crate_name = "codex_agent_graph_store",
)
@@ -1,25 +0,0 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-agent-graph-store"
version.workspace = true

[lib]
name = "codex_agent_graph_store"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
async-trait = { workspace = true }
codex-protocol = { workspace = true }
codex-state = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }

[dev-dependencies]
pretty_assertions = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
@@ -1,20 +0,0 @@
/// Result type returned by agent graph store operations.
pub type AgentGraphStoreResult<T> = Result<T, AgentGraphStoreError>;

/// Error type shared by agent graph store implementations.
#[derive(Debug, thiserror::Error)]
pub enum AgentGraphStoreError {
    /// The caller supplied invalid request data.
    #[error("invalid agent graph store request: {message}")]
    InvalidRequest {
        /// User-facing explanation of the invalid request.
        message: String,
    },

    /// Catch-all for implementation failures that do not fit a more specific category.
    #[error("agent graph store internal error: {message}")]
    Internal {
        /// User-facing explanation of the implementation failure.
        message: String,
    },
}
@@ -1,12 +0,0 @@
//! Storage-neutral parent/child topology for thread-spawned agents.

mod error;
mod local;
mod store;
mod types;

pub use error::AgentGraphStoreError;
pub use error::AgentGraphStoreResult;
pub use local::LocalAgentGraphStore;
pub use store::AgentGraphStore;
pub use types::ThreadSpawnEdgeStatus;
@@ -1,325 +0,0 @@
use async_trait::async_trait;
use codex_protocol::ThreadId;
use codex_state::StateRuntime;
use std::sync::Arc;

use crate::AgentGraphStore;
use crate::AgentGraphStoreError;
use crate::AgentGraphStoreResult;
use crate::ThreadSpawnEdgeStatus;

/// SQLite-backed implementation of [`AgentGraphStore`] using an existing state runtime.
#[derive(Clone)]
pub struct LocalAgentGraphStore {
    state_db: Arc<StateRuntime>,
}

impl std::fmt::Debug for LocalAgentGraphStore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LocalAgentGraphStore")
            .field("codex_home", &self.state_db.codex_home())
            .finish_non_exhaustive()
    }
}

impl LocalAgentGraphStore {
    /// Create a local graph store from an already-initialized state runtime.
    pub fn new(state_db: Arc<StateRuntime>) -> Self {
        Self { state_db }
    }
}

#[async_trait]
impl AgentGraphStore for LocalAgentGraphStore {
    async fn upsert_thread_spawn_edge(
        &self,
        parent_thread_id: ThreadId,
        child_thread_id: ThreadId,
        status: ThreadSpawnEdgeStatus,
    ) -> AgentGraphStoreResult<()> {
        self.state_db
            .upsert_thread_spawn_edge(parent_thread_id, child_thread_id, to_state_status(status))
            .await
            .map_err(internal_error)
    }

    async fn set_thread_spawn_edge_status(
        &self,
        child_thread_id: ThreadId,
        status: ThreadSpawnEdgeStatus,
    ) -> AgentGraphStoreResult<()> {
        self.state_db
            .set_thread_spawn_edge_status(child_thread_id, to_state_status(status))
            .await
            .map_err(internal_error)
    }

    async fn list_thread_spawn_children(
        &self,
        parent_thread_id: ThreadId,
        status_filter: Option<ThreadSpawnEdgeStatus>,
    ) -> AgentGraphStoreResult<Vec<ThreadId>> {
        if let Some(status) = status_filter {
            return self
                .state_db
                .list_thread_spawn_children_with_status(parent_thread_id, to_state_status(status))
                .await
                .map_err(internal_error);
        }

        self.state_db
            .list_thread_spawn_children(parent_thread_id)
            .await
            .map_err(internal_error)
    }

    async fn list_thread_spawn_descendants(
        &self,
        root_thread_id: ThreadId,
        status_filter: Option<ThreadSpawnEdgeStatus>,
    ) -> AgentGraphStoreResult<Vec<ThreadId>> {
        match status_filter {
            Some(status) => self
                .state_db
                .list_thread_spawn_descendants_with_status(root_thread_id, to_state_status(status))
                .await
                .map_err(internal_error),
            None => self
                .state_db
                .list_thread_spawn_descendants(root_thread_id)
                .await
                .map_err(internal_error),
        }
    }
}

fn to_state_status(status: ThreadSpawnEdgeStatus) -> codex_state::DirectionalThreadSpawnEdgeStatus {
    match status {
        ThreadSpawnEdgeStatus::Open => codex_state::DirectionalThreadSpawnEdgeStatus::Open,
        ThreadSpawnEdgeStatus::Closed => codex_state::DirectionalThreadSpawnEdgeStatus::Closed,
    }
}

fn internal_error(err: impl std::fmt::Display) -> AgentGraphStoreError {
    AgentGraphStoreError::Internal {
        message: err.to_string(),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_state::DirectionalThreadSpawnEdgeStatus;
    use pretty_assertions::assert_eq;
    use tempfile::TempDir;

    struct TestRuntime {
        state_db: Arc<StateRuntime>,
        _codex_home: TempDir,
    }

    fn thread_id(suffix: u128) -> ThreadId {
        ThreadId::from_string(&format!("00000000-0000-0000-0000-{suffix:012}"))
            .expect("valid thread id")
    }

    async fn state_runtime() -> TestRuntime {
        let codex_home = TempDir::new().expect("tempdir should be created");
        let state_db =
            StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string())
                .await
                .expect("state db should initialize");
        TestRuntime {
            state_db,
            _codex_home: codex_home,
        }
    }

    #[tokio::test]
    async fn local_store_upserts_and_lists_direct_children_with_status_filters() {
        let fixture = state_runtime().await;
        let state_db = fixture.state_db;
        let store = LocalAgentGraphStore::new(state_db.clone());
        let parent_thread_id = thread_id(/*suffix*/ 1);
        let first_child_thread_id = thread_id(/*suffix*/ 2);
        let second_child_thread_id = thread_id(/*suffix*/ 3);

        store
            .upsert_thread_spawn_edge(
                parent_thread_id,
                second_child_thread_id,
                ThreadSpawnEdgeStatus::Closed,
            )
            .await
            .expect("closed child edge should insert");
        store
            .upsert_thread_spawn_edge(
                parent_thread_id,
                first_child_thread_id,
                ThreadSpawnEdgeStatus::Open,
            )
            .await
            .expect("open child edge should insert");

        let all_children = store
            .list_thread_spawn_children(parent_thread_id, /*status_filter*/ None)
            .await
            .expect("all children should load");
        assert_eq!(
            all_children,
            vec![first_child_thread_id, second_child_thread_id]
        );

        let open_children = store
            .list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Open))
            .await
            .expect("open children should load");
        let state_open_children = state_db
            .list_thread_spawn_children_with_status(
                parent_thread_id,
                DirectionalThreadSpawnEdgeStatus::Open,
            )
            .await
            .expect("state open children should load");
        assert_eq!(open_children, state_open_children);
        assert_eq!(open_children, vec![first_child_thread_id]);

        let closed_children = store
            .list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
            .await
            .expect("closed children should load");
        assert_eq!(closed_children, vec![second_child_thread_id]);
    }

    #[tokio::test]
    async fn local_store_updates_edge_status() {
        let fixture = state_runtime().await;
        let state_db = fixture.state_db;
        let store = LocalAgentGraphStore::new(state_db);
        let parent_thread_id = thread_id(/*suffix*/ 10);
        let child_thread_id = thread_id(/*suffix*/ 11);

        store
            .upsert_thread_spawn_edge(
                parent_thread_id,
                child_thread_id,
                ThreadSpawnEdgeStatus::Open,
            )
            .await
            .expect("child edge should insert");
        store
            .set_thread_spawn_edge_status(child_thread_id, ThreadSpawnEdgeStatus::Closed)
            .await
            .expect("child edge should close");

        let open_children = store
            .list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Open))
            .await
            .expect("open children should load");
        assert_eq!(open_children, Vec::<ThreadId>::new());

        let closed_children = store
            .list_thread_spawn_children(parent_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
            .await
            .expect("closed children should load");
        assert_eq!(closed_children, vec![child_thread_id]);
    }

    #[tokio::test]
    async fn local_store_lists_descendants_breadth_first_with_status_filters() {
        let fixture = state_runtime().await;
        let state_db = fixture.state_db;
        let store = LocalAgentGraphStore::new(state_db.clone());
        let root_thread_id = thread_id(/*suffix*/ 20);
        let later_child_thread_id = thread_id(/*suffix*/ 22);
        let earlier_child_thread_id = thread_id(/*suffix*/ 21);
        let closed_grandchild_thread_id = thread_id(/*suffix*/ 23);
        let open_grandchild_thread_id = thread_id(/*suffix*/ 24);
        let closed_child_thread_id = thread_id(/*suffix*/ 25);
        let closed_great_grandchild_thread_id = thread_id(/*suffix*/ 26);

        for (parent_thread_id, child_thread_id, status) in [
            (
                root_thread_id,
                later_child_thread_id,
                ThreadSpawnEdgeStatus::Open,
            ),
            (
                root_thread_id,
                earlier_child_thread_id,
                ThreadSpawnEdgeStatus::Open,
            ),
            (
                earlier_child_thread_id,
                open_grandchild_thread_id,
                ThreadSpawnEdgeStatus::Open,
            ),
            (
                later_child_thread_id,
                closed_grandchild_thread_id,
                ThreadSpawnEdgeStatus::Closed,
            ),
            (
                root_thread_id,
                closed_child_thread_id,
                ThreadSpawnEdgeStatus::Closed,
            ),
            (
                closed_child_thread_id,
                closed_great_grandchild_thread_id,
                ThreadSpawnEdgeStatus::Closed,
            ),
        ] {
            store
                .upsert_thread_spawn_edge(parent_thread_id, child_thread_id, status)
                .await
                .expect("edge should insert");
        }

        let all_descendants = store
            .list_thread_spawn_descendants(root_thread_id, /*status_filter*/ None)
            .await
            .expect("all descendants should load");
        assert_eq!(
            all_descendants,
            vec![
                earlier_child_thread_id,
                later_child_thread_id,
                closed_child_thread_id,
                closed_grandchild_thread_id,
                open_grandchild_thread_id,
                closed_great_grandchild_thread_id,
            ]
        );

        let open_descendants = store
            .list_thread_spawn_descendants(root_thread_id, Some(ThreadSpawnEdgeStatus::Open))
            .await
            .expect("open descendants should load");
        let state_open_descendants = state_db
            .list_thread_spawn_descendants_with_status(
                root_thread_id,
                DirectionalThreadSpawnEdgeStatus::Open,
            )
            .await
            .expect("state open descendants should load");
        assert_eq!(open_descendants, state_open_descendants);
        assert_eq!(
            open_descendants,
            vec![
                earlier_child_thread_id,
                later_child_thread_id,
                open_grandchild_thread_id,
            ]
        );

        let closed_descendants = store
            .list_thread_spawn_descendants(root_thread_id, Some(ThreadSpawnEdgeStatus::Closed))
            .await
            .expect("closed descendants should load");
        assert_eq!(
            closed_descendants,
            vec![closed_child_thread_id, closed_great_grandchild_thread_id]
        );
    }
}
@@ -1,55 +0,0 @@
use async_trait::async_trait;
use codex_protocol::ThreadId;

use crate::AgentGraphStoreResult;
use crate::ThreadSpawnEdgeStatus;

/// Storage-neutral boundary for persisted thread-spawn parent/child topology.
///
/// Implementations are expected to return stable ordering for list methods so callers can merge
/// persisted graph state with live in-memory state without introducing nondeterministic output.
#[async_trait]
pub trait AgentGraphStore: Send + Sync {
    /// Insert or replace the directional parent/child edge for a spawned thread.
    ///
    /// `child_thread_id` has at most one persisted parent. Re-inserting the same child should
    /// update both the parent and status to match the supplied values.
    async fn upsert_thread_spawn_edge(
        &self,
        parent_thread_id: ThreadId,
        child_thread_id: ThreadId,
        status: ThreadSpawnEdgeStatus,
    ) -> AgentGraphStoreResult<()>;

    /// Update the persisted lifecycle status of a spawned thread's incoming edge.
    ///
    /// Implementations should treat missing children as a successful no-op.
    async fn set_thread_spawn_edge_status(
        &self,
        child_thread_id: ThreadId,
        status: ThreadSpawnEdgeStatus,
    ) -> AgentGraphStoreResult<()>;

    /// List direct spawned children of a parent thread.
    ///
    /// When `status_filter` is `Some`, only child edges with that exact status are returned. When
    /// it is `None`, all direct child edges are returned regardless of status, including statuses
    /// that may be added by a future store implementation.
    async fn list_thread_spawn_children(
        &self,
        parent_thread_id: ThreadId,
        status_filter: Option<ThreadSpawnEdgeStatus>,
    ) -> AgentGraphStoreResult<Vec<ThreadId>>;

    /// List spawned descendants breadth-first by depth, then by thread id.
    ///
    /// `status_filter` is applied to every traversed edge, not just to the returned descendants.
    /// For example, `Some(Open)` walks only open edges, so descendants under a closed edge are not
    /// included even if their own incoming edge is open. `None` walks and returns every persisted
    /// edge regardless of status.
    async fn list_thread_spawn_descendants(
        &self,
        root_thread_id: ThreadId,
        status_filter: Option<ThreadSpawnEdgeStatus>,
    ) -> AgentGraphStoreResult<Vec<ThreadId>>;
}
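For orientation, here is a minimal usage sketch of the trait above (not from the repository; it assumes some `AgentGraphStore` implementation such as `LocalAgentGraphStore` plus two `ThreadId` values already in scope):

```rust
use codex_agent_graph_store::{AgentGraphStore, AgentGraphStoreResult, ThreadSpawnEdgeStatus};
use codex_protocol::ThreadId;

// Sketch only: `store` can be any implementation of the trait.
async fn example(
    store: &dyn AgentGraphStore,
    parent: ThreadId,
    child: ThreadId,
) -> AgentGraphStoreResult<()> {
    // Record that `parent` spawned `child`; the edge starts open.
    store
        .upsert_thread_spawn_edge(parent, child, ThreadSpawnEdgeStatus::Open)
        .await?;

    // Close the child's incoming edge once the spawned agent finishes.
    store
        .set_thread_spawn_edge_status(child, ThreadSpawnEdgeStatus::Closed)
        .await?;

    // `Some(Open)` filters on edge status, so the now-closed child is omitted.
    let open_children = store
        .list_thread_spawn_children(parent, Some(ThreadSpawnEdgeStatus::Open))
        .await?;
    assert!(open_children.is_empty());
    Ok(())
}
```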
@@ -1,42 +0,0 @@
use serde::Deserialize;
use serde::Serialize;

/// Lifecycle status attached to a directional thread-spawn edge.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ThreadSpawnEdgeStatus {
    /// The child thread is still live or resumable as an open spawned agent.
    Open,
    /// The child thread has been closed from the parent/child graph's perspective.
    Closed,
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn thread_spawn_edge_status_serializes_as_snake_case() {
        assert_eq!(
            serde_json::to_string(&ThreadSpawnEdgeStatus::Open)
                .expect("open status should serialize"),
            "\"open\""
        );
        assert_eq!(
            serde_json::to_string(&ThreadSpawnEdgeStatus::Closed)
                .expect("closed status should serialize"),
            "\"closed\""
        );
        assert_eq!(
            serde_json::from_str::<ThreadSpawnEdgeStatus>("\"open\"")
                .expect("open status should deserialize"),
            ThreadSpawnEdgeStatus::Open
        );
        assert_eq!(
            serde_json::from_str::<ThreadSpawnEdgeStatus>("\"closed\"")
                .expect("closed status should deserialize"),
            ThreadSpawnEdgeStatus::Closed
        );
    }
}
@@ -1,6 +0,0 @@
load("//:defs.bzl", "codex_rust_crate")

codex_rust_crate(
    name = "agent-identity",
    crate_name = "codex_agent_identity",
)
@@ -1,30 +0,0 @@
[package]
edition.workspace = true
license.workspace = true
name = "codex-agent-identity"
version.workspace = true

[lib]
doctest = false
name = "codex_agent_identity"
path = "src/lib.rs"

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
codex-protocol = { workspace = true }
crypto_box = { workspace = true }
ed25519-dalek = { workspace = true }
jsonwebtoken = { workspace = true }
rand = { workspace = true }
reqwest = { workspace = true, features = ["json"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }

[dev-dependencies]
pretty_assertions = { workspace = true }
@@ -1,737 +0,0 @@
use std::collections::BTreeMap;
use std::time::Duration;

use anyhow::Context;
use anyhow::Result;
use base64::Engine as _;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use chrono::SecondsFormat;
use chrono::Utc;
use codex_protocol::auth::PlanType as AuthPlanType;
use codex_protocol::protocol::SessionSource;
use crypto_box::SecretKey as Curve25519SecretKey;
use ed25519_dalek::Signer as _;
use ed25519_dalek::SigningKey;
use ed25519_dalek::VerifyingKey;
use ed25519_dalek::pkcs8::DecodePrivateKey;
use ed25519_dalek::pkcs8::EncodePrivateKey;
use jsonwebtoken::Algorithm;
use jsonwebtoken::DecodingKey;
use jsonwebtoken::Validation;
use jsonwebtoken::decode;
use jsonwebtoken::decode_header;
use jsonwebtoken::jwk::JwkSet;
use rand::TryRngCore;
use rand::rngs::OsRng;
use serde::Deserialize;
use serde::Serialize;
use serde::de::DeserializeOwned;
use sha2::Digest as _;
use sha2::Sha512;

const AGENT_TASK_REGISTRATION_TIMEOUT: Duration = Duration::from_secs(30);
const AGENT_IDENTITY_JWKS_TIMEOUT: Duration = Duration::from_secs(10);
const AGENT_IDENTITY_JWT_AUDIENCE: &str = "codex-app-server";
const AGENT_IDENTITY_JWT_ISSUER: &str = "https://chatgpt.com/codex-backend/agent-identity";

/// Stored key material for a registered agent identity.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AgentIdentityKey<'a> {
    pub agent_runtime_id: &'a str,
    pub private_key_pkcs8_base64: &'a str,
}

/// Task binding to use when constructing a task-scoped AgentAssertion.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AgentTaskAuthorizationTarget<'a> {
    pub agent_runtime_id: &'a str,
    pub task_id: &'a str,
}

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct AgentBillOfMaterials {
    pub agent_version: String,
    pub agent_harness_id: String,
    pub running_location: String,
}

pub struct GeneratedAgentKeyMaterial {
    pub private_key_pkcs8_base64: String,
    pub public_key_ssh: String,
}

/// Claims carried by an Agent Identity JWT.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
pub struct AgentIdentityJwtClaims {
    pub iss: String,
    pub aud: String,
    pub iat: usize,
    pub exp: usize,
    pub agent_runtime_id: String,
    pub agent_private_key: String,
    pub account_id: String,
    pub chatgpt_user_id: String,
    pub email: String,
    pub plan_type: AuthPlanType,
    pub chatgpt_account_is_fedramp: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
struct AgentAssertionEnvelope {
    agent_runtime_id: String,
    task_id: String,
    timestamp: String,
    signature: String,
}

#[derive(Serialize)]
struct RegisterTaskRequest {
    timestamp: String,
    signature: String,
}

#[derive(Deserialize)]
struct RegisterTaskResponse {
    #[serde(default)]
    task_id: Option<String>,
    #[serde(default, rename = "taskId")]
    task_id_camel: Option<String>,
    #[serde(default)]
    encrypted_task_id: Option<String>,
    #[serde(default, rename = "encryptedTaskId")]
    encrypted_task_id_camel: Option<String>,
}

pub fn authorization_header_for_agent_task(
    key: AgentIdentityKey<'_>,
    target: AgentTaskAuthorizationTarget<'_>,
) -> Result<String> {
    anyhow::ensure!(
        key.agent_runtime_id == target.agent_runtime_id,
        "agent task runtime {} does not match stored agent identity {}",
        target.agent_runtime_id,
        key.agent_runtime_id
    );

    let timestamp = Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true);
    let envelope = AgentAssertionEnvelope {
        agent_runtime_id: target.agent_runtime_id.to_string(),
        task_id: target.task_id.to_string(),
        timestamp: timestamp.clone(),
        signature: sign_agent_assertion_payload(key, target.task_id, &timestamp)?,
    };
    let serialized_assertion = serialize_agent_assertion(&envelope)?;
    Ok(format!("AgentAssertion {serialized_assertion}"))
}

pub async fn fetch_agent_identity_jwks(
    client: &reqwest::Client,
    chatgpt_base_url: &str,
) -> Result<JwkSet> {
    let response = client
        .get(agent_identity_jwks_url(chatgpt_base_url))
        .timeout(AGENT_IDENTITY_JWKS_TIMEOUT)
        .send()
        .await
        .context("failed to request agent identity JWKS")?
        .error_for_status()
        .context("agent identity JWKS endpoint returned an error")?;

    response
        .json()
        .await
        .context("failed to decode agent identity JWKS")
}

pub fn decode_agent_identity_jwt(
    jwt: &str,
    jwks: Option<&JwkSet>,
) -> Result<AgentIdentityJwtClaims> {
    let Some(jwks) = jwks else {
        return decode_agent_identity_jwt_payload(jwt);
    };

    let header = decode_header(jwt).context("failed to decode agent identity JWT header")?;
    let kid = header
        .kid
        .context("agent identity JWT header does not include a kid")?;
    let jwk = jwks
        .find(&kid)
        .with_context(|| format!("agent identity JWT kid {kid} is not trusted"))?;
    let decoding_key = DecodingKey::from_jwk(jwk).context("failed to build JWT decoding key")?;
    let mut validation = Validation::new(Algorithm::RS256);
    validation.set_audience(&[AGENT_IDENTITY_JWT_AUDIENCE]);
    validation.set_issuer(&[AGENT_IDENTITY_JWT_ISSUER]);
    validation.required_spec_claims.insert("iss".to_string());
    validation.required_spec_claims.insert("aud".to_string());
    decode::<AgentIdentityJwtClaims>(jwt, &decoding_key, &validation)
        .map(|data| data.claims)
        .context("failed to verify agent identity JWT")
}

fn decode_agent_identity_jwt_payload<T: DeserializeOwned>(jwt: &str) -> Result<T> {
    let mut parts = jwt.split('.');
    let (_header_b64, payload_b64, _sig_b64) = match (parts.next(), parts.next(), parts.next()) {
        (Some(h), Some(p), Some(s)) if !h.is_empty() && !p.is_empty() && !s.is_empty() => (h, p, s),
        _ => anyhow::bail!("invalid agent identity JWT format"),
    };
    anyhow::ensure!(parts.next().is_none(), "invalid agent identity JWT format");

    let payload_bytes = URL_SAFE_NO_PAD
        .decode(payload_b64)
        .context("agent identity JWT payload is not valid base64url")?;
    serde_json::from_slice(&payload_bytes).context("agent identity JWT payload is not valid JSON")
}

pub fn sign_task_registration_payload(
    key: AgentIdentityKey<'_>,
    timestamp: &str,
) -> Result<String> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(key.private_key_pkcs8_base64)?;
    let payload = format!("{}:{timestamp}", key.agent_runtime_id);
    Ok(BASE64_STANDARD.encode(signing_key.sign(payload.as_bytes()).to_bytes()))
}

pub async fn register_agent_task(
    client: &reqwest::Client,
    chatgpt_base_url: &str,
    key: AgentIdentityKey<'_>,
) -> Result<String> {
    let timestamp = Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true);
    let request = RegisterTaskRequest {
        signature: sign_task_registration_payload(key, &timestamp)?,
        timestamp,
    };
    let url = agent_task_registration_url(chatgpt_base_url, key.agent_runtime_id);

    let response = client
        .post(url)
        .timeout(AGENT_TASK_REGISTRATION_TIMEOUT)
        .json(&request)
        .send()
        .await
        .context("failed to register agent task")?;
    if !response.status().is_success() {
        let status = response.status();
        let body = response.text().await.unwrap_or_default();
        let body = if body.len() > 512 {
            format!("{}...", body.chars().take(512).collect::<String>())
        } else {
            body
        };
        anyhow::bail!("failed to register agent task with status {status}: {body}");
    }

    let response = response
        .json()
        .await
        .context("failed to decode agent task registration response")?;

    task_id_from_register_task_response(key, response)
}

fn task_id_from_register_task_response(
    key: AgentIdentityKey<'_>,
    response: RegisterTaskResponse,
) -> Result<String> {
    if let Some(task_id) = response.task_id.or(response.task_id_camel) {
        return Ok(task_id);
    }
    let encrypted_task_id = response
        .encrypted_task_id
        .or(response.encrypted_task_id_camel)
        .context("agent task registration response omitted task id")?;
    decrypt_task_id_response(key, &encrypted_task_id)
}

pub fn decrypt_task_id_response(
    key: AgentIdentityKey<'_>,
    encrypted_task_id: &str,
) -> Result<String> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(key.private_key_pkcs8_base64)?;
    let ciphertext = BASE64_STANDARD
        .decode(encrypted_task_id)
        .context("encrypted task id is not valid base64")?;
    let plaintext = curve25519_secret_key_from_signing_key(&signing_key)
        .unseal(&ciphertext)
        .map_err(|_| anyhow::anyhow!("failed to decrypt encrypted task id"))?;
    String::from_utf8(plaintext).context("decrypted task id is not valid UTF-8")
}

pub fn generate_agent_key_material() -> Result<GeneratedAgentKeyMaterial> {
    let mut secret_key_bytes = [0u8; 32];
    OsRng
        .try_fill_bytes(&mut secret_key_bytes)
        .context("failed to generate agent identity private key bytes")?;
    let signing_key = SigningKey::from_bytes(&secret_key_bytes);
    let private_key_pkcs8 = signing_key
        .to_pkcs8_der()
        .context("failed to encode agent identity private key as PKCS#8")?;

    Ok(GeneratedAgentKeyMaterial {
        private_key_pkcs8_base64: BASE64_STANDARD.encode(private_key_pkcs8.as_bytes()),
        public_key_ssh: encode_ssh_ed25519_public_key(&signing_key.verifying_key()),
    })
}

pub fn public_key_ssh_from_private_key_pkcs8_base64(
    private_key_pkcs8_base64: &str,
) -> Result<String> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(private_key_pkcs8_base64)?;
    Ok(encode_ssh_ed25519_public_key(&signing_key.verifying_key()))
}

pub fn verifying_key_from_private_key_pkcs8_base64(
    private_key_pkcs8_base64: &str,
) -> Result<VerifyingKey> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(private_key_pkcs8_base64)?;
    Ok(signing_key.verifying_key())
}

pub fn curve25519_secret_key_from_private_key_pkcs8_base64(
    private_key_pkcs8_base64: &str,
) -> Result<Curve25519SecretKey> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(private_key_pkcs8_base64)?;
    Ok(curve25519_secret_key_from_signing_key(&signing_key))
}

pub fn agent_registration_url(chatgpt_base_url: &str) -> String {
    let trimmed = chatgpt_base_url.trim_end_matches('/');
    format!("{trimmed}/v1/agent/register")
}

pub fn agent_task_registration_url(chatgpt_base_url: &str, agent_runtime_id: &str) -> String {
    let trimmed = chatgpt_base_url.trim_end_matches('/');
    format!("{trimmed}/v1/agent/{agent_runtime_id}/task/register")
}

pub fn agent_identity_biscuit_url(chatgpt_base_url: &str) -> String {
    let trimmed = chatgpt_base_url.trim_end_matches('/');
    format!("{trimmed}/authenticate_app_v2")
}

pub fn agent_identity_jwks_url(chatgpt_base_url: &str) -> String {
    let trimmed = chatgpt_base_url.trim_end_matches('/');
    if trimmed.contains("/backend-api") {
        format!("{trimmed}/wham/agent-identities/jwks")
    } else {
        format!("{trimmed}/agent-identities/jwks")
    }
}

pub fn agent_identity_request_id() -> Result<String> {
    let mut request_id_bytes = [0u8; 16];
    OsRng
        .try_fill_bytes(&mut request_id_bytes)
        .context("failed to generate agent identity request id")?;
    Ok(format!(
        "codex-agent-identity-{}",
        URL_SAFE_NO_PAD.encode(request_id_bytes)
    ))
}

pub fn build_abom(session_source: SessionSource) -> AgentBillOfMaterials {
    AgentBillOfMaterials {
        agent_version: env!("CARGO_PKG_VERSION").to_string(),
        agent_harness_id: match &session_source {
            SessionSource::VSCode => "codex-app".to_string(),
            SessionSource::Cli
            | SessionSource::Exec
            | SessionSource::Mcp
            | SessionSource::Custom(_)
            | SessionSource::Internal(_)
            | SessionSource::SubAgent(_)
            | SessionSource::Unknown => "codex-cli".to_string(),
        },
        running_location: format!("{}-{}", session_source, std::env::consts::OS),
    }
}

pub fn encode_ssh_ed25519_public_key(verifying_key: &VerifyingKey) -> String {
    let mut blob = Vec::with_capacity(4 + 11 + 4 + 32);
    append_ssh_string(&mut blob, b"ssh-ed25519");
    append_ssh_string(&mut blob, verifying_key.as_bytes());
    format!("ssh-ed25519 {}", BASE64_STANDARD.encode(blob))
}

fn sign_agent_assertion_payload(
    key: AgentIdentityKey<'_>,
    task_id: &str,
    timestamp: &str,
) -> Result<String> {
    let signing_key = signing_key_from_private_key_pkcs8_base64(key.private_key_pkcs8_base64)?;
    let payload = format!("{}:{task_id}:{timestamp}", key.agent_runtime_id);
    Ok(BASE64_STANDARD.encode(signing_key.sign(payload.as_bytes()).to_bytes()))
}

fn serialize_agent_assertion(envelope: &AgentAssertionEnvelope) -> Result<String> {
    let payload = serde_json::to_vec(&BTreeMap::from([
        ("agent_runtime_id", envelope.agent_runtime_id.as_str()),
        ("signature", envelope.signature.as_str()),
        ("task_id", envelope.task_id.as_str()),
        ("timestamp", envelope.timestamp.as_str()),
    ]))
    .context("failed to serialize agent assertion envelope")?;
    Ok(URL_SAFE_NO_PAD.encode(payload))
}

fn curve25519_secret_key_from_signing_key(signing_key: &SigningKey) -> Curve25519SecretKey {
    let digest = Sha512::digest(signing_key.to_bytes());
    let mut secret_key = [0u8; 32];
    secret_key.copy_from_slice(&digest[..32]);
    secret_key[0] &= 248;
    secret_key[31] &= 127;
    secret_key[31] |= 64;
    Curve25519SecretKey::from(secret_key)
}

fn append_ssh_string(buf: &mut Vec<u8>, value: &[u8]) {
    buf.extend_from_slice(&(value.len() as u32).to_be_bytes());
    buf.extend_from_slice(value);
}

fn signing_key_from_private_key_pkcs8_base64(private_key_pkcs8_base64: &str) -> Result<SigningKey> {
    let private_key = BASE64_STANDARD
        .decode(private_key_pkcs8_base64)
        .context("stored agent identity private key is not valid base64")?;
    SigningKey::from_pkcs8_der(&private_key)
        .context("stored agent identity private key is not valid PKCS#8")
}

#[cfg(test)]
mod tests {
    use base64::Engine as _;
    use ed25519_dalek::Signature;
    use ed25519_dalek::Verifier as _;
    use jsonwebtoken::EncodingKey;
    use jsonwebtoken::Header;
    use pretty_assertions::assert_eq;

    use codex_protocol::auth::KnownPlan;

    use super::*;

    #[test]
    fn authorization_header_for_agent_task_serializes_signed_agent_assertion() {
        let signing_key = SigningKey::from_bytes(&[7u8; 32]);
        let private_key = signing_key
            .to_pkcs8_der()
            .expect("encode test key material");
        let key = AgentIdentityKey {
            agent_runtime_id: "agent-123",
            private_key_pkcs8_base64: &BASE64_STANDARD.encode(private_key.as_bytes()),
        };
        let target = AgentTaskAuthorizationTarget {
            agent_runtime_id: "agent-123",
            task_id: "task-123",
        };

        let header =
            authorization_header_for_agent_task(key, target).expect("build agent assertion header");
        let token = header
            .strip_prefix("AgentAssertion ")
            .expect("agent assertion scheme");
        let payload = URL_SAFE_NO_PAD
            .decode(token)
            .expect("valid base64url payload");
        let envelope: AgentAssertionEnvelope =
            serde_json::from_slice(&payload).expect("valid assertion envelope");

        assert_eq!(
            envelope,
            AgentAssertionEnvelope {
                agent_runtime_id: "agent-123".to_string(),
                task_id: "task-123".to_string(),
                timestamp: envelope.timestamp.clone(),
                signature: envelope.signature.clone(),
            }
        );
        let signature_bytes = BASE64_STANDARD
            .decode(&envelope.signature)
            .expect("valid base64 signature");
        let signature = Signature::from_slice(&signature_bytes).expect("valid signature bytes");
        signing_key
            .verifying_key()
            .verify(
                format!(
                    "{}:{}:{}",
                    envelope.agent_runtime_id, envelope.task_id, envelope.timestamp
                )
                .as_bytes(),
                &signature,
            )
            .expect("signature should verify");
    }

    #[test]
    fn authorization_header_for_agent_task_rejects_mismatched_runtime() {
        let signing_key = SigningKey::from_bytes(&[7u8; 32]);
        let private_key = signing_key
            .to_pkcs8_der()
            .expect("encode test key material");
        let private_key_pkcs8_base64 = BASE64_STANDARD.encode(private_key.as_bytes());
        let key = AgentIdentityKey {
            agent_runtime_id: "agent-123",
            private_key_pkcs8_base64: &private_key_pkcs8_base64,
        };
        let target = AgentTaskAuthorizationTarget {
            agent_runtime_id: "agent-456",
            task_id: "task-123",
        };

        let error = authorization_header_for_agent_task(key, target)
            .expect_err("runtime mismatch should fail");

        assert_eq!(
            error.to_string(),
            "agent task runtime agent-456 does not match stored agent identity agent-123"
        );
    }

    #[test]
    fn decode_agent_identity_jwt_reads_claims() {
        let jwt = jwt_with_payload(serde_json::json!({
            "iss": AGENT_IDENTITY_JWT_ISSUER,
            "aud": AGENT_IDENTITY_JWT_AUDIENCE,
            "iat": 1_700_000_000usize,
            "exp": 4_000_000_000usize,
            "agent_runtime_id": "agent-runtime-id",
            "agent_private_key": "private-key",
            "account_id": "account-id",
            "chatgpt_user_id": "user-id",
            "email": "user@example.com",
            "plan_type": "pro",
            "chatgpt_account_is_fedramp": false,
        }));

        let claims = decode_agent_identity_jwt(&jwt, /*jwks*/ None).expect("JWT should decode");

        assert_eq!(
            claims,
            AgentIdentityJwtClaims {
                iss: AGENT_IDENTITY_JWT_ISSUER.to_string(),
                aud: AGENT_IDENTITY_JWT_AUDIENCE.to_string(),
                iat: 1_700_000_000,
                exp: 4_000_000_000,
                agent_runtime_id: "agent-runtime-id".to_string(),
                agent_private_key: "private-key".to_string(),
                account_id: "account-id".to_string(),
                chatgpt_user_id: "user-id".to_string(),
                email: "user@example.com".to_string(),
                plan_type: AuthPlanType::Known(KnownPlan::Pro),
                chatgpt_account_is_fedramp: false,
            }
        );
    }

    #[test]
    fn decode_agent_identity_jwt_maps_raw_plan_aliases() {
        let jwt = jwt_with_payload(serde_json::json!({
            "iss": AGENT_IDENTITY_JWT_ISSUER,
            "aud": AGENT_IDENTITY_JWT_AUDIENCE,
            "iat": 1_700_000_000usize,
            "exp": 4_000_000_000usize,
            "agent_runtime_id": "agent-runtime-id",
            "agent_private_key": "private-key",
            "account_id": "account-id",
            "chatgpt_user_id": "user-id",
            "email": "user@example.com",
            "plan_type": "hc",
            "chatgpt_account_is_fedramp": false,
        }));

        let claims = decode_agent_identity_jwt(&jwt, /*jwks*/ None).expect("JWT should decode");

        assert_eq!(claims.plan_type, AuthPlanType::Known(KnownPlan::Enterprise));
    }

    #[test]
    fn decode_agent_identity_jwt_verifies_when_jwks_is_present() {
        let jwks = test_jwks("test-key");
        let claims = AgentIdentityJwtClaims {
            iss: AGENT_IDENTITY_JWT_ISSUER.to_string(),
            aud: AGENT_IDENTITY_JWT_AUDIENCE.to_string(),
            iat: 1_700_000_000,
            exp: 4_000_000_000,
            agent_runtime_id: "agent-runtime-id".to_string(),
            agent_private_key: "private-key".to_string(),
            account_id: "account-id".to_string(),
            chatgpt_user_id: "user-id".to_string(),
            email: "user@example.com".to_string(),
            plan_type: AuthPlanType::Known(KnownPlan::Pro),
            chatgpt_account_is_fedramp: false,
        };
        let jwt = jsonwebtoken::encode(
            &test_jwt_header("test-key"),
            &serde_json::json!({
                "iss": claims.iss,
                "aud": claims.aud,
                "iat": claims.iat,
                "exp": claims.exp,
                "agent_runtime_id": claims.agent_runtime_id,
                "agent_private_key": claims.agent_private_key,
                "account_id": claims.account_id,
                "chatgpt_user_id": claims.chatgpt_user_id,
                "email": claims.email,
                "plan_type": "pro",
                "chatgpt_account_is_fedramp": claims.chatgpt_account_is_fedramp,
            }),
            &test_rsa_encoding_key(),
        )
        .expect("JWT should encode");

        let expected_claims = AgentIdentityJwtClaims {
            iss: AGENT_IDENTITY_JWT_ISSUER.to_string(),
            aud: AGENT_IDENTITY_JWT_AUDIENCE.to_string(),
            iat: 1_700_000_000,
            exp: 4_000_000_000,
            agent_runtime_id: "agent-runtime-id".to_string(),
            agent_private_key: "private-key".to_string(),
            account_id: "account-id".to_string(),
            chatgpt_user_id: "user-id".to_string(),
            email: "user@example.com".to_string(),
            plan_type: AuthPlanType::Known(KnownPlan::Pro),
            chatgpt_account_is_fedramp: false,
        };
        assert_eq!(
            decode_agent_identity_jwt(&jwt, Some(&jwks)).expect("JWT should verify"),
            expected_claims
        );
    }

    #[test]
    fn decode_agent_identity_jwt_rejects_untrusted_kid() {
        let jwks = test_jwks("other-key");

        let jwt = jsonwebtoken::encode(
            &test_jwt_header("test-key"),
            &serde_json::json!({
                "iss": AGENT_IDENTITY_JWT_ISSUER,
                "aud": AGENT_IDENTITY_JWT_AUDIENCE,
                "iat": 1_700_000_000,
                "exp": 4_000_000_000usize,
                "agent_runtime_id": "agent-runtime-id",
                "agent_private_key": "private-key",
                "account_id": "account-id",
                "chatgpt_user_id": "user-id",
                "email": "user@example.com",
                "plan_type": "pro",
                "chatgpt_account_is_fedramp": false,
            }),
            &test_rsa_encoding_key(),
        )
        .expect("JWT should encode");

        decode_agent_identity_jwt(&jwt, Some(&jwks)).expect_err("JWT should not verify");
    }

    #[test]
    fn decode_agent_identity_jwt_requires_issuer_and_audience() {
        let jwks = test_jwks("test-key");
        let jwt = jsonwebtoken::encode(
            &test_jwt_header("test-key"),
            &serde_json::json!({
                "iat": 1_700_000_000,
                "exp": 4_000_000_000usize,
                "agent_runtime_id": "agent-runtime-id",
                "agent_private_key": "private-key",
                "account_id": "account-id",
                "chatgpt_user_id": "user-id",
                "email": "user@example.com",
                "plan_type": "pro",
                "chatgpt_account_is_fedramp": false,
            }),
            &test_rsa_encoding_key(),
        )
        .expect("JWT should encode");

        decode_agent_identity_jwt(&jwt, Some(&jwks)).expect_err("JWT should not verify");
    }

    fn test_jwt_header(kid: &str) -> Header {
        let mut header = Header::new(Algorithm::RS256);
        header.kid = Some(kid.to_string());
        header
    }

    fn test_rsa_encoding_key() -> EncodingKey {
        EncodingKey::from_rsa_pem(
            br#"-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDWpAXYypOsYAwO
bvBduMk/mxaoYDze0AZSzaSzLuIlcsl2EKDgC3AabhIWXh/qTGEJLOU3VB1e5mO9
FPbBlmIZSL3FQTbyt/hYutPFKfCou5PLmScw/TzILS3/RhT8UY9kxxZvXiEbTki9
mvxRuZFpVqDFJHwfitIjKZGhXDCYVKurPTrxetYZJg0h8sQBLKjkZ0BqqaTUkAsg
0eBgZAlXEzG3By8PGhUqYLt6W1Q3KYw0FmGy/gTyzH1g0ukGgSJvOd8SkNT8MbOs
zl5kKxDNqpuEE6UZ3jbuJ+5382d31w+rOAJRzbf7QVdI9+luCSwJcDACYPQ4WNBa
uCpV0ovpAgMBAAECggEAVu84LwZdqYN9XpswX8VoPYrjMm9IODapWQBRpQFoNyK2
1ksF3bjEPvA2Azk8U/l7k+vLKw22l6lY3EyRZPcz5GnB8xLm3ogE3mtNOp4yCyVu
RxhQ91aaN7mU17/a4BdorLi2LYVCg3zBmYociD1Q2AluNGsCmwPu+K7tfR2J0Sg8
NjqiTbDG1XDpR/icwgC9t6vh8lZpCHDhF4tbQfLLVLeA/OdcuzXDyMCXbmdVIdBQ
rm4aIFmr2e1/2ctTbCg85S6AGFTH+pSLjrwTzyvf+F6NW5uNjLQAQLFj+EznBDxj
Xdx90cySrjsKK6PVWQF4RiTvkSW8eWL7R6B2FZbGwQKBgQDuVQRj72hWloR7mbEL
aUEEv3pIXTMXWEsoMBNczos/1L1RnAN1AI44TurznasPZAWvQj+kVbLDR+TAeZrL
iA8HIWswQUI18hFmgKzSkwIXGtubcKVrgsKeS4lMDKCM/Ef6WAYdeq6ronoY5lCN
YrJFmGp81W5zcV7lyiycgbSiGwKBgQDmjWYf6pZjrK7Z+OJ3X1AZfi2vss15SCvL
3fPgzIDbViztpGyQhc3DQZIsBNIu0xZp/veGce9TEeTds2ro9NfdJFeou8+fC7Pq
sOsM3amGFFi+ZW/9BWyjZEM88bgWWAjqLHbpfHDxjAf5CSxddqxgHlbP0Ytyb1Vg
gmPDn9YKSwKBgQDbTi3hC35WFuDHn0/zcSHcDZmnFuOZeqyFyV83yfMGhGrEuqvP
sPgtRikajJ3IZsB4WZyYSidZXEFY/0z6NjOl2xF38MTNQPbT/FmK1q1Yt2UWrlv5
BvSwlk87RG9D7C0LZo4R+D7cPoDdgqjiwMvMEIkEX5zn641oI1ZTmWKuuwKBgQCD
KF+3unnRvHRAVoFnTZbA2fJdqMeRvogD04GhGlYX8V9f1hFY6nXTJaNlXVzA/J8c
r8ra9kgjJuPfZ+ljG58OFFW2DRohLcQtuHYPfK6rMzoFHqnl9EcIcMp7ijuionR3
29HOJFgQYgxLFXfit9d6WugiE+BTupiEbckZif13HwKBgE/lAlkVHP6YahOO2Ljc
J1bwkqKZTB5dHolX9A58e/xXnfZ5P8f3Z83+Izap3FwqQulk7b1WO1MQcHuVg2NN
5da0D4h2rYOXnbYIg0BVu4spQbaM6ewsp66b8+MzLOBvj8SzWdt1Oyw0q/MRyQAR
8U4M2TSWCKUY/A6sT4W8+mT9
-----END PRIVATE KEY-----"#,
        )
        .expect("test RSA key should parse")
    }

    fn test_jwks(kid: &str) -> jsonwebtoken::jwk::JwkSet {
        serde_json::from_value(serde_json::json!({
            "keys": [{
                "kty": "RSA",
                "kid": kid,
                "use": "sig",
                "alg": "RS256",
                "n": "1qQF2MqTrGAMDm7wXbjJP5sWqGA83tAGUs2ksy7iJXLJdhCg4AtwGm4SFl4f6kxhCSzlN1QdXuZjvRT2wZZiGUi9xUE28rf4WLrTxSnwqLuTy5knMP08yC0t_0YU_FGPZMcWb14hG05IvZr8UbmRaVagxSR8H4rSIymRoVwwmFSrqz068XrWGSYNIfLEASyo5GdAaqmk1JALINHgYGQJVxMxtwcvDxoVKmC7eltUNymMNBZhsv4E8sx9YNLpBoEibznfEpDU_DGzrM5eZCsQzaqbhBOlGd427ifud_Nnd9cPqzgCUc23-0FXSPfpbgksCXAwAmD0OFjQWrgqVdKL6Q",
                "e": "AQAB",
            }]
        }))
        .expect("test JWKS should parse")
    }

    #[test]
    fn agent_identity_jwks_url_uses_backend_api_base_url() {
        assert_eq!(
            agent_identity_jwks_url("https://chatgpt.com/backend-api"),
            "https://chatgpt.com/backend-api/wham/agent-identities/jwks"
        );
        assert_eq!(
            agent_identity_jwks_url("https://chatgpt.com/backend-api/"),
            "https://chatgpt.com/backend-api/wham/agent-identities/jwks"
        );
    }

    #[test]
    fn agent_identity_jwks_url_uses_codex_api_base_url() {
        assert_eq!(
            agent_identity_jwks_url("http://localhost:8080/api/codex"),
            "http://localhost:8080/api/codex/agent-identities/jwks"
        );
        assert_eq!(
            agent_identity_jwks_url("http://localhost:8080/api/codex/"),
            "http://localhost:8080/api/codex/agent-identities/jwks"
        );
    }

    fn jwt_with_payload(payload: serde_json::Value) -> String {
        let encode = |bytes: &[u8]| URL_SAFE_NO_PAD.encode(bytes);
        let header_b64 = encode(br#"{"alg":"none","typ":"JWT"}"#);
        let payload_b64 = encode(&serde_json::to_vec(&payload).expect("payload should serialize"));
        let signature_b64 = encode(b"sig");
        format!("{header_b64}.{payload_b64}.{signature_b64}")
    }
}
@@ -16,12 +16,10 @@ workspace = true
codex-app-server-protocol = { workspace = true }
codex-git-utils = { workspace = true }
codex-login = { workspace = true }
codex-model-provider = { workspace = true }
codex-plugin = { workspace = true }
codex-protocol = { workspace = true }
os_info = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha1 = { workspace = true }
tokio = { workspace = true, features = [
    "macros",
@@ -30,5 +28,5 @@ tokio = { workspace = true, features = [
tracing = { workspace = true, features = ["log"] }

[dev-dependencies]
codex-utils-absolute-path = { workspace = true }
pretty_assertions = { workspace = true }
serde_json = { workspace = true }

File diff suppressed because it is too large
@@ -1,34 +1,21 @@
use crate::events::AppServerRpcTransport;
use crate::events::GuardianReviewAnalyticsResult;
use crate::events::GuardianReviewTrackContext;
use crate::events::TrackEventRequest;
use crate::events::TrackEventsRequest;
use crate::events::current_runtime_metadata;
use crate::facts::AnalyticsFact;
use crate::facts::AnalyticsJsonRpcError;
use crate::facts::AppInvocation;
use crate::facts::AppMentionedInput;
use crate::facts::AppUsedInput;
use crate::facts::CustomAnalyticsFact;
use crate::facts::HookRunFact;
use crate::facts::HookRunInput;
use crate::facts::PluginState;
use crate::facts::PluginStateChangedInput;
use crate::facts::SkillInvocation;
use crate::facts::SkillInvokedInput;
use crate::facts::SubAgentThreadStartedInput;
use crate::facts::TrackEventsContext;
use crate::facts::TurnResolvedConfigFact;
use crate::facts::TurnTokenUsageFact;
use crate::reducer::AnalyticsReducer;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::ClientResponse;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ServerResponse;
use codex_login::AuthManager;
use codex_login::default_client::create_client;
use codex_plugin::PluginTelemetryMetadata;
@@ -51,7 +38,8 @@ pub(crate) struct AnalyticsEventsQueue {

#[derive(Clone)]
pub struct AnalyticsEventsClient {
    queue: Option<AnalyticsEventsQueue>,
    queue: AnalyticsEventsQueue,
    analytics_enabled: Option<bool>,
}
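A reading note for the hunks in this file: the +/- diff gutter was lost in extraction, so removed and added lines appear back to back. That is why the struct above seems to declare `queue` twice; one side of the comparison stores `Option<AnalyticsEventsQueue>` and decides at construction time, while the other always holds a queue and gates each event on `analytics_enabled`. Read the same way, the `@@ -290,76 +215,18 @@` hunk further down suggests the flag-gated side of `record_fact` looks roughly like this (a reconstruction from the stripped diff, not verbatim source):

    // Plausible flag-gated variant; the Option-queue side instead does
    // `if let Some(queue) = self.queue.as_ref() { queue.try_send(input); }`.
    pub(crate) fn record_fact(&self, input: AnalyticsFact) {
        // Drop the fact when analytics is explicitly disabled.
        if self.analytics_enabled == Some(false) {
            return;
        }
        self.queue.try_send(input);
    }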

impl AnalyticsEventsQueue {
@@ -120,15 +108,11 @@ impl AnalyticsEventsClient {
        analytics_enabled: Option<bool>,
    ) -> Self {
        Self {
            queue: (analytics_enabled != Some(false))
                .then(|| AnalyticsEventsQueue::new(Arc::clone(&auth_manager), base_url)),
            queue: AnalyticsEventsQueue::new(Arc::clone(&auth_manager), base_url),
            analytics_enabled,
        }
    }

    pub fn disabled() -> Self {
        Self { queue: None }
    }

    pub fn track_skill_invocations(
        &self,
        tracking: TrackEventsContext,
@@ -167,16 +151,6 @@ impl AnalyticsEventsClient {
        ));
    }

    pub fn track_guardian_review(
        &self,
        tracking: &GuardianReviewTrackContext,
        result: GuardianReviewAnalyticsResult,
    ) {
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::GuardianReview(
            Box::new(tracking.event_params(result)),
        )));
    }

    pub fn track_app_mentioned(&self, tracking: TrackEventsContext, mentions: Vec<AppInvocation>) {
        if mentions.is_empty() {
            return;
@@ -186,30 +160,8 @@ impl AnalyticsEventsClient {
        )));
    }

    pub fn track_request(
        &self,
        connection_id: u64,
        request_id: RequestId,
        request: &ClientRequest,
    ) {
        if !matches!(
            request,
            ClientRequest::TurnStart { .. } | ClientRequest::TurnSteer { .. }
        ) {
            return;
        }
        self.record_fact(AnalyticsFact::ClientRequest {
            connection_id,
            request_id,
            request: Box::new(request.clone()),
        });
    }

    pub fn track_app_used(&self, tracking: TrackEventsContext, app: AppInvocation) {
        let Some(queue) = self.queue.as_ref() else {
            return;
        };
        if !queue.should_enqueue_app_used(&tracking, &app) {
        if !self.queue.should_enqueue_app_used(&tracking, &app) {
            return;
        }
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::AppUsed(
@@ -217,17 +169,8 @@ impl AnalyticsEventsClient {
        )));
    }

    pub fn track_hook_run(&self, tracking: TrackEventsContext, hook: HookRunFact) {
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::HookRun(
            HookRunInput { tracking, hook },
        )));
    }

    pub fn track_plugin_used(&self, tracking: TrackEventsContext, plugin: PluginTelemetryMetadata) {
        let Some(queue) = self.queue.as_ref() else {
            return;
        };
        if !queue.should_enqueue_plugin_used(&tracking, &plugin) {
        if !self.queue.should_enqueue_plugin_used(&tracking, &plugin) {
            return;
        }
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::PluginUsed(
@@ -235,24 +178,6 @@ impl AnalyticsEventsClient {
        )));
    }

    pub fn track_compaction(&self, event: crate::facts::CodexCompactionEvent) {
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::Compaction(
            Box::new(event),
        )));
    }

    pub fn track_turn_resolved_config(&self, fact: TurnResolvedConfigFact) {
        self.record_fact(AnalyticsFact::Custom(
            CustomAnalyticsFact::TurnResolvedConfig(Box::new(fact)),
        ));
    }

    pub fn track_turn_token_usage(&self, fact: TurnTokenUsageFact) {
        self.record_fact(AnalyticsFact::Custom(CustomAnalyticsFact::TurnTokenUsage(
            Box::new(fact),
        )));
    }

    pub fn track_plugin_installed(&self, plugin: PluginTelemetryMetadata) {
        self.record_fact(AnalyticsFact::Custom(
            CustomAnalyticsFact::PluginStateChanged(PluginStateChangedInput {
@@ -290,76 +215,18 @@ impl AnalyticsEventsClient {
    }

    pub(crate) fn record_fact(&self, input: AnalyticsFact) {
        if let Some(queue) = self.queue.as_ref() {
            queue.try_send(input);
        }
    }

    pub fn track_response(
        &self,
        connection_id: u64,
        request_id: RequestId,
        response: ClientResponsePayload,
    ) {
        if !matches!(
            response,
            ClientResponsePayload::ThreadStart(_)
                | ClientResponsePayload::ThreadResume(_)
                | ClientResponsePayload::ThreadFork(_)
                | ClientResponsePayload::TurnStart(_)
                | ClientResponsePayload::TurnSteer(_)
        ) {
        if self.analytics_enabled == Some(false) {
            return;
        }
        self.record_fact(AnalyticsFact::ClientResponse {
        self.queue.try_send(input);
    }

    pub fn track_response(&self, connection_id: u64, response: ClientResponse) {
        self.record_fact(AnalyticsFact::Response {
            connection_id,
            request_id,
            response: Box::new(response),
        });
    }

    pub fn track_error_response(
        &self,
        connection_id: u64,
        request_id: RequestId,
        error: JSONRPCErrorError,
        error_type: Option<AnalyticsJsonRpcError>,
    ) {
        self.record_fact(AnalyticsFact::ErrorResponse {
            connection_id,
            request_id,
            error,
            error_type,
        });
    }

    pub fn track_server_request(&self, connection_id: u64, request: ServerRequest) {
        self.record_fact(AnalyticsFact::ServerRequest {
            connection_id,
            request: Box::new(request),
        });
    }

    pub fn track_server_response(&self, response: ServerResponse) {
        self.record_fact(AnalyticsFact::ServerResponse {
            response: Box::new(response),
        });
    }

    pub fn track_notification(&self, notification: ServerNotification) {
        if !matches!(
            notification,
            ServerNotification::TurnStarted(_)
                | ServerNotification::TurnCompleted(_)
                | ServerNotification::ItemStarted(_)
                | ServerNotification::ItemCompleted(_)
                | ServerNotification::ItemGuardianApprovalReviewStarted(_)
                | ServerNotification::ItemGuardianApprovalReviewCompleted(_)
        ) {
            return;
        }
        self.record_fact(AnalyticsFact::Notification(Box::new(notification)));
    }
}

async fn send_track_events(
@@ -373,9 +240,16 @@ async fn send_track_events(
    let Some(auth) = auth_manager.auth().await else {
        return;
    };
    if !auth.uses_codex_backend() {
    if !auth.is_chatgpt_auth() {
        return;
    }
    let access_token = match auth.get_token() {
        Ok(token) => token,
        Err(_) => return,
    };
    let Some(account_id) = auth.get_account_id() else {
        return;
    };

    let base_url = base_url.trim_end_matches('/');
    let url = format!("{base_url}/codex/analytics-events/events");
@@ -384,7 +258,8 @@ async fn send_track_events(
    let response = create_client()
        .post(&url)
        .timeout(ANALYTICS_EVENTS_TIMEOUT)
        .headers(codex_model_provider::auth_provider_from_auth(&auth).to_auth_headers())
        .bearer_auth(&access_token)
        .header("chatgpt-account-id", &account_id)
        .header("Content-Type", "application/json")
        .json(&payload)
        .send()
@@ -402,7 +277,3 @@ async fn send_track_events(
        }
    }
}
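One detail worth calling out in `send_track_events`: `trim_end_matches('/')` keeps the endpoint stable whether or not the configured base URL carries a trailing slash. A small illustration with a hypothetical base value:

// Hypothetical illustration of the URL normalization above.
let base_url = "https://chatgpt.com/backend-api/".trim_end_matches('/');
assert_eq!(
    format!("{base_url}/codex/analytics-events/events"),
    "https://chatgpt.com/backend-api/codex/analytics-events/events"
);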

#[cfg(test)]
#[path = "client_tests.rs"]
mod tests;

@@ -1,224 +0,0 @@
use super::AnalyticsEventsClient;
use super::AnalyticsEventsQueue;
use crate::facts::AnalyticsFact;
use codex_app_server_protocol::ApprovalsReviewer as AppServerApprovalsReviewer;
use codex_app_server_protocol::AskForApproval as AppServerAskForApproval;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::ClientResponsePayload;
use codex_app_server_protocol::PermissionProfile as AppServerPermissionProfile;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxPolicy as AppServerSandboxPolicy;
use codex_app_server_protocol::SessionSource as AppServerSessionSource;
use codex_app_server_protocol::Thread;
use codex_app_server_protocol::ThreadArchiveParams;
use codex_app_server_protocol::ThreadArchiveResponse;
use codex_app_server_protocol::ThreadForkResponse;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStatus as AppServerThreadStatus;
use codex_app_server_protocol::Turn;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus as AppServerTurnStatus;
use codex_app_server_protocol::TurnSteerParams;
use codex_app_server_protocol::TurnSteerResponse;
use codex_protocol::models::PermissionProfile as CorePermissionProfile;
use codex_utils_absolute_path::test_support::PathBufExt;
use codex_utils_absolute_path::test_support::test_path_buf;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TryRecvError;

fn client_with_receiver() -> (AnalyticsEventsClient, mpsc::Receiver<AnalyticsFact>) {
    let (sender, receiver) = mpsc::channel(8);
    let queue = AnalyticsEventsQueue {
        sender,
        app_used_emitted_keys: Arc::new(Mutex::new(HashSet::new())),
        plugin_used_emitted_keys: Arc::new(Mutex::new(HashSet::new())),
    };
    (AnalyticsEventsClient { queue: Some(queue) }, receiver)
}
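Per the `@@ -1,224 +0,0 @@` header, this whole test file exists on only one side of the comparison, and the helper above accordingly builds the Option-queue client (`queue: Some(queue)`). A hypothetical equivalent for the flag-gated struct variant would construct the client as:

    // Hypothetical: the flag-gated struct has no Option wrapper around queue.
    (
        AnalyticsEventsClient {
            queue,
            analytics_enabled: None,
        },
        receiver,
    )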

fn sample_turn_start_request() -> ClientRequest {
    ClientRequest::TurnStart {
        request_id: RequestId::Integer(1),
        params: TurnStartParams {
            thread_id: "thread-1".to_string(),
            input: Vec::new(),
            ..Default::default()
        },
    }
}

fn sample_turn_steer_request() -> ClientRequest {
    ClientRequest::TurnSteer {
        request_id: RequestId::Integer(2),
        params: TurnSteerParams {
            thread_id: "thread-1".to_string(),
            expected_turn_id: "turn-1".to_string(),
            input: Vec::new(),
            responsesapi_client_metadata: None,
        },
    }
}

fn sample_thread_archive_request() -> ClientRequest {
    ClientRequest::ThreadArchive {
        request_id: RequestId::Integer(3),
        params: ThreadArchiveParams {
            thread_id: "thread-1".to_string(),
        },
    }
}

fn sample_thread(thread_id: &str) -> Thread {
    Thread {
        id: thread_id.to_string(),
        session_id: format!("session-{thread_id}"),
        forked_from_id: None,
        preview: "first prompt".to_string(),
        ephemeral: false,
        model_provider: "openai".to_string(),
        created_at: 1,
        updated_at: 2,
        status: AppServerThreadStatus::Idle,
        path: None,
        cwd: test_path_buf("/tmp").abs(),
        cli_version: "0.0.0".to_string(),
        source: AppServerSessionSource::Exec,
        thread_source: None,
        agent_nickname: None,
        agent_role: None,
        git_info: None,
        name: None,
        turns: Vec::new(),
    }
}

fn sample_permission_profile() -> AppServerPermissionProfile {
    CorePermissionProfile::Disabled.into()
}

fn sample_thread_start_response() -> ClientResponsePayload {
    ClientResponsePayload::ThreadStart(ThreadStartResponse {
        thread: sample_thread("thread-1"),
        model: "gpt-5".to_string(),
        model_provider: "openai".to_string(),
        service_tier: None,
        cwd: test_path_buf("/tmp").abs(),
        instruction_sources: Vec::new(),
        approval_policy: AppServerAskForApproval::OnFailure,
        approvals_reviewer: AppServerApprovalsReviewer::User,
        sandbox: AppServerSandboxPolicy::DangerFullAccess,
        permission_profile: Some(sample_permission_profile()),
        active_permission_profile: None,
        reasoning_effort: None,
    })
}

fn sample_thread_resume_response() -> ClientResponsePayload {
    ClientResponsePayload::ThreadResume(ThreadResumeResponse {
        thread: sample_thread("thread-2"),
        model: "gpt-5".to_string(),
        model_provider: "openai".to_string(),
        service_tier: None,
        cwd: test_path_buf("/tmp").abs(),
        instruction_sources: Vec::new(),
        approval_policy: AppServerAskForApproval::OnFailure,
        approvals_reviewer: AppServerApprovalsReviewer::User,
        sandbox: AppServerSandboxPolicy::DangerFullAccess,
        permission_profile: Some(sample_permission_profile()),
        active_permission_profile: None,
        reasoning_effort: None,
    })
}

fn sample_thread_fork_response() -> ClientResponsePayload {
    ClientResponsePayload::ThreadFork(ThreadForkResponse {
        thread: sample_thread("thread-3"),
        model: "gpt-5".to_string(),
        model_provider: "openai".to_string(),
        service_tier: None,
        cwd: test_path_buf("/tmp").abs(),
        instruction_sources: Vec::new(),
        approval_policy: AppServerAskForApproval::OnFailure,
        approvals_reviewer: AppServerApprovalsReviewer::User,
        sandbox: AppServerSandboxPolicy::DangerFullAccess,
        permission_profile: Some(sample_permission_profile()),
        active_permission_profile: None,
        reasoning_effort: None,
    })
}

fn sample_turn_start_response() -> ClientResponsePayload {
    ClientResponsePayload::TurnStart(TurnStartResponse {
        turn: Turn {
            id: "turn-1".to_string(),
            items_view: codex_app_server_protocol::TurnItemsView::Full,
            items: Vec::new(),
            status: AppServerTurnStatus::InProgress,
            error: None,
            started_at: None,
            completed_at: None,
            duration_ms: None,
        },
    })
}

fn sample_turn_steer_response() -> ClientResponsePayload {
    ClientResponsePayload::TurnSteer(TurnSteerResponse {
        turn_id: "turn-2".to_string(),
    })
}

#[test]
fn track_request_only_enqueues_analytics_relevant_requests() {
    let (client, mut receiver) = client_with_receiver();

    for (request_id, request) in [
        (RequestId::Integer(1), sample_turn_start_request()),
        (RequestId::Integer(2), sample_turn_steer_request()),
    ] {
        client.track_request(/*connection_id*/ 7, request_id, &request);
        assert!(matches!(
            receiver.try_recv(),
            Ok(AnalyticsFact::ClientRequest { .. })
        ));
    }

    let ignored_request = sample_thread_archive_request();
    client.track_request(
        /*connection_id*/ 7,
        RequestId::Integer(3),
        &ignored_request,
    );
    assert!(matches!(receiver.try_recv(), Err(TryRecvError::Empty)));
}

#[test]
fn track_response_only_enqueues_analytics_relevant_responses() {
    let (client, mut receiver) = client_with_receiver();

    for (request_id, response) in [
        (RequestId::Integer(1), sample_thread_start_response()),
        (RequestId::Integer(2), sample_thread_resume_response()),
        (RequestId::Integer(3), sample_thread_fork_response()),
        (RequestId::Integer(4), sample_turn_start_response()),
        (RequestId::Integer(5), sample_turn_steer_response()),
    ] {
        client.track_response(/*connection_id*/ 7, request_id, response);
        assert!(matches!(
            receiver.try_recv(),
            Ok(AnalyticsFact::ClientResponse { .. })
        ));
    }

    client.track_response(
        /*connection_id*/ 7,
        RequestId::Integer(6),
        ClientResponsePayload::ThreadArchive(ThreadArchiveResponse {}),
    );
    assert!(matches!(receiver.try_recv(), Err(TryRecvError::Empty)));
}
Some files were not shown because too many files have changed in this diff